List:       dri-patches
Subject:    drm: Branch 'drm-ttm-0-2-branch' - 2 commits
From:       thomash@kemper.freedesktop.org (Thomas Hellstrom)
Date:       2006-10-11 20:26:53
Message-ID: 20061011202653.D7DEE10078@kemper.freedesktop.org

 libdrm/xf86drm.c            |   91 -----
 libdrm/xf86mm.h             |   23 -
 linux-core/drmP.h           |    2 
 linux-core/drm_agpsupport.c |    2 
 linux-core/drm_bo.c         |   99 +++---
 linux-core/drm_compat.c     |  286 ++++++++++++++++++
 linux-core/drm_compat.h     |   91 +++++
 linux-core/drm_drv.c        |    1 
 linux-core/drm_ttm.c        |  681 +++++++++++---------------------------------
 linux-core/drm_ttm.h        |   89 +----
 linux-core/drm_vm.c         |  248 ++++------------
 shared-core/drm.h           |   18 -
 12 files changed, 720 insertions(+), 911 deletions(-)

New commits:
diff-tree 30703893674b3da5b862dee2acd6efca13424398 (from f2db76e2f206d2017f710eaddc4b33add4498898)
Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Date:   Wed Oct 11 22:21:01 2006 +0200

    Compatibility code for 2.6.15-2.6.18. It is ugly, but a small comfort is
    that it will go away in the mainstream kernel.
    Some bugfixes, mainly in error paths.
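
The user-space half of the new -EAGAIN protocol shows up in the drmBOCreate()
hunk below: the buffer-object ioctl is simply restarted for as long as the
kernel asks for a retry. A minimal sketch of that pattern (the helper name
drm_ioctl_retry is illustrative, not part of libdrm):

    #include <errno.h>
    #include <sys/ioctl.h>

    /* Restart an ioctl while the kernel reports EAGAIN. */
    static int drm_ioctl_retry(int fd, unsigned long request, void *arg)
    {
        int ret;

        do {
            ret = ioctl(fd, request, arg);
        } while (ret != 0 && errno == EAGAIN);

        return ret ? -errno : 0;
    }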

diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index a083ca2..c768318 100644
--- a/libdrm/xf86drm.c
+++ b/libdrm/xf86drm.c
@@ -2624,7 +2624,8 @@ int drmBOCreate(int fd, void *ttm, unsig
     drm_bo_arg_t arg;
     drm_bo_arg_request_t *req = &arg.d.req;
     drm_bo_arg_reply_t *rep = &arg.d.rep;
-    
+    int ret;
+
     memset(buf, 0, sizeof(*buf));
     memset(&arg, 0, sizeof(arg));
     req->mask = mask;
@@ -2650,7 +2651,11 @@ int drmBOCreate(int fd, void *ttm, unsig
     }
     req->op = drm_bo_create;
 
-    if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg))
+    do {
+	ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
+    } while (ret != 0 && errno == EAGAIN);
+
+    if (ret)
 	return -errno;
     if (!arg.handled) {
 	return -EFAULT;
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index d8cab2a..0e2b3fa 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -67,14 +67,23 @@ static int drm_move_tt_to_local(drm_buff
 {
 	drm_device_t *dev = buf->dev;
 	drm_buffer_manager_t *bm = &dev->bm;
+	int ret;
 
 	BUG_ON(!buf->tt);
 
 	mutex_lock(&dev->struct_mutex);
 	if (evict)
-		drm_evict_ttm(buf->ttm);
+		ret = drm_evict_ttm(buf->ttm);
 	else
-		drm_unbind_ttm(buf->ttm);
+		ret = drm_unbind_ttm(buf->ttm);
+
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		if (ret == -EAGAIN)
+			schedule();
+		return ret;
+	}
+	
 	drm_mm_put_block(&bm->tt_manager, buf->tt);
 	buf->tt = NULL;
 
@@ -126,13 +135,31 @@ static void drm_bo_destroy_locked(drm_de
 	list_del_init(&bo->tt_lru);
 	list_del_init(&bo->vram_lru);
 
-	if (bo->tt) {
+	if (bo->ttm) {
+		unsigned long _end = jiffies + DRM_HZ;
+		int ret;
 
 		/*
 		 * This temporarily unlocks struct_mutex. 
 		 */
+		
+		do {
+			ret = drm_unbind_ttm(bo->ttm);
+			if (ret == -EAGAIN) {
+				mutex_unlock(&dev->struct_mutex);
+				schedule();
+				mutex_lock(&dev->struct_mutex);
+			}
+		} while (ret == -EAGAIN && !time_after_eq(jiffies, _end));
+
+		if (ret) {
+			DRM_ERROR("Couldn't unbind buffer. "
+				  "Bad. Continuing anyway\n");
+		}
+	}
+		
+	if (bo->tt) {
 
-		drm_unbind_ttm(bo->ttm);
 		drm_mm_put_block(&bm->tt_manager, bo->tt);
 		bo->tt = NULL;
 	}
@@ -435,6 +462,9 @@ static int drm_bo_evict(drm_buffer_objec
 		ret = drm_move_vram_to_local(bo);
 	}
 #endif
+	if (ret)
+		goto out;
+
 	mutex_lock(&dev->struct_mutex);
 	list_del_init((tt) ? &bo->tt_lru : &bo->vram_lru);
 	if (list_empty((tt) ? &bo->vram_lru : &bo->tt_lru))
@@ -442,7 +472,7 @@ static int drm_bo_evict(drm_buffer_objec
 	mutex_unlock(&dev->struct_mutex);
 	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
 			_DRM_BO_FLAG_EVICTED);
-      out:
+ out:
 	return ret;
 }
 
@@ -521,14 +551,18 @@ static int drm_move_local_to_tt(drm_buff
 	if (ret)
 		return ret;
 	DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->tt->start);
+
 	mutex_lock(&dev->struct_mutex);
 	ret = drm_bind_ttm(bo->ttm, bo->tt->start);
 	if (ret) {
 		drm_mm_put_block(&bm->tt_manager, bo->tt);
+		bo->tt = NULL;
 	}
 	mutex_unlock(&dev->struct_mutex);
-	if (ret)
+
+	if (ret) {
 		return ret;
+	}
 
 	be = bo->ttm->be;
 	if (be->needs_cache_adjust(be))
@@ -1296,6 +1330,7 @@ int drm_buffer_object_create(drm_file_t 
 	}
 	bo->priv_flags = 0;
 	bo->flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
+	atomic_inc(&bm->count);
 	ret = drm_bo_new_flags(dev, bo->flags, mask, hint,
 			       1, &new_flags, &bo->mask);
 	if (ret)
@@ -1311,12 +1346,11 @@ int drm_buffer_object_create(drm_file_t 
 
 	mutex_unlock(&bo->mutex);
 	*buf_obj = bo;
-	atomic_inc(&bm->count);
 	return 0;
-
-      out_err:
+	
+ out_err:
 	mutex_unlock(&bo->mutex);
-	drm_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
+	drm_bo_usage_deref_unlocked(dev, bo);	
 	return ret;
 }
 
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 1aa835c..5287614 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -183,3 +183,239 @@ struct page *drm_vm_ttm_nopage(struct vm
 }
 
 #endif
+
+#ifdef DRM_ODD_MM_COMPAT
+
+typedef struct p_mm_entry {
+	struct list_head head;
+	struct mm_struct *mm;
+	atomic_t refcount;
+        int locked;
+} p_mm_entry_t;
+
+typedef struct vma_entry {
+	struct list_head head;
+	struct vm_area_struct *vma;
+} vma_entry_t;
+
+
+struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+			       unsigned long address, 
+			       int *type)
+{
+	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
+	unsigned long page_offset;
+	struct page *page;
+	drm_ttm_t *ttm; 
+	drm_buffer_manager_t *bm;
+	drm_device_t *dev;
+
+	/*
+	 * FIXME: Check can't map aperture flag.
+	 */
+
+	if (type)
+		*type = VM_FAULT_MINOR;
+
+	if (!map) 
+		return NOPAGE_OOM;
+
+	if (address > vma->vm_end) 
+		return NOPAGE_SIGBUS;
+
+	ttm = (drm_ttm_t *) map->offset;	
+	dev = ttm->dev;
+	mutex_lock(&dev->struct_mutex);
+	drm_fixup_ttm_caching(ttm);
+	BUG_ON(ttm->page_flags & DRM_TTM_PAGE_UNCACHED);
+
+	bm = &dev->bm;
+	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
+	page = ttm->pages[page_offset];
+
+	if (!page) {
+		if (bm->cur_pages >= bm->max_pages) {
+	 		DRM_ERROR("Maximum locked page count exceeded\n"); 
+			page = NOPAGE_OOM;
+			goto out;
+		}
+		page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
+		if (!page) {
+			page = NOPAGE_OOM;
+			goto out;
+		}
+		++bm->cur_pages;
+		SetPageLocked(page);
+	}
+
+	get_page(page);
+ out:
+	mutex_unlock(&dev->struct_mutex);
+	return page;
+}
+
+
+
+
+int drm_ttm_map_bound(struct vm_area_struct *vma)
+{
+	drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
+	drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
+	int ret = 0;
+
+	if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
+		unsigned long pfn = ttm->aper_offset + 
+			(ttm->be->aperture_base >> PAGE_SHIFT);
+		pgprot_t pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
+		
+		ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
+					 vma->vm_end - vma->vm_start,
+					 pgprot);
+	}
+	return ret;
+}
+	
+
+int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
+{
+	p_mm_entry_t *entry, *n_entry;
+	vma_entry_t *v_entry;
+	drm_local_map_t *map = (drm_local_map_t *)
+		vma->vm_private_data;
+	struct mm_struct *mm = vma->vm_mm;
+
+	v_entry = drm_alloc(sizeof(*v_entry), DRM_MEM_TTM);
+	if (!v_entry) {
+		DRM_ERROR("Allocation of vma pointer entry failed\n");
+		return -ENOMEM;
+	}
+	v_entry->vma = vma;
+	map->handle = (void *) v_entry;
+	list_add_tail(&v_entry->head, &ttm->vma_list);
+
+	list_for_each_entry(entry, &ttm->p_mm_list, head) {
+		if (mm == entry->mm) {
+			atomic_inc(&entry->refcount);
+			return 0;
+		} else if ((unsigned long)mm < (unsigned long)entry->mm) ;
+	}
+
+	n_entry = drm_alloc(sizeof(*n_entry), DRM_MEM_TTM);
+	if (!n_entry) {
+		DRM_ERROR("Allocation of process mm pointer entry failed\n");
+		return -ENOMEM;
+	}
+	INIT_LIST_HEAD(&n_entry->head);
+	n_entry->mm = mm;
+	n_entry->locked = 0;
+	atomic_set(&n_entry->refcount, 0);
+	list_add_tail(&n_entry->head, &entry->head);
+
+	return 0;
+}
+
+void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
+{
+	p_mm_entry_t *entry, *n;
+	vma_entry_t *v_entry, *v_n;
+	int found = 0;
+	struct mm_struct *mm = vma->vm_mm;
+
+	list_for_each_entry_safe(v_entry, v_n, &ttm->vma_list, head) {
+		if (v_entry->vma == vma) {
+			found = 1;
+			list_del(&v_entry->head);
+			drm_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
+			break;
+		}
+	}
+	BUG_ON(!found);
+
+	list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) {
+		if (mm == entry->mm) {
+			if (atomic_add_negative(-1, &entry->refcount)) {
+				list_del(&entry->head);
+				BUG_ON(entry->locked);
+				drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
+			}
+			return;
+		}
+	}
+	BUG_ON(1);
+}
+
+
+
+int drm_ttm_lock_mm(drm_ttm_t * ttm)
+{
+	p_mm_entry_t *entry;
+	int lock_ok = 1;
+	
+	list_for_each_entry(entry, &ttm->p_mm_list, head) {
+		BUG_ON(entry->locked);
+		if (!down_write_trylock(&entry->mm->mmap_sem)) {
+			lock_ok = 0;
+			break;
+		}
+		entry->locked = 1;
+	}
+
+	if (lock_ok)
+		return 0;
+
+	list_for_each_entry(entry, &ttm->p_mm_list, head) {
+		if (!entry->locked) 
+			break;
+		up_write(&entry->mm->mmap_sem);
+		entry->locked = 0;
+	}
+
+	/*
+	 * Possible deadlock. Try again. Our callers should handle this
+	 * and restart.
+	 */
+
+	return -EAGAIN;
+}
+
+void drm_ttm_unlock_mm(drm_ttm_t * ttm)
+{
+	p_mm_entry_t *entry;
+	
+	list_for_each_entry(entry, &ttm->p_mm_list, head) {
+		BUG_ON(!entry->locked);
+		up_write(&entry->mm->mmap_sem);
+		entry->locked = 0;
+	}
+}
+
+int drm_ttm_remap_bound(drm_ttm_t *ttm) 
+{
+	vma_entry_t *v_entry;
+	int ret = 0;
+	
+	list_for_each_entry(v_entry, &ttm->vma_list, head) {
+		ret = drm_ttm_map_bound(v_entry->vma);
+		if (ret)
+			break;
+	}
+
+	drm_ttm_unlock_mm(ttm);
+	return ret;
+}
+
+void drm_ttm_finish_unmap(drm_ttm_t *ttm)
+{
+	vma_entry_t *v_entry;
+	
+	if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
+		return;
+
+	list_for_each_entry(v_entry, &ttm->vma_list, head) {
+		v_entry->vma->vm_flags &= ~VM_PFNMAP; 
+	}
+	drm_ttm_unlock_mm(ttm);
+}	
+
+#endif
+
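
The deadlock avoidance in drm_ttm_lock_mm() above generalizes: when several
sleeping locks must be taken while another mutex is already held, take each
one with a trylock, undo everything on the first failure, and return -EAGAIN
so the caller can drop its mutex, schedule() and retry. A minimal sketch with
two rw-semaphores standing in for the per-mm mmap_sems (names illustrative):

    #include <linux/rwsem.h>
    #include <linux/errno.h>

    /* Try-lock-all-or-back-off, as in drm_ttm_lock_mm(). */
    static int lock_both_or_eagain(struct rw_semaphore *a,
                                   struct rw_semaphore *b)
    {
            if (!down_write_trylock(a))
                    return -EAGAIN;
            if (!down_write_trylock(b)) {
                    up_write(a);      /* undo the partial locking */
                    return -EAGAIN;   /* caller: unlock, schedule(), retry */
            }
            return 0;
    }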
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index 4e95679..5617fb7 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -231,6 +231,13 @@ static inline int remap_pfn_range(struct
 #include <linux/mm.h>
 #include <asm/page.h>
 
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) && \
+     (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))) 
+#define DRM_ODD_MM_COMPAT
+#endif
+
+
+
 /*
  * Flush relevant caches and clear a VMA structure so that page references 
  * will cause a page fault. Don't flush tlbs.
@@ -304,4 +311,65 @@ extern struct page *drm_vm_ttm_fault(str
 				     struct fault_data *data);
 
 #endif
+
+#ifdef DRM_ODD_MM_COMPAT
+
+struct drm_ttm;
+
+
+/*
+ * Add a vma to the ttm vma list, and the 
+ * process mm pointer to the ttm mm list. Needs the ttm mutex.
+ */
+
+extern int drm_ttm_add_vma(struct drm_ttm * ttm, 
+			   struct vm_area_struct *vma);
+/*
+ * Delete a vma and the corresponding mm pointer from the
+ * ttm lists. Needs the ttm mutex.
+ */
+extern void drm_ttm_delete_vma(struct drm_ttm * ttm, 
+			       struct vm_area_struct *vma);
+
+/*
+ * Attempts to lock all relevant mmap_sems for a ttm, while
+ * not releasing the ttm mutex. May return -EAGAIN to avoid 
+ * deadlocks. In that case the caller shall release the ttm mutex,
+ * schedule() and try again.
+ */
+
+extern int drm_ttm_lock_mm(struct drm_ttm * ttm);
+
+/*
+ * Unlock all relevant mmap_sems for a ttm.
+ */
+extern void drm_ttm_unlock_mm(struct drm_ttm * ttm);
+
+/*
+ * If the ttm was bound to the aperture, this function shall be called
+ * with all relevant mmap sems held. It deletes the flag VM_PFNMAP from all
+ * vmas mapping this ttm. This is needed just after unmapping the ptes of
+ * the vma, otherwise the do_nopage() function will bug :(. The function
+ * releases the mmap_sems for this ttm.
+ */
+
+extern void drm_ttm_finish_unmap(struct drm_ttm *ttm);
+
+/*
+ * Remap all vmas of this ttm using io_remap_pfn_range. We cannot 
+ * fault these pfns in, because the first one will set the vma VM_PFNMAP
+ * flag, which will make the next fault bug in do_nopage(). The function
+ * releases the mmap_sems for this ttm.
+ */
+
+extern int drm_ttm_remap_bound(struct drm_ttm *ttm);
+
+
+/*
+ * Remap a vma for a bound ttm. Call with the ttm mutex held and
+ * the relevant mmap_sem locked.
+ */
+extern int drm_ttm_map_bound(struct vm_area_struct *vma);
+
+#endif
 #endif
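
The comments above pin down a fixed call sequence for flipping the caching
policy of a mapped ttm on 2.6.15-2.6.18. A sketch of the unmap side, with
ret, dev, offset and holelen as in the unmap_vma_pages() hunk that follows
(ttm mutex held by the caller):

    /* Zap the ptes of a ttm under DRM_ODD_MM_COMPAT. */
    ret = drm_ttm_lock_mm(ttm);     /* lock every mapper's mmap_sem */
    if (ret)
            return ret;             /* -EAGAIN: caller unlocks, schedule()s, retries */
    unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
    drm_ttm_finish_unmap(ttm);      /* clear VM_PFNMAP, release the mmap_sems */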
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index 297d4f7..b56270e 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -66,8 +66,17 @@ static int unmap_vma_pages(drm_ttm_t * t
 	drm_device_t *dev = ttm->dev;
 	loff_t offset = ((loff_t) ttm->mapping_offset) << PAGE_SHIFT;
 	loff_t holelen = ((loff_t) ttm->num_pages) << PAGE_SHIFT;
-	
+
+#ifdef DRM_ODD_MM_COMPAT
+	int ret;
+	ret = drm_ttm_lock_mm(ttm);
+	if (ret)
+		return ret;
+#endif
 	unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
+#ifdef DRM_ODD_MM_COMPAT
+	drm_ttm_finish_unmap(ttm);
+#endif
 	return 0;
 }
 
@@ -128,8 +137,11 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
 
 	DRM_DEBUG("Destroying a ttm\n");
 
+#ifdef DRM_ODD_MM_COMPAT
+	BUG_ON(!list_empty(&ttm->vma_list));
+	BUG_ON(!list_empty(&ttm->p_mm_list));
+#endif
 	be = ttm->be;
-
 	if (be) {
 		be->destroy(be);
 		ttm->be = NULL;
@@ -231,6 +243,11 @@ static drm_ttm_t *drm_init_ttm(struct dr
 	if (!ttm)
 		return NULL;
 
+#ifdef DRM_ODD_MM_COMPAT
+	INIT_LIST_HEAD(&ttm->p_mm_list);
+	INIT_LIST_HEAD(&ttm->vma_list);
+#endif
+
 	ttm->dev = dev;
 	atomic_set(&ttm->vma_count, 0);
 
@@ -263,11 +280,15 @@ static drm_ttm_t *drm_init_ttm(struct dr
 int drm_evict_ttm(drm_ttm_t * ttm)
 {
 	drm_ttm_backend_t *be = ttm->be;
+	int ret;
 
 	switch (ttm->state) {
 	case ttm_bound:
 		if (be->needs_cache_adjust(be)) {
-			unmap_vma_pages(ttm);
+			ret = unmap_vma_pages(ttm);
+			if (ret) {
+				return ret;
+			}
 		}
 		be->unbind(be);
 		break;
@@ -291,12 +312,18 @@ void drm_fixup_ttm_caching(drm_ttm_t * t
 }
 		
 
-void drm_unbind_ttm(drm_ttm_t * ttm)
+int drm_unbind_ttm(drm_ttm_t * ttm)
 {
+	int ret = 0;
+
 	if (ttm->state == ttm_bound) 
-		drm_evict_ttm(ttm);
+		ret = drm_evict_ttm(ttm);
+
+	if (ret)
+		return ret;
 
 	drm_fixup_ttm_caching(ttm);
+	return 0;
 }
 
 int drm_bind_ttm(drm_ttm_t * ttm,
@@ -313,20 +340,45 @@ int drm_bind_ttm(drm_ttm_t * ttm,
 
 	be = ttm->be;
 	
-	drm_ttm_populate(ttm);
+	ret = drm_ttm_populate(ttm);
+	if (ret)
+		return ret;
 	if (ttm->state == ttm_unbound && be->needs_cache_adjust(be)) {
-		unmap_vma_pages(ttm);
+		ret = unmap_vma_pages(ttm);
+		if (ret)
+			return ret;
+
 		drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
-	} 
+	}
+#ifdef DRM_ODD_MM_COMPAT 
+	else if (ttm->state == ttm_evicted && be->needs_cache_adjust(be)) {
+		ret = drm_ttm_lock_mm(ttm);
+		if (ret)
+			return ret;
+	}
+#endif
 	if ((ret = be->bind(be, aper_offset))) {
-		drm_unbind_ttm(ttm);
+		ttm->state = ttm_evicted;
+#ifdef DRM_ODD_MM_COMPAT
+		if (be->needs_cache_adjust(be))
+			drm_ttm_unlock_mm(ttm);
+#endif
 		DRM_ERROR("Couldn't bind backend.\n");
 		return ret;
 	}
 
+			
 	ttm->aper_offset = aper_offset;
 	ttm->state = ttm_bound;
 
+#ifdef DRM_ODD_MM_COMPAT
+	if (be->needs_cache_adjust(be)) {
+		ret = drm_ttm_remap_bound(ttm);
+		if (ret) 
+			return ret;
+	}
+#endif
+			
 	return 0;
 }
 
diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h
index 19c1df5..5421c52 100644
--- a/linux-core/drm_ttm.h
+++ b/linux-core/drm_ttm.h
@@ -74,6 +74,11 @@ typedef struct drm_ttm {
 		ttm_unbound,
 		ttm_unpopulated,
 	} state;
+#ifdef DRM_ODD_MM_COMPAT
+       struct list_head vma_list;
+       struct list_head p_mm_list;
+#endif
+
 } drm_ttm_t;
 
 typedef struct drm_ttm_object {
@@ -95,7 +100,7 @@ extern drm_ttm_object_t *drm_lookup_ttm_
 extern int drm_bind_ttm(drm_ttm_t * ttm,
 			unsigned long aper_offset);
 
-extern void drm_unbind_ttm(drm_ttm_t * ttm);
+extern int drm_unbind_ttm(drm_ttm_t * ttm);
 
 /*
  * Evict a ttm region. Keeps Aperture caching policy.
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index 4595115..091b43f 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -204,15 +204,15 @@ struct page *drm_vm_ttm_fault(struct vm_
 	if (!page) {
 		if (bm->cur_pages >= bm->max_pages) {
 	 		DRM_ERROR("Maximum locked page count exceeded\n"); 
-			page = NOPAGE_OOM;
+			data->type = VM_FAULT_OOM;
 			goto out;
 		}
-		++bm->cur_pages;
 		page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
 		if (!page) {
 			data->type = VM_FAULT_OOM;
 			goto out;
 		}
+		++bm->cur_pages;
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
 		SetPageLocked(page);
 #else
@@ -236,28 +236,6 @@ struct page *drm_vm_ttm_fault(struct vm_
 	
 	err = vm_insert_pfn(vma, address, pfn, pgprot);
 
-	if (!err && (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) && 
-	    ttm->num_pages > 1) {
-
-		/*
-		 * FIXME: Check can't map aperture flag.
-		 */
-
-		/*
-		 * Since we're not racing with anybody else, 
-		 * we might as well populate the whole object space.
-		 * Note that we're touching vma->vm_flags with this
-		 * operation, but we are not changing them, so we should be 
-		 * OK.
-		 */
-
-		BUG_ON(ttm->state == ttm_unpopulated);
-		err = io_remap_pfn_range(vma, address + PAGE_SIZE, pfn+1,
-					 (ttm->num_pages - 1) * PAGE_SIZE,
-					 pgprot);
-	}
-		
-
 	if (!err || err == -EBUSY) 
 		data->type = VM_FAULT_MINOR; 
 	else
@@ -611,6 +589,9 @@ static int drm_vm_ttm_open(struct vm_are
 	mutex_lock(&dev->struct_mutex);
 	ttm = (drm_ttm_t *) map->offset;
 	atomic_inc(&ttm->vma_count);
+#ifdef DRM_ODD_MM_COMPAT
+	drm_ttm_add_vma(ttm, vma);
+#endif
 	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
@@ -666,6 +647,9 @@ static void drm_vm_ttm_close(struct vm_a
 		ttm = (drm_ttm_t *) map->offset;
 		dev = ttm->dev;
 		mutex_lock(&dev->struct_mutex);
+#ifdef DRM_ODD_MM_COMPAT
+		drm_ttm_delete_vma(ttm, vma);
+#endif
 		if (atomic_dec_and_test(&ttm->vma_count)) {
 			if (ttm->destroy) {
 				ret = drm_destroy_ttm(ttm);
@@ -877,6 +861,11 @@ int drm_mmap(struct file *filp, struct v
 		vma->vm_private_data = (void *) map;
 		vma->vm_file = filp;
 		vma->vm_flags |= VM_RESERVED | VM_IO;
+#ifdef DRM_ODD_MM_COMPAT
+		mutex_lock(&dev->struct_mutex);
+		drm_ttm_map_bound(vma);
+		mutex_unlock(&dev->struct_mutex);
+#endif		
 		if (drm_vm_ttm_open(vma))
 		        return -EAGAIN;
 		return 0;
diff-tree f2db76e2f206d2017f710eaddc4b33add4498898 (from c58574c60505a699e19e1ed59e1b441be2594e53)
Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Date:   Wed Oct 11 13:40:35 2006 +0200

    Big update:
    Adapt for new functions in the 2.6.19 kernel.
    Remove the ability to have multiple regions in one TTM.
       This simplifies a lot of code.
    Remove the ability to access TTMs from user space.
       We don't need it anymore without ttm regions.
    Don't change caching policy for evicted buffers. Instead change it only
       when the buffer is accessed by the CPU (on the first page fault).
       This tremendously speeds up eviction rates.
    Current code is safe for kernels <= 2.6.14.
    Should also be OK with 2.6.19 and above.
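
With regions gone, each ttm carries exactly one backend and a small state
machine; the lazy cache fix-up is what makes eviction cheap, since an evicted
ttm keeps its uncached pages until the CPU actually touches it again. The
transitions, as implemented in drm_ttm.c below:

    /*
     *  ttm_unpopulated --drm_ttm_populate()------> ttm_unbound
     *  ttm_unbound     --drm_bind_ttm()----------> ttm_bound
     *  ttm_bound       --drm_evict_ttm()---------> ttm_evicted (caching untouched)
     *  ttm_evicted     --drm_fixup_ttm_caching()-> ttm_unbound (first CPU fault)
     */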

diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index bce913d..a083ca2 100644
--- a/libdrm/xf86drm.c
+++ b/libdrm/xf86drm.c
@@ -2462,74 +2462,6 @@ int drmFenceWait(int fd, unsigned flags,
     return 0;
 }    
 
-int drmTTMCreate(int fd, drmTTM *ttm, unsigned long size, unsigned flags)
-{
-    drm_ttm_arg_t arg;
-
-    memset(&arg, 0, sizeof(arg));
-    arg.op = drm_ttm_create;
-    arg.flags = flags;
-    arg.size = size;
-
-    if (ioctl(fd, DRM_IOCTL_TTM, &arg))
-	return -errno;
-    
-    ttm->handle = arg.handle;
-    ttm->user_token = (drm_handle_t) arg.user_token;
-    ttm->flags = arg.flags;
-    ttm->size = arg.size;
-    ttm->virtual = NULL;
-    ttm->mapCount = 0;
-    return 0;
-}
-
-int drmTTMDestroy(int fd, const drmTTM *ttm)
-{
-    drm_ttm_arg_t arg;
-
-    memset(&arg, 0, sizeof(arg));
-    arg.op = drm_ttm_destroy;
-    arg.handle = ttm->handle;
-    if (ioctl(fd, DRM_IOCTL_TTM, &arg))
-	return -errno;
-    return 0;
-}
-
-
-int drmTTMReference(int fd, unsigned handle, drmTTM *ttm)
-{
-    drm_ttm_arg_t arg;
-
-    memset(&arg, 0, sizeof(arg));
-    arg.handle = handle;
-    arg.op = drm_ttm_reference;
-    if (ioctl(fd, DRM_IOCTL_TTM, &arg))
-	return -errno;
-    ttm->handle = arg.handle;
-    ttm->user_token = (drm_handle_t) arg.user_token;
-    ttm->flags = arg.flags;
-    ttm->size = arg.size;
-    return 0;
-}
-
-int drmTTMUnreference(int fd, const drmTTM *ttm)
-{
-    drm_ttm_arg_t arg;
-
-    memset(&arg, 0, sizeof(arg));
-    arg.op = drm_ttm_destroy;
-    arg.handle = ttm->handle;
-    if (ioctl(fd, DRM_IOCTL_TTM, &arg))
-	return -errno;
-    return 0;
-}
-
-drm_handle_t drmTTMMapHandle(int fd, const drmTTM *ttm)
-{
-    (void) fd;
-    return ttm->user_token;
-}
-
 static int drmAdjustListNodes(drmBOList *list)
 {
     drmBONode *node;
@@ -2685,7 +2617,7 @@ static void drmBOCopyReply(const drm_bo_
     
     
 
-int drmBOCreate(int fd, drmTTM *ttm, unsigned long start, unsigned long size,
+int drmBOCreate(int fd, void *ttm, unsigned long start, unsigned long size,
 		void *user_buffer, drm_bo_type_t type, unsigned mask,
 		unsigned hint, drmBO *buf)
 {
@@ -2700,15 +2632,9 @@ int drmBOCreate(int fd, drmTTM *ttm, uns
     req->size = size;
     req->type = type;
 
-    buf->ttm = NULL;
     buf->virtual = NULL;
 
     switch(type) {
-    case drm_bo_type_ttm:
-	req->arg_handle = ttm->handle;
-	req->buffer_start = start;
-	buf->ttm = ttm;
-	break;
     case drm_bo_type_dc:
         req->buffer_start = start;
 	break;
@@ -2727,10 +2653,10 @@ int drmBOCreate(int fd, drmTTM *ttm, uns
     if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg))
 	return -errno;
     if (!arg.handled) {
-      fprintf(stderr, "Not handled\n");
 	return -EFAULT;
     }
     if (rep->ret) {
+        fprintf(stderr, "Error %d\n", rep->ret);
 	return rep->ret;
     }
     
@@ -2853,8 +2779,10 @@ int drmBOMap(int fd, drmBO *buf, unsigne
 	virtual = mmap(0, buf->size + buf->start, 
 		       PROT_READ | PROT_WRITE, MAP_SHARED,
 		       fd, buf->mapHandle);
-	if (virtual == MAP_FAILED)
+	if (virtual == MAP_FAILED) {
 	    ret = -errno;
+	    fprintf(stderr, "Map error 0x%016llx\n", buf->mapHandle);
+	}
 	if (ret) 
 	    return ret;
 	buf->mapVirtual = virtual;
diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h
index c0e4f1b..78df37c 100644
--- a/libdrm/xf86mm.h
+++ b/libdrm/xf86mm.h
@@ -101,15 +101,6 @@ typedef struct _drmFence{
         unsigned signaled;
 } drmFence;
 
-typedef struct _drmTTM{
-        unsigned handle;
-        drm_u64_t user_token;
-        unsigned flags;
-        unsigned long size;
-        void *virtual;
-        int mapCount;
-} drmTTM;
-
 typedef struct _drmBO{
     drm_bo_type_t type;
     unsigned handle;
@@ -125,7 +116,6 @@ typedef struct _drmBO{
     void *virtual;
     void *mapVirtual;
     int mapCount;
-    drmTTM *ttm;
 } drmBO;
 
 
@@ -164,17 +154,6 @@ extern int           drmFenceBuffers(int
 
 
 /*
- * TTM functions.
- */
-
-extern int drmTTMCreate(int fd, drmTTM *ttm, unsigned long size, 
-			unsigned flags);
-extern int drmTTMDestroy(int fd, const drmTTM *ttm);
-extern int drmTTMReference(int fd, unsigned handle, drmTTM *ttm);
-extern int drmTTMUnreference(int fd, const drmTTM *ttm);
-extern drm_handle_t drmTTMMapHandle(int fd, const drmTTM *ttm);
-
-/*
  * Buffer object list functions.
  */
 
@@ -189,7 +168,7 @@ extern int drmBOCreateList(int numTarget
  * Buffer object functions.
  */
 
-extern int drmBOCreate(int fd, drmTTM *ttm, unsigned long start, unsigned long size,
+extern int drmBOCreate(int fd, void *ttm, unsigned long start, unsigned long size,
 			      void *user_buffer, drm_bo_type_t type, unsigned mask,
 		unsigned hint, drmBO *buf);
 extern int drmBODestroy(int fd, drmBO *buf);
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index bc57bd5..1b6d94e 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -1012,7 +1012,7 @@ typedef struct drm_buffer_object{
 
 	atomic_t usage;
 	drm_ttm_object_t *ttm_object;
-	drm_ttm_backend_list_t *ttm_region;
+        drm_ttm_t *ttm;
 	unsigned long num_pages;
         unsigned long buffer_start;
         drm_bo_type_t type;
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index 2dd8016..77994d5 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -683,6 +683,7 @@ drm_ttm_backend_t *drm_agp_init_ttm_unca
 	agp_be->unbind = drm_agp_unbind_ttm;
 	agp_be->destroy = drm_agp_destroy_ttm;
 	agp_be->needs_free = (backend == NULL);
+	agp_be->drm_map_type = _DRM_AGP;
 	return agp_be;
 }
 EXPORT_SYMBOL(drm_agp_init_ttm_uncached);
@@ -720,6 +721,7 @@ drm_ttm_backend_t *drm_agp_init_ttm_cach
 	agp_be->unbind = drm_agp_unbind_ttm;
 	agp_be->destroy = drm_agp_destroy_ttm;
 	agp_be->needs_free = (backend == NULL);
+	agp_be->drm_map_type = _DRM_AGP;
 	return agp_be;
 }
 EXPORT_SYMBOL(drm_agp_init_ttm_cached);
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index d1989e4..d8cab2a 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -63,7 +63,7 @@
  * bo locked.
  */
 
-static int drm_move_tt_to_local(drm_buffer_object_t * buf)
+static int drm_move_tt_to_local(drm_buffer_object_t * buf, int evict)
 {
 	drm_device_t *dev = buf->dev;
 	drm_buffer_manager_t *bm = &dev->bm;
@@ -71,7 +71,10 @@ static int drm_move_tt_to_local(drm_buff
 	BUG_ON(!buf->tt);
 
 	mutex_lock(&dev->struct_mutex);
-	drm_unbind_ttm_region(buf->ttm_region);
+	if (evict)
+		drm_evict_ttm(buf->ttm);
+	else
+		drm_unbind_ttm(buf->ttm);
 	drm_mm_put_block(&bm->tt_manager, buf->tt);
 	buf->tt = NULL;
 
@@ -129,7 +132,7 @@ static void drm_bo_destroy_locked(drm_de
 		 * This temporarily unlocks struct_mutex. 
 		 */
 
-		drm_unbind_ttm_region(bo->ttm_region);
+		drm_unbind_ttm(bo->ttm);
 		drm_mm_put_block(&bm->tt_manager, bo->tt);
 		bo->tt = NULL;
 	}
@@ -137,9 +140,6 @@ static void drm_bo_destroy_locked(drm_de
 		drm_mm_put_block(&bm->vram_manager, bo->vram);
 		bo->vram = NULL;
 	}
-	if (bo->ttm_region) {
-		drm_destroy_ttm_region(bo->ttm_region);
-	}
 	if (bo->ttm_object) {
 		drm_ttm_object_deref_locked(dev, bo->ttm_object);
 	}
@@ -428,7 +428,7 @@ static int drm_bo_evict(drm_buffer_objec
 	}
 
 	if (tt) {
-		ret = drm_move_tt_to_local(bo);
+		ret = drm_move_tt_to_local(bo, 1);
 	}
 #if 0
 	else {
@@ -522,7 +522,7 @@ static int drm_move_local_to_tt(drm_buff
 		return ret;
 	DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->tt->start);
 	mutex_lock(&dev->struct_mutex);
-	ret = drm_bind_ttm_region(bo->ttm_region, bo->tt->start);
+	ret = drm_bind_ttm(bo->ttm, bo->tt->start);
 	if (ret) {
 		drm_mm_put_block(&bm->tt_manager, bo->tt);
 	}
@@ -530,7 +530,7 @@ static int drm_move_local_to_tt(drm_buff
 	if (ret)
 		return ret;
 
-	be = bo->ttm_region->be;
+	be = bo->ttm->be;
 	if (be->needs_cache_adjust(be))
 		bo->flags &= ~DRM_BO_FLAG_CACHED;
 	bo->flags &= ~DRM_BO_MASK_MEM;
@@ -1023,7 +1023,7 @@ static int drm_bo_move_buffer(drm_buffer
 		if (ret)
 			return ret;
 	} else {
-		drm_move_tt_to_local(bo);
+		drm_move_tt_to_local(bo, 0);
 	}
 
 	return 0;
@@ -1203,34 +1203,24 @@ static int drm_bo_handle_wait(drm_file_t
  * Call bo->mutex locked.
  */
 
-static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo,
-			  uint32_t ttm_handle)
+static int drm_bo_add_ttm(drm_file_t * priv, drm_buffer_object_t * bo)
 {
 	drm_device_t *dev = bo->dev;
 	drm_ttm_object_t *to = NULL;
-	drm_ttm_t *ttm;
 	int ret = 0;
 	uint32_t ttm_flags = 0;
 
 	bo->ttm_object = NULL;
-	bo->ttm_region = NULL;
+	bo->ttm = NULL;
 
 	switch (bo->type) {
 	case drm_bo_type_dc:
 		mutex_lock(&dev->struct_mutex);
 		ret = drm_ttm_object_create(dev, bo->num_pages * PAGE_SIZE,
+					    bo->mask & DRM_BO_FLAG_BIND_CACHED,
 					    ttm_flags, &to);
 		mutex_unlock(&dev->struct_mutex);
 		break;
-	case drm_bo_type_ttm:
-		mutex_lock(&dev->struct_mutex);
-		to = drm_lookup_ttm_object(priv, ttm_handle, 1);
-		mutex_unlock(&dev->struct_mutex);
-		if (!to) {
-			DRM_ERROR("Could not find TTM object\n");
-			ret = -EINVAL;
-		}
-		break;
 	case drm_bo_type_user:
 	case drm_bo_type_fake:
 		break;
@@ -1246,14 +1236,7 @@ static int drm_bo_add_ttm(drm_file_t * p
 
 	if (to) {
 		bo->ttm_object = to;
-		ttm = drm_ttm_from_object(to);
-		ret = drm_create_ttm_region(ttm, bo->buffer_start >> PAGE_SHIFT,
-					    bo->num_pages,
-					    bo->mask & DRM_BO_FLAG_BIND_CACHED,
-					    &bo->ttm_region);
-		if (ret) {
-			drm_ttm_object_deref_unlocked(dev, to);
-		}
+		bo->ttm = drm_ttm_from_object(to);
 	}
 	return ret;
 }
@@ -1261,7 +1244,6 @@ static int drm_bo_add_ttm(drm_file_t * p
 int drm_buffer_object_create(drm_file_t * priv,
 			     unsigned long size,
 			     drm_bo_type_t type,
-			     uint32_t ttm_handle,
 			     uint32_t mask,
 			     uint32_t hint,
 			     unsigned long buffer_start,
@@ -1318,7 +1300,7 @@ int drm_buffer_object_create(drm_file_t 
 			       1, &new_flags, &bo->mask);
 	if (ret)
 		goto out_err;
-	ret = drm_bo_add_ttm(priv, bo, ttm_handle);
+	ret = drm_bo_add_ttm(priv, bo);
 	if (ret)
 		goto out_err;
 
@@ -1394,7 +1376,6 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
 			rep.ret =
 			    drm_buffer_object_create(priv, req->size,
 						     req->type,
-						     req->arg_handle,
 						     req->mask,
 						     req->hint,
 						     req->buffer_start, &entry);
@@ -1659,7 +1640,7 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
 		if (arg.req.tt_p_size) {
 			ret = drm_mm_init(&bm->tt_manager,
 					  arg.req.tt_p_offset,
-					  arg.req.tt_p_size);
+					  3000 /* arg.req.tt_p_size */);
 			bm->has_tt = 1;
 			bm->use_tt = 1;
 
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 2b449e9..1aa835c 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -63,8 +63,10 @@ pgprot_t vm_get_page_prot(unsigned long 
 #endif
 };
 
-int drm_pte_is_clear(struct vm_area_struct *vma,
-		     unsigned long addr)
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+
+static int drm_pte_is_clear(struct vm_area_struct *vma,
+			    unsigned long addr)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	int ret = 1;
@@ -77,7 +79,7 @@ int drm_pte_is_clear(struct vm_area_stru
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
 	spin_lock(&mm->page_table_lock);
 #else
-	spinlock_t ptl;
+	spinlock_t *ptl;
 #endif
 	
 	pgd = pgd_offset(mm, addr);
@@ -92,7 +94,7 @@ int drm_pte_is_clear(struct vm_area_stru
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
 	pte = pte_offset_map(pmd, addr);
 #else 
-	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 
 #endif
 	if (!pte)
 		goto unlock;
@@ -108,6 +110,17 @@ int drm_pte_is_clear(struct vm_area_stru
 	return ret;
 }
 	
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 
+		  unsigned long pfn, pgprot_t pgprot)
+{
+	int ret;
+	if (!drm_pte_is_clear(vma, addr))
+		return -EBUSY;
+
+	ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, pgprot);
+	return ret;
+}
+
 
 static struct {
 	spinlock_t lock;
@@ -141,3 +154,32 @@ void free_nopage_retry(void)
 		spin_unlock(&drm_np_retry.lock);
 	}
 }
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+
+struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+			       unsigned long address, 
+			       int *type)
+{
+	struct fault_data data;
+
+	if (type)
+		*type = VM_FAULT_MINOR;
+
+	data.address = address;
+	data.vma = vma;
+	drm_vm_ttm_fault(vma, &data);
+	switch (data.type) {
+	case VM_FAULT_OOM:
+		return NOPAGE_OOM;
+	case VM_FAULT_SIGBUS:
+		return NOPAGE_SIGBUS;
+	default:
+		break;
+	}
+
+	return NOPAGE_REFAULT;
+}
+
+#endif
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index 784b9a7..4e95679 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -278,19 +278,30 @@ extern int drm_map_page_into_agp(struct 
  * static space. The page will be put by do_nopage() since we've already
  * filled out the pte.
  */
-extern struct page * get_nopage_retry(void);
+
+struct fault_data {
+	struct vm_area_struct *vma;
+	unsigned long address;
+	pgoff_t pgoff;
+	unsigned int flags;
+	
+	int type;
+};
+
+extern struct page *get_nopage_retry(void);
 extern void free_nopage_retry(void);
 
-#define NOPAGE_RETRY get_nopage_retry()
+#define NOPAGE_REFAULT get_nopage_retry()
 
-#endif
+extern int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, 
+			 unsigned long pfn, pgprot_t pgprot);
 
-/*
- * Is the PTE for this address really clear so that we can use 
- * io_remap_pfn_range?
- */
+extern struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+				      unsigned long address, 
+				      int *type);
 
-int drm_pte_is_clear(struct vm_area_struct *vma,
-		     unsigned long addr);
+extern struct page *drm_vm_ttm_fault(struct vm_area_struct *vma, 
+				     struct fault_data *data);
 
 #endif
+#endif
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 1122836..c7f0f48 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -120,7 +120,6 @@ static drm_ioctl_desc_t drm_ioctls[] = {
 
 	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0},
 	[DRM_IOCTL_NR(DRM_IOCTL_FENCE)] = {drm_fence_ioctl, DRM_AUTH},
-	[DRM_IOCTL_NR(DRM_IOCTL_TTM)] = {drm_ttm_ioctl, DRM_AUTH},
 	[DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH},
 	[DRM_IOCTL_NR(DRM_IOCTL_MM_INIT)] = {drm_mm_init_ioctl, 
 					     DRM_AUTH },
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index 51e28ac..297d4f7 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -27,20 +27,6 @@
  **************************************************************************/
 
 #include "drmP.h"
-#include <asm/tlbflush.h>
-
-typedef struct p_mm_entry {
-	struct list_head head;
-	struct mm_struct *mm;
-	atomic_t refcount;
-} p_mm_entry_t;
-
-typedef struct drm_val_action {
-	int needs_rx_flush;
-	int evicted_tt;
-	int evicted_vram;
-	int validated;
-} drm_val_action_t;
 
 /*
  * Use kmalloc if possible. Otherwise fall back to vmalloc.
@@ -75,20 +61,52 @@ static void ttm_free(void *pointer, unsi
  * Unmap all vma pages from vmas mapping this ttm.
  */
 
-static int unmap_vma_pages(drm_ttm_t * ttm, unsigned long page_offset,
-			   unsigned long num_pages)
+static int unmap_vma_pages(drm_ttm_t * ttm)
 {
 	drm_device_t *dev = ttm->dev;
-	loff_t offset = ((loff_t) ttm->mapping_offset + page_offset) 
-		<< PAGE_SHIFT;
-	loff_t holelen = num_pages << PAGE_SHIFT;
+	loff_t offset = ((loff_t) ttm->mapping_offset) << PAGE_SHIFT;
+	loff_t holelen = ((loff_t) ttm->num_pages) << PAGE_SHIFT;
 	
-
 	unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
 	return 0;
 }
 
 /*
+ * Change caching policy for the linear kernel map 
+ * for range of pages in a ttm.
+ */
+
+static int drm_set_caching(drm_ttm_t * ttm, int noncached)
+{
+	int i;
+	struct page **cur_page;
+	int do_tlbflush = 0;
+
+	if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
+		return 0;
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		cur_page = ttm->pages + i;
+		if (*cur_page) {
+			if (!PageHighMem(*cur_page)) {
+				if (noncached) {
+					map_page_into_agp(*cur_page);
+				} else {
+					unmap_page_from_agp(*cur_page);
+				}
+				do_tlbflush = 1;
+			}
+		}
+	}
+	if (do_tlbflush)
+		flush_agp_mappings();
+
+	DRM_MASK_VAL(ttm->page_flags, DRM_TTM_PAGE_UNCACHED, noncached);
+
+	return 0;
+}
+
+/*
  * Free all resources associated with a ttm.
  */
 
@@ -96,8 +114,8 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
 {
 
 	int i;
-	struct list_head *list, *next;
 	struct page **cur_page;
+	drm_ttm_backend_t *be;
 
 	if (!ttm)
 		return 0;
@@ -110,30 +128,26 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
 
 	DRM_DEBUG("Destroying a ttm\n");
 
-	if (ttm->be_list) {
-		list_for_each_safe(list, next, &ttm->be_list->head) {
-			drm_ttm_backend_list_t *entry =
-			    list_entry(list, drm_ttm_backend_list_t, head);
-			drm_destroy_ttm_region(entry);
-		}
+	be = ttm->be;
 
-		drm_free(ttm->be_list, sizeof(*ttm->be_list), DRM_MEM_TTM);
-		ttm->be_list = NULL;
+	if (be) {
+		be->destroy(be);
+		ttm->be = NULL;
 	}
 
 	if (ttm->pages) {
 		drm_buffer_manager_t *bm = &ttm->dev->bm;
-		int do_tlbflush = 0;
+		if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) 
+			drm_set_caching(ttm, 0);
+
 		for (i = 0; i < ttm->num_pages; ++i) {
 			cur_page = ttm->pages + i;
-			if (ttm->page_flags &&
-			    (ttm->page_flags[i] & DRM_TTM_PAGE_UNCACHED) &&
-			    *cur_page && !PageHighMem(*cur_page)) {
-				unmap_page_from_agp(*cur_page);
-				do_tlbflush = 1;
-			}
 			if (*cur_page) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
 				unlock_page(*cur_page);
+#else
+				ClearPageReserved(*cur_page);
+#endif
 				if (page_count(*cur_page) != 1) {
 					DRM_ERROR("Erroneous page count. "
 						  "Leaking pages.\n");
@@ -151,47 +165,66 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
 				--bm->cur_pages;
 			}
 		}
-		if (do_tlbflush)
-			flush_agp_mappings();
 		ttm_free(ttm->pages, ttm->num_pages*sizeof(*ttm->pages),
 			 DRM_MEM_TTM);
 		ttm->pages = NULL;
 	}
 
-	if (ttm->page_flags) {
-		ttm_free(ttm->page_flags, ttm->num_pages*sizeof(*ttm->page_flags), 
-			 DRM_MEM_TTM);
-		ttm->page_flags = NULL;
-	}
-
-	if (ttm->vma_list) {
-		list_for_each_safe(list, next, &ttm->vma_list->head) {
-			drm_ttm_vma_list_t *entry =
-			    list_entry(list, drm_ttm_vma_list_t, head);
-			list_del(list);
-			entry->vma->vm_private_data = NULL;
-			drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
-		}
-		drm_free(ttm->vma_list, sizeof(*ttm->vma_list), DRM_MEM_TTM);
-		ttm->vma_list = NULL;
-	}
-
 	drm_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
 
 	return 0;
 }
 
+static int drm_ttm_populate(drm_ttm_t *ttm)
+{
+	struct page *page;
+	unsigned long i;
+	drm_buffer_manager_t *bm;
+	drm_ttm_backend_t *be;
+
+
+	if (ttm->state != ttm_unpopulated) 
+		return 0;
+	
+	bm = &ttm->dev->bm;
+	be = ttm->be;
+	for (i=0; i<ttm->num_pages; ++i) {
+		page = ttm->pages[i];
+		if (!page) {
+			if (bm->cur_pages >= bm->max_pages) {
+				DRM_ERROR("Maximum locked page count exceeded\n");
+				return -ENOMEM;
+			}
+			page = drm_alloc_gatt_pages(0);
+			if (!page) 
+				return -ENOMEM;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+			SetPageLocked(page);
+#else
+			SetPageReserved(page);
+#endif
+			ttm->pages[i] = page;
+			++bm->cur_pages;
+		}
+	}
+	be->populate(be, ttm->num_pages, ttm->pages);
+	ttm->state = ttm_unbound;
+	return 0;
+}		
+	       
+
+
 /*
  * Initialize a ttm.
- * FIXME: Avoid using vmalloc for the page- and page_flags tables?
  */
 
-static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size)
+static drm_ttm_t *drm_init_ttm(struct drm_device *dev, unsigned long size,
+			       int cached)
 {
-
+	drm_bo_driver_t *bo_driver = dev->driver->bo_driver;
 	drm_ttm_t *ttm;
 
-	if (!dev->driver->bo_driver)
+	if (!bo_driver)
 		return NULL;
 
 	ttm = drm_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
@@ -199,21 +232,12 @@ static drm_ttm_t *drm_init_ttm(struct dr
 		return NULL;
 
 	ttm->dev = dev;
-	ttm->lhandle = 0;
 	atomic_set(&ttm->vma_count, 0);
 
 	ttm->destroy = 0;
 	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-	ttm->page_flags = ttm_alloc(ttm->num_pages * sizeof(*ttm->page_flags),
-				    DRM_MEM_TTM);
-	if (!ttm->page_flags) {
-		drm_destroy_ttm(ttm);
-		DRM_ERROR("Failed allocating page_flags table\n");
-		return NULL;
-	}
-	memset(ttm->page_flags, 0, ttm->num_pages * sizeof(*ttm->page_flags));
-
+	ttm->page_flags = 0;
 	ttm->pages = ttm_alloc(ttm->num_pages * sizeof(*ttm->pages), 
 			       DRM_MEM_TTM);
 	if (!ttm->pages) {
@@ -222,382 +246,86 @@ static drm_ttm_t *drm_init_ttm(struct dr
 		return NULL;
 	}
 	memset(ttm->pages, 0, ttm->num_pages * sizeof(*ttm->pages));
-
-	ttm->be_list = drm_calloc(1, sizeof(*ttm->be_list), DRM_MEM_TTM);
-	if (!ttm->be_list) {
-		DRM_ERROR("Alloc be regions failed\n");
-		drm_destroy_ttm(ttm);
-		return NULL;
-	}
-
-	INIT_LIST_HEAD(&ttm->be_list->head);
-	INIT_LIST_HEAD(&ttm->p_mm_list);
-	atomic_set(&ttm->shared_count, 0);
-	ttm->mm_list_seq = 0;
-
-	ttm->vma_list = drm_calloc(1, sizeof(*ttm->vma_list), DRM_MEM_TTM);
-	if (!ttm->vma_list) {
-		DRM_ERROR("Alloc vma list failed\n");
+	ttm->be = bo_driver->create_ttm_backend_entry(dev, cached);
+	if (!ttm->be) {
 		drm_destroy_ttm(ttm);
+		DRM_ERROR("Failed creating ttm backend entry\n");
 		return NULL;
 	}
-
-	INIT_LIST_HEAD(&ttm->vma_list->head);
-
-	ttm->lhandle = (unsigned long)ttm;
-
+	ttm->state = ttm_unpopulated;
 	return ttm;
 }
 
 /*
- * Change caching policy for the linear kernel map 
- * for range of pages in a ttm.
- */
-
-static int drm_set_caching(drm_ttm_t * ttm, unsigned long page_offset,
-			   unsigned long num_pages, int noncached)
-{
-	int i, cur;
-	struct page **cur_page;
-	int do_tlbflush = 0;
-
-	for (i = 0; i < num_pages; ++i) {
-		cur = page_offset + i;
-		cur_page = ttm->pages + cur;
-		if (*cur_page) {
-			if (PageHighMem(*cur_page)) {
-				if (noncached
-				    && page_address(*cur_page) != NULL) {
-					DRM_ERROR
-					    ("Illegal mapped HighMem Page\n");
-					return -EINVAL;
-				}
-			} else if ((ttm->page_flags[cur] &
-				    DRM_TTM_PAGE_UNCACHED) != noncached) {
-				DRM_MASK_VAL(ttm->page_flags[cur],
-					     DRM_TTM_PAGE_UNCACHED, noncached);
-				if (noncached) {
-					map_page_into_agp(*cur_page);
-				} else {
-					unmap_page_from_agp(*cur_page);
-				}
-				do_tlbflush = 1;
-			}
-		}
-	}
-	if (do_tlbflush)
-		flush_agp_mappings();
-	return 0;
-}
-
-/*
  * Unbind a ttm region from the aperture.
  */
 
-int drm_evict_ttm_region(drm_ttm_backend_list_t * entry)
+int drm_evict_ttm(drm_ttm_t * ttm)
 {
-	drm_ttm_backend_t *be = entry->be;
-	drm_ttm_t *ttm = entry->owner;
+	drm_ttm_backend_t *be = ttm->be;
 
-	if (be) {
-		switch (entry->state) {
-		case ttm_bound:
-			if (ttm && be->needs_cache_adjust(be)) {
-				unmap_vma_pages(ttm, entry->page_offset,
-						entry->num_pages);
-			}
-			be->unbind(entry->be);
-			if (ttm && be->needs_cache_adjust(be)) {
-				drm_set_caching(ttm, entry->page_offset,
-						entry->num_pages, 0);
-			}
-			break;
-		default:
-			break;
+	switch (ttm->state) {
+	case ttm_bound:
+		if (be->needs_cache_adjust(be)) {
+			unmap_vma_pages(ttm);
 		}
+		be->unbind(be);
+		break;
+	default:
+		break;
 	}
-	entry->state = ttm_evicted;
+	ttm->state = ttm_evicted;
 	return 0;
 }
 
-void drm_unbind_ttm_region(drm_ttm_backend_list_t * entry)
-{
-	drm_evict_ttm_region(entry);
-	entry->state = ttm_unbound;
-}
-
-/*
- * Destroy and clean up all resources associated with a ttm region.
- * FIXME: release pages to OS when doing this operation.
- */
-
-void drm_destroy_ttm_region(drm_ttm_backend_list_t * entry)
+void drm_fixup_ttm_caching(drm_ttm_t * ttm)
 {
-	drm_ttm_backend_t *be = entry->be;
-	drm_ttm_t *ttm = entry->owner;
-	uint32_t *cur_page_flags;
-	int i;
-
-	DRM_DEBUG("Destroying a TTM region\n");
-	list_del_init(&entry->head);
 
-	drm_unbind_ttm_region(entry);
-	if (be) {
-		be->clear(be);
-		be->destroy(be);
-	}
-	cur_page_flags = ttm->page_flags + entry->page_offset;
-	for (i = 0; i < entry->num_pages; ++i) {
-		DRM_MASK_VAL(*cur_page_flags, DRM_TTM_PAGE_USED, 0);
-		cur_page_flags++;
+	if (ttm->state == ttm_evicted) {
+		drm_ttm_backend_t *be = ttm->be;
+		if (be->needs_cache_adjust(be)) {
+			drm_set_caching(ttm, 0);
+		}
+		ttm->state = ttm_unbound;
 	}
-
-	drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
 }
+		
 
-/*
- * Create a ttm region from a range of ttm pages.
- */
-
-int drm_create_ttm_region(drm_ttm_t * ttm, unsigned long page_offset,
-			  unsigned long n_pages, int cached,
-			  drm_ttm_backend_list_t ** region)
+void drm_unbind_ttm(drm_ttm_t * ttm)
 {
-	struct page **cur_page;
-	uint32_t *cur_page_flags;
-	drm_ttm_backend_list_t *entry;
-	drm_ttm_backend_t *be;
-	int ret, i;
-	drm_buffer_manager_t *bm = &ttm->dev->bm;
-
-	if ((page_offset + n_pages) > ttm->num_pages || n_pages == 0) {
-		DRM_ERROR("Region Doesn't fit ttm\n");
-		return -EINVAL;
-	}
-
-	cur_page_flags = ttm->page_flags + page_offset;
-	for (i = 0; i < n_pages; ++i, ++cur_page_flags) {
-		if (*cur_page_flags & DRM_TTM_PAGE_USED) {
-			DRM_ERROR("TTM region overlap\n");
-			return -EINVAL;
-		} else {
-			DRM_MASK_VAL(*cur_page_flags, DRM_TTM_PAGE_USED,
-				     DRM_TTM_PAGE_USED);
-		}
-	}
+	if (ttm->state == ttm_bound) 
+		drm_evict_ttm(ttm);
 
-	entry = drm_calloc(1, sizeof(*entry), DRM_MEM_TTM);
-	if (!entry)
-		return -ENOMEM;
-
-	be = ttm->dev->driver->bo_driver->create_ttm_backend_entry(ttm->dev,
-								   cached);
-	if (!be) {
-		drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
-		DRM_ERROR("Couldn't create backend.\n");
-		return -EINVAL;
-	}
-	entry->state = ttm_unbound;
-	entry->page_offset = page_offset;
-	entry->num_pages = n_pages;
-	entry->be = be;
-	entry->owner = ttm;
-
-	INIT_LIST_HEAD(&entry->head);
-	list_add_tail(&entry->head, &ttm->be_list->head);
-
-	for (i = 0; i < entry->num_pages; ++i) {
-		cur_page = ttm->pages + (page_offset + i);
-		if (!*cur_page) {
-			if (bm->cur_pages >= bm->max_pages) {
-				DRM_ERROR("Maximum locked page count exceeded\n");
-				drm_destroy_ttm_region(entry);
-				return -ENOMEM;
-			}
-			*cur_page = drm_alloc_gatt_pages(0);
-			if (!*cur_page) {
-				DRM_ERROR("Page allocation failed\n");
-				drm_destroy_ttm_region(entry);
-				return -ENOMEM;
-			}
-			SetPageLocked(*cur_page);
-			++bm->cur_pages;
-		}
-	}
-
-	if ((ret = be->populate(be, n_pages, ttm->pages + page_offset))) {
-		drm_destroy_ttm_region(entry);
-		DRM_ERROR("Couldn't populate backend.\n");
-		return ret;
-	}
-	ttm->aperture_base = be->aperture_base;
-
-	*region = entry;
-	return 0;
+	drm_fixup_ttm_caching(ttm);
 }
 
-/*
- * Bind a ttm region. Set correct caching policy.
- */
-
-int drm_bind_ttm_region(drm_ttm_backend_list_t * region,
-			unsigned long aper_offset)
+int drm_bind_ttm(drm_ttm_t * ttm,
+		 unsigned long aper_offset)
 {
 
-	int i;
-	uint32_t *cur_page_flag;
 	int ret = 0;
 	drm_ttm_backend_t *be;
-	drm_ttm_t *ttm;
 
-	if (!region || region->state == ttm_bound)
+	if (!ttm)
 		return -EINVAL;
+	if (ttm->state == ttm_bound)
+		return 0;
 
-	be = region->be;
-	ttm = region->owner;
-
-	if (ttm && be->needs_cache_adjust(be)) {
-		if (ret)
-			return ret;
-
-		unmap_vma_pages(ttm, region->page_offset,
-				region->num_pages);
-		drm_set_caching(ttm, region->page_offset, region->num_pages,
-				DRM_TTM_PAGE_UNCACHED);
-	} else {
-		DRM_DEBUG("Binding cached\n");
-	}
-
+	be = ttm->be;
+	
+	drm_ttm_populate(ttm);
+	if (ttm->state == ttm_unbound && be->needs_cache_adjust(be)) {
+		unmap_vma_pages(ttm);
+		drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
+	} 
 	if ((ret = be->bind(be, aper_offset))) {
-		drm_unbind_ttm_region(region);
+		drm_unbind_ttm(ttm);
 		DRM_ERROR("Couldn't bind backend.\n");
 		return ret;
 	}
 
-	cur_page_flag = ttm->page_flags + region->page_offset;
-	for (i = 0; i < region->num_pages; ++i) {
-		DRM_MASK_VAL(*cur_page_flag, DRM_TTM_MASK_PFN,
-			     (i + aper_offset) << PAGE_SHIFT);
-		cur_page_flag++;
-	}
-
-	region->state = ttm_bound;
-	return 0;
-}
-
-int drm_rebind_ttm_region(drm_ttm_backend_list_t * entry,
-			  unsigned long aper_offset)
-{
-	return drm_bind_ttm_region(entry, aper_offset);
-
-}
-
-/*
- * Destroy an anonymous ttm region.
- */
-
-void drm_user_destroy_region(drm_ttm_backend_list_t * entry)
-{
-	drm_ttm_backend_t *be;
-	struct page **cur_page;
-	int i;
-
-	if (!entry || entry->owner)
-		return;
-
-	be = entry->be;
-	if (!be) {
-		drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
-		return;
-	}
-
-	be->unbind(be);
-
-	if (entry->anon_pages) {
-		cur_page = entry->anon_pages;
-		for (i = 0; i < entry->anon_locked; ++i) {
-			if (!PageReserved(*cur_page))
-				SetPageDirty(*cur_page);
-			page_cache_release(*cur_page);
-			cur_page++;
-		}
-		ttm_free(entry->anon_pages, 
-			 sizeof(*entry->anon_pages)*entry->anon_locked,
-			 DRM_MEM_TTM);
-	}
-
-	be->destroy(be);
-	drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
-	return;
-}
-
-/*
- * Create a ttm region from an arbitrary region of user pages.
- * Since this region has no backing ttm, it's owner is set to
- * null, and it is registered with the file of the caller.
- * Gets destroyed when the file is closed. We call this an
- * anonymous ttm region.
- */
-
-int drm_user_create_region(drm_device_t * dev, unsigned long start, int len,
-			   drm_ttm_backend_list_t ** entry)
-{
-	drm_ttm_backend_list_t *tmp;
-	drm_ttm_backend_t *be;
-	int ret;
-
-	if (len <= 0)
-		return -EINVAL;
-	if (!dev->driver->bo_driver->create_ttm_backend_entry)
-		return -EFAULT;
-
-	tmp = drm_calloc(1, sizeof(*tmp), DRM_MEM_TTM);
-
-	if (!tmp)
-		return -ENOMEM;
-
-	be = dev->driver->bo_driver->create_ttm_backend_entry(dev, 1);
-	tmp->be = be;
-
-	if (!be) {
-		drm_user_destroy_region(tmp);
-		return -ENOMEM;
-	}
-	if (be->needs_cache_adjust(be)) {
-		drm_user_destroy_region(tmp);
-		return -EFAULT;
-	}
-
-	tmp->anon_pages = ttm_alloc(sizeof(*(tmp->anon_pages)) * len,
-				    DRM_MEM_TTM);
-
-	if (!tmp->anon_pages) {
-		drm_user_destroy_region(tmp);
-		return -ENOMEM;
-	}
-
-	down_read(&current->mm->mmap_sem);
-	ret = get_user_pages(current, current->mm, start, len, 1, 0,
-			     tmp->anon_pages, NULL);
-	up_read(&current->mm->mmap_sem);
-
-	if (ret != len) {
-		drm_user_destroy_region(tmp);
-		DRM_ERROR("Could not lock %d pages. Return code was %d\n",
-			  len, ret);
-		return -EPERM;
-	}
-	tmp->anon_locked = len;
-
-	ret = be->populate(be, len, tmp->anon_pages);
-
-	if (ret) {
-		drm_user_destroy_region(tmp);
-		return ret;
-	}
-
-	tmp->state = ttm_unbound;
-	*entry = tmp;
+	ttm->aper_offset = aper_offset;
+	ttm->state = ttm_bound;
 
 	return 0;
 }
@@ -652,28 +380,17 @@ void drm_ttm_object_deref_unlocked(drm_d
 }
 
 /*
- * dev->struct_mutex locked.
- */
-static void drm_ttm_user_deref_locked(drm_file_t * priv,
-				      drm_user_object_t * base)
-{
-	drm_ttm_object_deref_locked(priv->head->dev,
-				    drm_user_object_entry(base,
-							  drm_ttm_object_t,
-							  base));
-}
-
-/*
  * Create a ttm and add it to the drm book-keeping. 
  * dev->struct_mutex locked.
  */
 
 int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
-			  uint32_t flags, drm_ttm_object_t ** ttm_object)
+			  uint32_t flags, int cached,
+			  drm_ttm_object_t ** ttm_object)
 {
 	drm_ttm_object_t *object;
 	drm_map_list_t *list;
-	drm_map_t *map;
+	drm_local_map_t *map;
 	drm_ttm_t *ttm;
 
 	object = drm_calloc(1, sizeof(*object), DRM_MEM_TTM);
@@ -689,14 +406,14 @@ int drm_ttm_object_create(drm_device_t *
 	}
 	map = list->map;
 
-	ttm = drm_init_ttm(dev, size);
+	ttm = drm_init_ttm(dev, size, cached);
 	if (!ttm) {
 		DRM_ERROR("Could not create ttm\n");
 		drm_ttm_object_remove(dev, object);
 		return -ENOMEM;
 	}
 
-	map->offset = ttm->lhandle;
+	map->offset = (unsigned long) ttm;
 	map->type = _DRM_TTM;
 	map->flags = _DRM_REMOVABLE;
 	map->size = ttm->num_pages * PAGE_SIZE;
@@ -725,87 +442,3 @@ int drm_ttm_object_create(drm_device_t *
 	*ttm_object = object;
 	return 0;
 }
-
-drm_ttm_object_t *drm_lookup_ttm_object(drm_file_t * priv, uint32_t handle,
-					int check_owner)
-{
-	drm_user_object_t *uo;
-	drm_ttm_object_t *to;
-
-	uo = drm_lookup_user_object(priv, handle);
-
-	if (!uo || (uo->type != drm_ttm_type))
-		return NULL;
-
-	if (check_owner && priv != uo->owner) {
-		if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
-			return NULL;
-	}
-
-	to = drm_user_object_entry(uo, drm_ttm_object_t, base);
-	atomic_inc(&to->usage);
-	return to;
-}
-
-int drm_ttm_ioctl(DRM_IOCTL_ARGS)
-{
-	DRM_DEVICE;
-	drm_ttm_arg_t arg;
-	drm_ttm_object_t *entry;
-	drm_user_object_t *uo;
-	unsigned long size;
-	int ret;
-
-	DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
-
-	switch (arg.op) {
-	case drm_ttm_create:
-		mutex_lock(&dev->struct_mutex);
-		size = arg.size;
-		ret = drm_ttm_object_create(dev, size, arg.flags, &entry);
-		if (ret) {
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
-		ret = drm_add_user_object(priv, &entry->base,
-					  arg.flags & DRM_TTM_FLAG_SHAREABLE);
-		if (ret) {
-			drm_ttm_object_remove(dev, entry);
-			mutex_unlock(&dev->struct_mutex);
-			return ret;
-		}
-		entry->base.remove = drm_ttm_user_deref_locked;
-		entry->base.type = drm_ttm_type;
-		entry->base.ref_struct_locked = NULL;
-		entry->base.unref = NULL;
-		atomic_inc(&entry->usage);
-		break;
-	case drm_ttm_reference:
-		ret = drm_user_object_ref(priv, arg.handle, drm_ttm_type, &uo);
-		if (ret)
-			return ret;
-		mutex_lock(&dev->struct_mutex);
-		entry = drm_lookup_ttm_object(priv, arg.handle, 0);
-		break;
-	case drm_ttm_unreference:
-		return drm_user_object_unref(priv, arg.handle, drm_ttm_type);
-	case drm_ttm_destroy:
-		mutex_lock(&dev->struct_mutex);
-		uo = drm_lookup_user_object(priv, arg.handle);
-		if (!uo || (uo->type != drm_ttm_type) || uo->owner != priv) {
-			mutex_unlock(&dev->struct_mutex);
-			return -EINVAL;
-		}
-		ret = drm_remove_user_object(priv, uo);
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
-	arg.handle = entry->base.hash.key;
-	arg.user_token = entry->map_list.user_token;
-	arg.size = entry->map_list.map->size;
-	drm_ttm_object_deref_locked(dev, entry);
-	mutex_unlock(&dev->struct_mutex);
-
-	DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
-	return 0;
-}
diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h
index fcac06b..19c1df5 100644
--- a/linux-core/drm_ttm.h
+++ b/linux-core/drm_ttm.h
@@ -48,6 +48,7 @@ typedef struct drm_ttm_backend {
 	unsigned long aperture_base;
 	void *private;
 	int needs_free;
+        uint32_t drm_map_type;
 	int (*needs_cache_adjust) (struct drm_ttm_backend * backend);
 	int (*populate) (struct drm_ttm_backend * backend,
 			 unsigned long num_pages, struct page ** pages);
@@ -57,61 +58,32 @@ typedef struct drm_ttm_backend {
 	void (*destroy) (struct drm_ttm_backend * backend);
 } drm_ttm_backend_t;
 
-#define DRM_FLUSH_READ  (0x01)
-#define DRM_FLUSH_WRITE (0x02)
-#define DRM_FLUSH_EXE   (0x04)
-
-typedef struct drm_ttm_backend_list {
-	uint32_t flags;
-	struct list_head head;
-	drm_ttm_backend_t *be;
-	unsigned page_offset;
-	unsigned num_pages;
-	struct drm_ttm *owner;
-	drm_file_t *anon_owner;
-	struct page **anon_pages;
-	int anon_locked;
-	enum {
-		ttm_bound,
-		ttm_evicted,
-		ttm_unbound
-	} state;
-} drm_ttm_backend_list_t;
-
-typedef struct drm_ttm_vma_list {
-	struct list_head head;
-	pgprot_t orig_protection;
-	struct vm_area_struct *vma;
-	drm_map_t *map;
-} drm_ttm_vma_list_t;
-
 typedef struct drm_ttm {
-	struct list_head p_mm_list;
-	atomic_t shared_count;
-	uint32_t mm_list_seq;
-	unsigned long aperture_base;
 	struct page **pages;
-	uint32_t *page_flags;
-	unsigned long lhandle;
+	uint32_t page_flags;
 	unsigned long num_pages;
-	drm_ttm_vma_list_t *vma_list;
+	unsigned long aper_offset;
+        atomic_t vma_count;
 	struct drm_device *dev;
-	drm_ttm_backend_list_t *be_list;
-	atomic_t vma_count;
-	int mmap_sem_locked;
 	int destroy;
         uint32_t mapping_offset;
+        drm_ttm_backend_t *be;
+	enum {
+		ttm_bound,
+		ttm_evicted,
+		ttm_unbound,
+		ttm_unpopulated,
+	} state;
 } drm_ttm_t;
 
 typedef struct drm_ttm_object {
-	drm_user_object_t base;
 	atomic_t usage;
 	uint32_t flags;
 	drm_map_list_t map_list;
 } drm_ttm_object_t;
 
 extern int drm_ttm_object_create(struct drm_device *dev, unsigned long size,
-				 uint32_t flags,
+				 uint32_t flags, int cached,
 				 drm_ttm_object_t ** ttm_object);
 extern void drm_ttm_object_deref_locked(struct drm_device *dev,
 					drm_ttm_object_t * to);
@@ -120,41 +92,18 @@ extern void drm_ttm_object_deref_unlocke
 extern drm_ttm_object_t *drm_lookup_ttm_object(drm_file_t * priv,
 					       uint32_t handle,
 					       int check_owner);
-
-/*
- * Bind a part of the ttm starting at page_offset size n_pages into the GTT, at
- * aperture offset aper_offset. The region handle will be used to reference this
- * bound region in the future. Note that the region may be the whole ttm. 
- * Regions should not overlap.
- * This function sets all affected pages as noncacheable and flushes cashes and TLB.
- */
-
-int drm_create_ttm_region(drm_ttm_t * ttm, unsigned long page_offset,
-			  unsigned long n_pages, int cached,
-			  drm_ttm_backend_list_t ** region);
-
-int drm_bind_ttm_region(drm_ttm_backend_list_t * region,
+extern int drm_bind_ttm(drm_ttm_t * ttm,
 			unsigned long aper_offset);
 
-/*
- * Unbind a ttm region. Restores caching policy. Flushes caches and TLB.
- */
-
-void drm_unbind_ttm_region(drm_ttm_backend_list_t * entry);
-void drm_destroy_ttm_region(drm_ttm_backend_list_t * entry);
+extern void drm_unbind_ttm(drm_ttm_t * ttm);
 
 /*
  * Evict a ttm region. Keeps Aperture caching policy.
  */
 
-int drm_evict_ttm_region(drm_ttm_backend_list_t * entry);
-
-/*
- * Rebind an already evicted region into a possibly new location in the aperture.
- */
+extern int drm_evict_ttm(drm_ttm_t * ttm);
+extern void drm_fixup_ttm_caching(drm_ttm_t *ttm);
 
-int drm_rebind_ttm_region(drm_ttm_backend_list_t * entry,
-			  unsigned long aper_offset);
 
 /*
  * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this, 
@@ -163,7 +112,6 @@ int drm_rebind_ttm_region(drm_ttm_backen
  */
 
 extern int drm_destroy_ttm(drm_ttm_t * ttm);
-extern void drm_user_destroy_region(drm_ttm_backend_list_t * entry);
 extern int drm_ttm_ioctl(DRM_IOCTL_ARGS);
 
 static __inline__ drm_ttm_t *drm_ttm_from_object(drm_ttm_object_t * to)
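
A note for reviewers on the interface above: the per-region backend lists
are gone, and a ttm now carries a single backend plus an explicit lifecycle
state (ttm_unpopulated / ttm_unbound / ttm_bound / ttm_evicted). The sketch
below shows roughly the semantics one would expect drm_unbind_ttm() to have,
expressed via the other two entry points declared in this header. It is an
illustration only: the function name is made up, locking (dev->struct_mutex)
is elided, and the state-transition comments are inferred rather than quoted
from drm_ttm.c.

/*
 * Illustration: unbinding as eviction followed by a caching fixup.
 * The real implementation lives in drm_ttm.c and may differ.
 */
static int example_unbind(drm_ttm_t * ttm)
{
	int ret;

	ret = drm_evict_ttm(ttm);	/* assumed: ttm_bound -> ttm_evicted */
	if (ret)
		return ret;		/* -EAGAIN: caller backs off and retries */

	drm_fixup_ttm_caching(ttm);	/* assumed: ttm_evicted -> ttm_unbound,
					 * restoring CPU-cached mappings */
	return 0;
}
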
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index 5fbbaad..4595115 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -159,120 +159,48 @@ static __inline__ struct page *drm_do_vm
 }
 #endif				/* __OS_HAS_AGP */
 
-
-static int drm_ttm_remap_bound_pfn(struct vm_area_struct *vma,
-				   unsigned long address,
-				   unsigned long size)
-{
-	unsigned long
-		page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
-	unsigned long
-		num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	drm_ttm_vma_list_t *entry = (drm_ttm_vma_list_t *)
-		vma->vm_private_data; 
-	drm_map_t *map = entry->map;
-	drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
-	unsigned long i, cur_pfn;
-	unsigned long start = 0;
-	unsigned long end = 0;
-	unsigned long last_pfn = 0; 
-	unsigned long start_pfn = 0;
-	int bound_sequence = FALSE;
-	int ret = 0;
-	uint32_t cur_flags;
-
-	for (i=page_offset; i<page_offset + num_pages; ++i) {
-		cur_flags = ttm->page_flags[i];
-
-		if (!bound_sequence && (cur_flags & DRM_TTM_PAGE_UNCACHED)) {
-
-			start = i;
-			end = i;
-			last_pfn = (cur_flags & DRM_TTM_MASK_PFN) >> PAGE_SHIFT;
-			start_pfn = last_pfn;
-			bound_sequence = TRUE;
-
-		} else if (bound_sequence) {
-
-			cur_pfn = (cur_flags & DRM_TTM_MASK_PFN) >> PAGE_SHIFT;
-
-			if ( !(cur_flags & DRM_TTM_PAGE_UNCACHED) || 
-			     (cur_pfn != last_pfn + 1)) {
-
-				ret = io_remap_pfn_range(vma, 
-							 vma->vm_start + (start << PAGE_SHIFT),
-							 (ttm->aperture_base >> PAGE_SHIFT) 
-							 + start_pfn,
-							 (end - start + 1) << PAGE_SHIFT,
-							 drm_io_prot(_DRM_AGP, vma));
-				
-				if (ret) 
-					break;
-
-				bound_sequence = (cur_flags & DRM_TTM_PAGE_UNCACHED);
-				if (!bound_sequence) 
-					continue;
-
-				start = i;
-				end = i;
-				last_pfn = cur_pfn;
-				start_pfn = last_pfn;
-
-			} else {
-				
-				end++;
-				last_pfn = cur_pfn;
-
-			}
-		}
-	}
-
-	if (!ret && bound_sequence) {
-		ret = io_remap_pfn_range(vma, 
-					 vma->vm_start + (start << PAGE_SHIFT),
-					 (ttm->aperture_base >> PAGE_SHIFT) 
-					 + start_pfn,
-					 (end - start + 1) << PAGE_SHIFT,
-					 drm_io_prot(_DRM_AGP, vma));
-	}
-
-	if (ret) {
-		DRM_ERROR("Map returned %c\n", ret);
-	}
-	return ret;
-}
-	
-static __inline__ struct page *drm_do_vm_ttm_nopage(struct vm_area_struct *vma,
-						    unsigned long address)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
+static
+#endif
+struct page *drm_vm_ttm_fault(struct vm_area_struct *vma, 
+			      struct fault_data *data)
 {
-	drm_ttm_vma_list_t *entry = (drm_ttm_vma_list_t *)
-		vma->vm_private_data; 
-	drm_map_t *map;
+	unsigned long address = data->address;
+	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
 	unsigned long page_offset;
 	struct page *page;
 	drm_ttm_t *ttm; 
-	pgprot_t default_prot;
-	uint32_t page_flags;
 	drm_buffer_manager_t *bm;
 	drm_device_t *dev;
+	unsigned long pfn;
+	int err;
+	pgprot_t pgprot;
+
+	if (!map) {
+		data->type = VM_FAULT_OOM;
+		return NULL;
+	}
 
-	if (address > vma->vm_end)
-		return NOPAGE_SIGBUS;	/* Disallow mremap */
-	if (!entry)
-		return NOPAGE_OOM;	/* Nothing allocated */
+	if (address > vma->vm_end) {
+		data->type = VM_FAULT_SIGBUS;
+		return NULL;
+	}
 
-	map = (drm_map_t *) entry->map;
 	ttm = (drm_ttm_t *) map->offset;
 	
 	dev = ttm->dev;
+
+	/*
+	 * Perhaps retry here?
+	 */
+
 	mutex_lock(&dev->struct_mutex);
+	drm_fixup_ttm_caching(ttm);
 
 	bm = &dev->bm;
 	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
 	page = ttm->pages[page_offset];
 
-	page_flags = ttm->page_flags[page_offset];
-
 	if (!page) {
 		if (bm->cur_pages >= bm->max_pages) {
 	 		DRM_ERROR("Maximum locked page count exceeded\n"); 
@@ -281,40 +209,65 @@ static __inline__ struct page *drm_do_vm
 		}
 		++bm->cur_pages;
 		page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
-		if (page) {
-			SetPageLocked(page);
-		} else {
-			page = NOPAGE_OOM;
+		if (!page) {
+			data->type = VM_FAULT_OOM;
+			goto out;
 		}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
+		SetPageLocked(page);
+#else
+		SetPageReserved(page);
+#endif
 	}
 
-	if (page_flags & DRM_TTM_PAGE_UNCACHED) {
+	if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) {
 
 		/*
-		 * This makes sure we don't race with another 
-		 * drm_ttm_remap_bound_pfn();
+		 * FIXME: Check can't map aperture flag.
 		 */
 
-		if (!drm_pte_is_clear(vma, address)) {
-			page = NOPAGE_RETRY;
-			goto out1;
-		}
-		       		
-		drm_ttm_remap_bound_pfn(vma, address, PAGE_SIZE);
-		page = NOPAGE_RETRY;
-		goto out1;
+		pfn = ttm->aper_offset + page_offset + 
+			(ttm->be->aperture_base >> PAGE_SHIFT);
+		pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
+	} else {
+		pfn = page_to_pfn(page);
+		pgprot = vma->vm_page_prot;
 	}
-	get_page(page);
 	
- out1:
-	default_prot = vm_get_page_prot(vma->vm_flags);	    
-	vma->vm_page_prot = default_prot;
+	err = vm_insert_pfn(vma, address, pfn, pgprot);
 
+	if (!err && (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) && 
+	    ttm->num_pages > 1) {
+
+		/*
+		 * FIXME: Check can't map aperture flag.
+		 */
+
+		/*
+		 * Since we're not racing with anybody else, 
+		 * we might as well populate the whole object space.
+		 * Note that we're touching vma->vm_flags with this
+		 * operation, but we are not changing them, so we should be 
+		 * OK.
+		 */
+
+		BUG_ON(ttm->state == ttm_unpopulated);
+		err = io_remap_pfn_range(vma, address + PAGE_SIZE, pfn+1,
+					 (ttm->num_pages - 1) * PAGE_SIZE,
+					 pgprot);
+	}
+		
+
+	if (!err || err == -EBUSY) 
+		data->type = VM_FAULT_MINOR; 
+	else
+		data->type = VM_FAULT_OOM;
  out:
 	mutex_unlock(&dev->struct_mutex);
-	return page;
+	return NULL;
 }
 
+
 /**
  * \c nopage method for shared virtual memory.
  *
@@ -547,14 +500,6 @@ static struct page *drm_vm_sg_nopage(str
 	return drm_do_vm_sg_nopage(vma, address);
 }
 
-static struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
-				     unsigned long address, int *type)
-{
-	if (type)
-		*type = VM_FAULT_MINOR;
-	return drm_do_vm_ttm_nopage(vma, address);
-}
-
 
 #else				/* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
 
@@ -582,13 +527,6 @@ static struct page *drm_vm_sg_nopage(str
 	return drm_do_vm_sg_nopage(vma, address);
 }
 
-static struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
-				     unsigned long address, int unused)
-{
-	return drm_do_vm_ttm_nopage(vma, address);
-}
-
-
 #endif
 
 /** AGP virtual memory operations */
@@ -619,11 +557,19 @@ static struct vm_operations_struct drm_v
 	.close = drm_vm_close,
 };
 
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
 static struct vm_operations_struct drm_vm_ttm_ops = {
 	.nopage = drm_vm_ttm_nopage,
 	.open = drm_vm_ttm_open_wrapper,
 	.close = drm_vm_ttm_close,
 };
+#else
+static struct vm_operations_struct drm_vm_ttm_ops = {
+	.fault = drm_vm_ttm_fault,
+	.open = drm_vm_ttm_open_wrapper,
+	.close = drm_vm_ttm_close,
+};
+#endif
 
 /**
  * \c open method for shared virtual memory.
@@ -656,36 +602,17 @@ static void drm_vm_open(struct vm_area_s
 
 static int drm_vm_ttm_open(struct vm_area_struct *vma) {
   
-	drm_ttm_vma_list_t *entry, *tmp_vma = 
-		(drm_ttm_vma_list_t *) vma->vm_private_data;
-	drm_map_t *map;
+	drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
 	drm_ttm_t *ttm;
 	drm_file_t *priv = vma->vm_file->private_data;
 	drm_device_t *dev = priv->head->dev;
-	int ret = 0;
 
 	drm_vm_open(vma);
 	mutex_lock(&dev->struct_mutex);
-	entry = drm_calloc(1, sizeof(*entry), DRM_MEM_VMAS);
-	if (entry) {
-	        *entry = *tmp_vma;
-		map = (drm_map_t *) entry->map;
-		ttm = (drm_ttm_t *) map->offset;
-		if (!ret) {
-			atomic_inc(&ttm->vma_count);
-			INIT_LIST_HEAD(&entry->head);
-			entry->vma = vma;
-			entry->orig_protection = vma->vm_page_prot;
-			list_add_tail(&entry->head, &ttm->vma_list->head);
-			vma->vm_private_data = (void *) entry;
-			DRM_DEBUG("Added VMA to ttm at 0x%016lx\n", 
-				  (unsigned long) ttm);
-		}
-	} else {
-		ret = -ENOMEM;
-	}
+	ttm = (drm_ttm_t *) map->offset;
+	atomic_inc(&ttm->vma_count);
 	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	return 0;
 }
 
 static void drm_vm_ttm_open_wrapper(struct vm_area_struct *vma) 
@@ -729,21 +656,16 @@ static void drm_vm_close(struct vm_area_
 
 static void drm_vm_ttm_close(struct vm_area_struct *vma)
 {
-	drm_ttm_vma_list_t *ttm_vma = 
-		(drm_ttm_vma_list_t *) vma->vm_private_data;
-	drm_map_t *map; 
+	drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data; 
 	drm_ttm_t *ttm; 
         drm_device_t *dev;
 	int ret;
 
 	drm_vm_close(vma); 
-	if (ttm_vma) {
-		map = (drm_map_t *) ttm_vma->map;
+	if (map) {
 		ttm = (drm_ttm_t *) map->offset;
 		dev = ttm->dev;
 		mutex_lock(&dev->struct_mutex);
-		list_del(&ttm_vma->head);
-		drm_free(ttm_vma, sizeof(*ttm_vma), DRM_MEM_VMAS);
 		if (atomic_dec_and_test(&ttm->vma_count)) {
 			if (ttm->destroy) {
 				ret = drm_destroy_ttm(ttm);
@@ -951,17 +873,10 @@ int drm_mmap(struct file *filp, struct v
 #endif
 		break;
 	case _DRM_TTM: {
-		drm_ttm_vma_list_t tmp_vma;
-		tmp_vma.orig_protection = vma->vm_page_prot;
-		tmp_vma.map = map;
 		vma->vm_ops = &drm_vm_ttm_ops;
-		vma->vm_private_data = (void *) &tmp_vma;
+		vma->vm_private_data = (void *) map;
 		vma->vm_file = filp;
 		vma->vm_flags |= VM_RESERVED | VM_IO;
-		if (drm_ttm_remap_bound_pfn(vma,
-					    vma->vm_start,
-					    vma->vm_end - vma->vm_start))
-			return -EAGAIN;
 		if (drm_vm_ttm_open(vma))
 		        return -EAGAIN;
 		return 0;
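
For context on the fault handler above: on kernels older than 2.6.19 this
branch keeps a .nopage entry (now supplied by the compat layer in
drm_compat.c), while newer kernels install drm_vm_ttm_fault() directly as
.fault. In either case the handler inserts the PFN itself, via
vm_insert_pfn() or io_remap_pfn_range(), and reports the outcome through
data->type instead of returning a page. The sketch below reconstructs the
compat fault descriptor from the only two fields the handler touches; the
authoritative definition is in linux-core/drm_compat.h, earlier in this
patch, and may carry more fields (e.g. the vma and page offset).

/*
 * Reconstruction of the compat fault descriptor used above, based only
 * on the fields drm_vm_ttm_fault() actually reads and writes.
 */
struct fault_data {
	unsigned long address;	/* faulting user-space address */
	int type;		/* VM_FAULT_MINOR, VM_FAULT_OOM, ... */
};
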
diff --git a/shared-core/drm.h b/shared-core/drm.h
index 17bf993..32cad3b 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -693,22 +693,6 @@ typedef struct drm_fence_arg {
 	} op;
 } drm_fence_arg_t;
 
-#define DRM_TTM_FLAG_SHAREABLE           0x00000001
-
-typedef struct drm_ttm_arg {
-	enum {
-		drm_ttm_create,
-		drm_ttm_destroy,
-		drm_ttm_reference,
-		drm_ttm_unreference
-	} op;
-	unsigned handle;
-        drm_u64_t user_token;
-	drm_u64_t size;
-        unsigned flags;
-}drm_ttm_arg_t;
-
-
 /* Buffer permissions, referring to how the GPU uses the buffers.
    these translate to fence types used for the buffers. 
    Typically a texture buffer is read, A destination buffer is write and
@@ -771,7 +755,6 @@ typedef struct drm_ttm_arg {
 #define DRM_BO_MASK_DRIVER      0x00F00000
 
 typedef enum {
-	drm_bo_type_ttm,
 	drm_bo_type_dc,
 	drm_bo_type_user,
 	drm_bo_type_fake
@@ -920,7 +903,6 @@ typedef union drm_mm_init_arg{
 
 #ifdef __linux__
 #define DRM_IOCTL_FENCE                 DRM_IOWR(0x3b, drm_fence_arg_t)
-#define DRM_IOCTL_TTM                   DRM_IOWR(0x3c, drm_ttm_arg_t)
 #define DRM_IOCTL_BUFOBJ                DRM_IOWR(0x3d, drm_bo_arg_t)
 #define DRM_IOCTL_MM_INIT               DRM_IOWR(0x3e, drm_mm_init_arg_t)
 #endif
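
One side effect of the shared-core/drm.h hunk is easy to miss:
drm_bo_type_ttm was the first enumerator, so deleting it renumbers the
buffer types that follow, and userspace built against the old header will
pass stale values until rebuilt. Illustration only; the typedef name is not
visible in the hunk and is assumed here.

/*
 * Renumbering effect of dropping drm_bo_type_ttm (illustration).
 */
typedef enum {
	drm_bo_type_dc,		/* now 0, previously 1 */
	drm_bo_type_user,	/* now 1, previously 2 */
	drm_bo_type_fake	/* now 2, previously 3 */
} drm_bo_type_t;		/* typedef name assumed */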
