[prev in list] [next in list] [prev in thread] [next in thread]
List: dri-patches
Subject: drm: Branch 'drm-ttm-0-2-branch' - 9 commits
From: thomash () kemper ! freedesktop ! org (Thomas Hellstrom)
Date: 2006-10-17 18:20:27
Message-ID: 20061017182027.9472A10078 () kemper ! freedesktop ! org
[Download RAW message or body]
libdrm/xf86drm.c | 54 ++-
libdrm/xf86mm.h | 9
linux-core/drmP.h | 105 ++++--
linux-core/drm_agpsupport.c | 23 -
linux-core/drm_bo.c | 751 ++++++++++++++++++++++++++++----------------
linux-core/drm_compat.c | 12
linux-core/drm_drawable.c | 1
linux-core/drm_drv.c | 25 -
linux-core/drm_fence.c | 45 +-
linux-core/drm_hashtab.c | 40 +-
linux-core/drm_memory.c | 69 ++++
linux-core/drm_mm.c | 27 -
linux-core/drm_object.c | 4
linux-core/drm_proc.c | 19 -
linux-core/drm_sman.c | 3
linux-core/drm_stub.c | 4
linux-core/drm_ttm.c | 91 ++---
linux-core/drm_ttm.h | 22 -
linux-core/drm_vm.c | 12
linux-core/i915_buffer.c | 17
linux-core/i915_drv.c | 5
linux-core/i915_fence.c | 15
shared-core/drm.h | 33 +
shared-core/i915_dma.c | 4
24 files changed, 900 insertions(+), 490 deletions(-)
New commits:
diff-tree 25fe4a80490bba709099f0401535d2f96ac7729c (from \
c34faf224b959bf61e4c3eb29c66a12edbd31841)
Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Date: Tue Oct 17 20:04:41 2006 +0200
Remove some debugging messages.
diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index 5c799b6..9047c8d 100644
--- a/libdrm/xf86drm.c
+++ b/libdrm/xf86drm.c
@@ -2786,16 +2786,11 @@ int drmBOMap(int fd, drmBO *buf, unsigne
fd, buf->mapHandle);
if (virtual == MAP_FAILED) {
ret = -errno;
- fprintf(stderr, "Map error 0x%016llx\n", buf->mapHandle);
}
if (ret)
return ret;
buf->mapVirtual = virtual;
buf->virtual = ((char *) virtual) + buf->start;
-#ifdef BODEBUG
- fprintf(stderr,"Mapvirtual, virtual: 0x%08x 0x%08x\n",
- buf->mapVirtual, buf->virtual);
-#endif
}
memset(&arg, 0, sizeof(arg));
diff-tree c34faf224b959bf61e4c3eb29c66a12edbd31841 (from \
89b944179856fadf8667587eff142129c2c6b826)
Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Date: Tue Oct 17 20:03:26 2006 +0200
Remove max number of locked pages check and call, since
that is now handled by the memory accounting.
diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index 253ba69..5c799b6 100644
--- a/libdrm/xf86drm.c
+++ b/libdrm/xf86drm.c
@@ -3187,21 +3187,6 @@ int drmMMTakedown(int fd, unsigned memTy
return 0;
}
-int drmMMMaxLockedSize(int fd, unsigned long maxLockedSize)
-{
- drm_mm_init_arg_t arg;
-
-
- memset(&arg, 0, sizeof(arg));
- arg.req.op = mm_set_max_pages;
- arg.req.p_size = maxLockedSize / getpagesize();
-
- if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
- return -errno;
-
- return 0;
-}
-
int drmMMLock(int fd, unsigned memType)
{
drm_mm_init_arg_t arg;
diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h
index c3112c9..da868fe 100644
--- a/libdrm/xf86mm.h
+++ b/libdrm/xf86mm.h
@@ -198,7 +198,6 @@ extern int drmBOWaitIdle(int fd, drmBO *
extern int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
unsigned memType);
extern int drmMMTakedown(int fd, unsigned memType);
-extern int drmMMMaxLockedSize(int fd, unsigned long maxLockedSize);
extern int drmMMLock(int fd, unsigned memType);
extern int drmMMUnlock(int fd, unsigned memType);
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index b10e988..fab3608 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -814,7 +814,6 @@ typedef struct drm_buffer_manager{
struct list_head ddestroy;
struct work_struct wq;
uint32_t fence_type;
- unsigned long max_pages;
unsigned long cur_pages;
atomic_t count;
} drm_buffer_manager_t;
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index e8e8a27..b8ee6c1 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -1859,7 +1859,6 @@ int drm_bo_driver_init(drm_device_t * de
drm_bo_driver_t *driver = dev->driver->bo_driver;
drm_buffer_manager_t *bm = &dev->bm;
int ret = -EINVAL;
- struct sysinfo si;
mutex_lock(&dev->bm.init_mutex);
mutex_lock(&dev->struct_mutex);
@@ -1880,8 +1879,6 @@ int drm_bo_driver_init(drm_device_t * de
bm->nice_mode = 1;
atomic_set(&bm->count, 0);
bm->cur_pages = 0;
- si_meminfo(&si);
- bm->max_pages = si.totalram >> 1;
INIT_LIST_HEAD(&bm->unfenced);
INIT_LIST_HEAD(&bm->ddestroy);
out_unlock:
@@ -1944,30 +1941,6 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
"Delaying takedown\n", arg.req.mem_type);
}
break;
- case mm_set_max_pages:{
- struct sysinfo si;
- mutex_lock(&dev->bm.init_mutex);
- mutex_lock(&dev->struct_mutex);
- if (arg.req.p_size < bm->cur_pages) {
- DRM_ERROR
- ("Cannot currently decrease max number of "
- "locked pages below the number currently "
- "locked.\n");
- ret = -EINVAL;
- break;
- }
- si_meminfo(&si);
- if (arg.req.p_size > si.totalram) {
- DRM_ERROR
- ("Cannot set max number of locked pages "
- "to %lu since the total number of RAM pages "
- "is %lu.\n", (unsigned long)arg.req.p_size,
- (unsigned long)si.totalram);
- ret = -EINVAL;
- break;
- }
- bm->max_pages = arg.req.p_size;
- }
case mm_lock:
LOCK_TEST_WITH_RETURN(dev, filp);
mutex_lock(&dev->bm.init_mutex);
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 4a035f4..90e5341 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -234,11 +234,6 @@ struct page *drm_vm_ttm_nopage(struct vm
page = ttm->pages[page_offset];
if (!page) {
- if (bm->cur_pages >= bm->max_pages) {
- DRM_ERROR("Maximum locked page count exceeded\n");
- page = NOPAGE_OOM;
- goto out;
- }
if (drm_alloc_memctl(PAGE_SIZE)) {
page = NOPAGE_OOM;
goto out;
diff --git a/shared-core/drm.h b/shared-core/drm.h
index 915befb..4a6a370 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -829,7 +829,6 @@ typedef union drm_mm_init_arg{
struct {
enum {
mm_init,
- mm_set_max_pages,
mm_takedown,
mm_query,
mm_lock,
diff-tree 89b944179856fadf8667587eff142129c2c6b826 (from \
f22f89e6b3c970a29197d3a53c170fb7d0340cbe)
Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Date: Tue Oct 17 19:57:06 2006 +0200
Lindent.
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index fb90098..e8e8a27 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -67,7 +67,7 @@ static inline uint32_t drm_bo_type_flags
static inline drm_buffer_object_t *drm_bo_entry(struct list_head *list,
unsigned type)
{
- switch(type) {
+ switch (type) {
case DRM_BO_MEM_LOCAL:
case DRM_BO_MEM_TT:
return list_entry(list, drm_buffer_object_t, lru_ttm);
@@ -80,10 +80,10 @@ static inline drm_buffer_object_t *drm_b
return NULL;
}
-static inline drm_mm_node_t *drm_bo_mm_node(drm_buffer_object_t *bo,
+static inline drm_mm_node_t *drm_bo_mm_node(drm_buffer_object_t * bo,
unsigned type)
{
- switch(type) {
+ switch (type) {
case DRM_BO_MEM_LOCAL:
case DRM_BO_MEM_TT:
return bo->node_ttm;
@@ -95,29 +95,38 @@ static inline drm_mm_node_t *drm_bo_mm_n
}
return NULL;
}
-
+
/*
* bo locked. dev->struct_mutex locked.
*/
-static void drm_bo_add_to_lru(drm_buffer_object_t *buf,
- drm_buffer_manager_t *bm)
+static void drm_bo_add_to_lru(drm_buffer_object_t * buf,
+ drm_buffer_manager_t * bm)
{
struct list_head *list;
unsigned mem_type;
if (buf->flags & DRM_BO_FLAG_MEM_TT) {
mem_type = DRM_BO_MEM_TT;
- list = (buf->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? \
&bm->pinned[mem_type] : &bm->lru[mem_type]; + list =
+ (buf->
+ flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
+ &bm->pinned[mem_type] : &bm->lru[mem_type];
list_add_tail(&buf->lru_ttm, list);
} else {
mem_type = DRM_BO_MEM_LOCAL;
- list = (buf->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? \
&bm->pinned[mem_type] : &bm->lru[mem_type]; + list =
+ (buf->
+ flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
+ &bm->pinned[mem_type] : &bm->lru[mem_type];
list_add_tail(&buf->lru_ttm, list);
}
if (buf->flags & DRM_BO_FLAG_MEM_VRAM) {
mem_type = DRM_BO_MEM_VRAM;
- list = (buf->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? \
&bm->pinned[mem_type] : &bm->lru[mem_type]; + list =
+ (buf->
+ flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ?
+ &bm->pinned[mem_type] : &bm->lru[mem_type];
list_add_tail(&buf->lru_card, list);
}
}
@@ -145,9 +154,8 @@ static int drm_move_tt_to_local(drm_buff
schedule();
return ret;
}
-
- if (!(buf->flags & DRM_BO_FLAG_NO_MOVE) ||
- force_no_move) {
+
+ if (!(buf->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) {
drm_mm_put_block(buf->node_ttm);
buf->node_ttm = NULL;
}
@@ -169,14 +177,13 @@ static void drm_bo_destroy_locked(drm_de
drm_buffer_manager_t *bm = &dev->bm;
-
DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
/*
* Somone might try to access us through the still active BM lists.
*/
- if (atomic_read(&bo->usage) != 0)
+ if (atomic_read(&bo->usage) != 0)
return;
if (!list_empty(&bo->ddestroy))
return;
@@ -209,7 +216,7 @@ static void drm_bo_destroy_locked(drm_de
/*
* This temporarily unlocks struct_mutex.
*/
-
+
do {
ret = drm_unbind_ttm(bo->ttm);
if (ret == -EAGAIN) {
@@ -224,7 +231,7 @@ static void drm_bo_destroy_locked(drm_de
"Bad. Continuing anyway\n");
}
}
-
+
if (bo->node_ttm) {
drm_mm_put_block(bo->node_ttm);
bo->node_ttm = NULL;
@@ -249,8 +256,8 @@ static void drm_bo_delayed_delete(drm_de
drm_fence_object_t *fence;
mutex_lock(&dev->struct_mutex);
- if (!bm->initialized)
- goto out;
+ if (!bm->initialized)
+ goto out;
list = bm->ddestroy.next;
list_for_each_safe(list, next, &bm->ddestroy) {
@@ -264,7 +271,7 @@ static void drm_bo_delayed_delete(drm_de
if (atomic_read(&entry->usage) != 0)
continue;
-
+
/*
* Since we're the only users, No need to take the
* bo->mutex to watch the fence.
@@ -284,10 +291,10 @@ static void drm_bo_delayed_delete(drm_de
* drm_bo_destroy_locked temporarily releases the
* struct_mutex;
*/
-
- nentry = NULL;
+
+ nentry = NULL;
if (next != &bm->ddestroy) {
- nentry = list_entry(next, drm_buffer_object_t,
+ nentry = list_entry(next, drm_buffer_object_t,
ddestroy);
atomic_inc(&nentry->usage);
}
@@ -296,13 +303,12 @@ static void drm_bo_delayed_delete(drm_de
drm_bo_destroy_locked(dev, entry);
if (next != &bm->ddestroy)
atomic_dec(&nentry->usage);
- }
+ }
}
- out:
+ out:
mutex_unlock(&dev->struct_mutex);
}
-
static void drm_bo_delayed_workqueue(void *data)
{
drm_device_t *dev = (drm_device_t *) data;
@@ -403,8 +409,8 @@ int drm_fence_buffer_objects(drm_file_t
}
} else {
mutex_unlock(&dev->struct_mutex);
- ret = drm_fence_object_create(dev, fence_type,
- fence_flags | DRM_FENCE_FLAG_EMIT,
+ ret = drm_fence_object_create(dev, fence_type,
+ fence_flags | DRM_FENCE_FLAG_EMIT,
&fence);
mutex_lock(&dev->struct_mutex);
if (ret)
@@ -470,9 +476,9 @@ static int drm_bo_wait(drm_buffer_object
ret =
drm_fence_object_wait(dev, fence, lazy, ignore_signals,
bo->fence_type);
- if (ret)
- return ret;
-
+ if (ret)
+ return ret;
+
drm_fence_usage_deref_unlocked(dev, fence);
bo->fence = NULL;
@@ -484,7 +490,7 @@ static int drm_bo_wait(drm_buffer_object
* bo->mutex locked
*/
-static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
+static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
int no_wait, int force_no_move)
{
int ret = 0;
@@ -495,7 +501,7 @@ static int drm_bo_evict(drm_buffer_objec
* Someone might have modified the buffer before we took the buffer mutex.
*/
- if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
goto out;
if (!(bo->flags & drm_bo_type_flags(mem_type)))
goto out;
@@ -531,7 +537,7 @@ static int drm_bo_evict(drm_buffer_objec
DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
_DRM_BO_FLAG_EVICTED);
- out:
+ out:
return ret;
}
@@ -539,7 +545,7 @@ static int drm_bo_evict(drm_buffer_objec
* buf->mutex locked.
*/
-int drm_bo_alloc_space(drm_buffer_object_t * buf, unsigned mem_type,
+int drm_bo_alloc_space(drm_buffer_object_t * buf, unsigned mem_type,
int no_wait)
{
drm_device_t *dev = buf->dev;
@@ -601,7 +607,7 @@ static int drm_move_local_to_tt(drm_buff
drm_ttm_backend_t *be;
int ret;
- if (!(bo->node_ttm && (bo->flags & DRM_BO_FLAG_NO_MOVE))) {
+ if (!(bo->node_ttm && (bo->flags & DRM_BO_FLAG_NO_MOVE))) {
BUG_ON(bo->node_ttm);
ret = drm_bo_alloc_space(bo, DRM_BO_MEM_TT, no_wait);
if (ret)
@@ -653,18 +659,19 @@ static int drm_bo_new_flags(drm_device_t
* First adjust the mask to take away nonexistant memory types.
*/
- for (i=0; i<DRM_BO_MEM_TYPES; ++i) {
+ for (i = 0; i < DRM_BO_MEM_TYPES; ++i) {
if (!bm->use_type[i])
new_mask &= ~drm_bo_type_flags(i);
}
- if ((new_mask & DRM_BO_FLAG_NO_EVICT ) && !DRM_SUSER(DRM_CURPROC)) {
- DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
- "processes\n");
+ if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
+ DRM_ERROR
+ ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
+ "processes\n");
return -EPERM;
}
if (new_mask & DRM_BO_FLAG_BIND_CACHED) {
- if (((new_mask & DRM_BO_FLAG_MEM_TT) &&
+ if (((new_mask & DRM_BO_FLAG_MEM_TT) &&
!driver->cached[DRM_BO_MEM_TT]) &&
((new_mask & DRM_BO_FLAG_MEM_VRAM)
&& !driver->cached[DRM_BO_MEM_VRAM])) {
@@ -831,12 +838,12 @@ static int drm_bo_read_cached(drm_buffer
int ret = 0;
BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
- if (bo->node_card)
+ if (bo->node_card)
ret = drm_bo_evict(bo, DRM_BO_MEM_VRAM, 1, 0);
if (ret)
return ret;
if (bo->node_ttm)
- ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0);
+ ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0);
return ret;
}
@@ -1155,18 +1162,18 @@ static int drm_buffer_object_validate(dr
DRM_ERROR("Driver did not support given buffer permissions\n");
return ret;
}
-
+
/*
* Move out if we need to change caching policy.
*/
- if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) &&
+ if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) &&
!(bo->flags & DRM_BO_FLAG_MEM_LOCAL)) {
if (bo->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
DRM_ERROR("Cannot change caching policy of "
"pinned buffer.\n");
return -EINVAL;
- }
+ }
ret = drm_bo_move_buffer(bo, DRM_BO_FLAG_MEM_LOCAL, no_wait, 0);
if (ret) {
if (ret != -EAGAIN)
@@ -1182,7 +1189,7 @@ static int drm_buffer_object_validate(dr
* release reserved manager regions.
*/
- if ((flag_diff & DRM_BO_FLAG_NO_MOVE) &&
+ if ((flag_diff & DRM_BO_FLAG_NO_MOVE) &&
!(new_flags & DRM_BO_FLAG_NO_MOVE)) {
mutex_lock(&dev->struct_mutex);
if (bo->node_ttm) {
@@ -1434,10 +1441,10 @@ int drm_buffer_object_create(drm_file_t
mutex_unlock(&bo->mutex);
*buf_obj = bo;
return 0;
-
- out_err:
+
+ out_err:
mutex_unlock(&bo->mutex);
- drm_bo_usage_deref_unlocked(dev, bo);
+ drm_bo_usage_deref_unlocked(dev, bo);
return ret;
}
@@ -1607,11 +1614,10 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
* dev->struct_sem locked.
*/
-static int drm_bo_force_list_clean(drm_device_t *dev,
- struct list_head *head,
+static int drm_bo_force_list_clean(drm_device_t * dev,
+ struct list_head *head,
unsigned mem_type,
- int force_no_move,
- int allow_errors)
+ int force_no_move, int allow_errors)
{
drm_buffer_manager_t *bm = &dev->bm;
struct list_head *list, *next, *prev;
@@ -1619,11 +1625,11 @@ static int drm_bo_force_list_clean(drm_d
int ret;
int clean;
- retry:
+ retry:
clean = 1;
list_for_each_safe(list, next, head) {
prev = list->prev;
- entry = drm_bo_entry(list, mem_type);
+ entry = drm_bo_entry(list, mem_type);
atomic_inc(&entry->usage);
mutex_unlock(&dev->struct_mutex);
mutex_lock(&entry->mutex);
@@ -1639,10 +1645,10 @@ static int drm_bo_force_list_clean(drm_d
/*
* Expire the fence.
*/
-
+
mutex_unlock(&dev->struct_mutex);
if (entry->fence && bm->nice_mode) {
- unsigned long _end = jiffies + 3*DRM_HZ;
+ unsigned long _end = jiffies + 3 * DRM_HZ;
do {
ret = drm_bo_wait(entry, 0, 1, 0);
if (ret && allow_errors) {
@@ -1651,7 +1657,7 @@ static int drm_bo_force_list_clean(drm_d
goto out_err;
}
} while (ret && !time_after_eq(jiffies, _end));
-
+
if (entry->fence) {
bm->nice_mode = 0;
DRM_ERROR("Detected GPU hang or "
@@ -1660,14 +1666,17 @@ static int drm_bo_force_list_clean(drm_d
}
}
if (entry->fence) {
- drm_fence_usage_deref_unlocked(dev, entry->fence);
+ drm_fence_usage_deref_unlocked(dev,
+ entry->fence);
entry->fence = NULL;
}
- DRM_MASK_VAL(entry->priv_flags, _DRM_BO_FLAG_UNFENCED, 0);
+ DRM_MASK_VAL(entry->priv_flags, _DRM_BO_FLAG_UNFENCED,
+ 0);
if (force_no_move) {
- DRM_MASK_VAL(entry->flags, DRM_BO_FLAG_NO_MOVE, 0);
+ DRM_MASK_VAL(entry->flags, DRM_BO_FLAG_NO_MOVE,
+ 0);
}
if (entry->flags & DRM_BO_FLAG_NO_EVICT) {
DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
@@ -1690,12 +1699,12 @@ static int drm_bo_force_list_clean(drm_d
drm_bo_usage_deref_locked(dev, entry);
if (prev != list->prev || next != list->next) {
goto retry;
- }
+ }
}
if (!clean)
goto retry;
return 0;
- out_err:
+ out_err:
mutex_unlock(&entry->mutex);
drm_bo_usage_deref_unlocked(dev, entry);
mutex_lock(&dev->struct_mutex);
@@ -1715,7 +1724,7 @@ int drm_bo_clean_mm(drm_device_t * dev,
if (!bm->has_type[mem_type]) {
DRM_ERROR("Trying to take down uninitialized "
"memory manager type\n");
- return ret;
+ return ret;
}
bm->use_type[mem_type] = 0;
bm->has_type[mem_type] = 0;
@@ -1733,10 +1742,12 @@ int drm_bo_clean_mm(drm_device_t * dev,
* Throw out evicted no-move buffers.
*/
- drm_bo_force_list_clean(dev, &bm->pinned[DRM_BO_MEM_LOCAL],
+ drm_bo_force_list_clean(dev, &bm->pinned[DRM_BO_MEM_LOCAL],
mem_type, 1, 0);
- drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 1, 0);
- drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 1, 0);
+ drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 1,
+ 0);
+ drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 1,
+ 0);
if (drm_mm_clean(&bm->manager[mem_type])) {
drm_mm_takedown(&bm->manager[mem_type]);
@@ -1748,32 +1759,30 @@ int drm_bo_clean_mm(drm_device_t * dev,
return ret;
}
-static int drm_bo_lock_mm(drm_device_t *dev, unsigned mem_type)
+static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
{
int ret;
drm_buffer_manager_t *bm = &dev->bm;
if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
- DRM_ERROR("Illegal memory manager memory type %u,\n",
- mem_type);
+ DRM_ERROR("Illegal memory manager memory type %u,\n", mem_type);
return -EINVAL;
}
-
- ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1);
- if (ret)
+
+ ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1);
+ if (ret)
return ret;
- ret = drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 0, 1);
+ ret = drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 0, 1);
if (ret)
return ret;
- ret = drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 0, 1);
+ ret =
+ drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 0, 1);
return ret;
}
-
-static int drm_bo_init_mm(drm_device_t *dev,
+static int drm_bo_init_mm(drm_device_t * dev,
unsigned type,
- unsigned long p_offset,
- unsigned long p_size)
+ unsigned long p_offset, unsigned long p_size)
{
drm_buffer_manager_t *bm = &dev->bm;
int ret = -EINVAL;
@@ -1794,7 +1803,7 @@ static int drm_bo_init_mm(drm_device_t *
DRM_ERROR("Zero size memory manager type %d\n", type);
return ret;
}
- ret = drm_mm_init(&bm->manager[type],p_offset, p_size);
+ ret = drm_mm_init(&bm->manager[type], p_offset, p_size);
if (ret)
return ret;
}
@@ -1807,8 +1816,7 @@ static int drm_bo_init_mm(drm_device_t *
return 0;
}
-
-int drm_bo_driver_finish(drm_device_t *dev)
+int drm_bo_driver_finish(drm_device_t * dev)
{
drm_buffer_manager_t *bm = &dev->bm;
int ret = 0;
@@ -1817,10 +1825,10 @@ int drm_bo_driver_finish(drm_device_t *d
mutex_lock(&dev->bm.init_mutex);
mutex_lock(&dev->struct_mutex);
- if (!bm->initialized)
+ if (!bm->initialized)
goto out;
- while(i--) {
+ while (i--) {
if (bm->has_type[i]) {
bm->use_type[i] = 0;
if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
@@ -1840,25 +1848,24 @@ int drm_bo_driver_finish(drm_device_t *d
flush_scheduled_work();
}
mutex_lock(&dev->struct_mutex);
- out:
+ out:
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->bm.init_mutex);
return ret;
}
-
-int drm_bo_driver_init(drm_device_t *dev)
+int drm_bo_driver_init(drm_device_t * dev)
{
drm_bo_driver_t *driver = dev->driver->bo_driver;
drm_buffer_manager_t *bm = &dev->bm;
int ret = -EINVAL;
struct sysinfo si;
-
+
mutex_lock(&dev->bm.init_mutex);
mutex_lock(&dev->struct_mutex);
if (!driver)
goto out_unlock;
-
+
/*
* Initialize the system memory buffer type.
* Other types need to be driver / IOCTL initialized.
@@ -1876,14 +1883,14 @@ int drm_bo_driver_init(drm_device_t *dev
si_meminfo(&si);
bm->max_pages = si.totalram >> 1;
INIT_LIST_HEAD(&bm->unfenced);
- INIT_LIST_HEAD(&bm->ddestroy);
- out_unlock:
+ INIT_LIST_HEAD(&bm->ddestroy);
+ out_unlock:
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->bm.init_mutex);
return ret;
}
-EXPORT_SYMBOL(drm_bo_driver_init);
+EXPORT_SYMBOL(drm_bo_driver_init);
int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
{
@@ -1911,15 +1918,15 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
break;
}
if (arg.req.mem_type == 0) {
- DRM_ERROR("System memory buffers already initialized.\n");
+ DRM_ERROR
+ ("System memory buffers already initialized.\n");
break;
}
- ret = drm_bo_init_mm(dev, arg.req.mem_type,
- arg.req.p_offset,
- arg.req.p_size);
+ ret = drm_bo_init_mm(dev, arg.req.mem_type,
+ arg.req.p_offset, arg.req.p_size);
break;
case mm_takedown:
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, filp);
mutex_lock(&dev->bm.init_mutex);
mutex_lock(&dev->struct_mutex);
ret = -EINVAL;
@@ -1937,36 +1944,38 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
"Delaying takedown\n", arg.req.mem_type);
}
break;
- case mm_set_max_pages: {
- struct sysinfo si;
- mutex_lock(&dev->bm.init_mutex);
- mutex_lock(&dev->struct_mutex);
- if (arg.req.p_size < bm->cur_pages) {
- DRM_ERROR("Cannot currently decrease max number of "
- "locked pages below the number currently "
- "locked.\n");
- ret = -EINVAL;
- break;
- }
- si_meminfo(&si);
- if (arg.req.p_size > si.totalram) {
- DRM_ERROR("Cannot set max number of locked pages "
- "to %lu since the total number of RAM pages "
- "is %lu.\n", (unsigned long) arg.req.p_size,
- (unsigned long) si.totalram);
- ret = -EINVAL;
- break;
+ case mm_set_max_pages:{
+ struct sysinfo si;
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+ if (arg.req.p_size < bm->cur_pages) {
+ DRM_ERROR
+ ("Cannot currently decrease max number of "
+ "locked pages below the number currently "
+ "locked.\n");
+ ret = -EINVAL;
+ break;
+ }
+ si_meminfo(&si);
+ if (arg.req.p_size > si.totalram) {
+ DRM_ERROR
+ ("Cannot set max number of locked pages "
+ "to %lu since the total number of RAM pages "
+ "is %lu.\n", (unsigned long)arg.req.p_size,
+ (unsigned long)si.totalram);
+ ret = -EINVAL;
+ break;
+ }
+ bm->max_pages = arg.req.p_size;
}
- bm->max_pages = arg.req.p_size;
- }
case mm_lock:
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, filp);
mutex_lock(&dev->bm.init_mutex);
mutex_lock(&dev->struct_mutex);
ret = drm_bo_lock_mm(dev, arg.req.mem_type);
break;
case mm_unlock:
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, filp);
mutex_lock(&dev->bm.init_mutex);
mutex_lock(&dev->struct_mutex);
ret = 0;
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index c9a2a06..aa38204 100644
--- a/linux-core/drm_fence.c
+++ b/linux-core/drm_fence.c
@@ -31,7 +31,6 @@
#include "drmP.h"
-
/*
* Typically called by the IRQ handler.
*/
@@ -90,7 +89,7 @@ void drm_fence_handler(drm_device_t * de
}
}
-
+
if (wake) {
DRM_WAKEUP(&fm->fence_queue);
}
@@ -132,8 +131,8 @@ void drm_fence_usage_deref_unlocked(drm_
mutex_lock(&dev->struct_mutex);
if (atomic_read(&fence->usage) == 0) {
drm_fence_unring(dev, &fence->ring);
- atomic_dec(&fm->count);
- drm_ctl_cache_free(drm_cache.fence_object,
+ atomic_dec(&fm->count);
+ drm_ctl_cache_free(drm_cache.fence_object,
sizeof(*fence), fence);
}
mutex_unlock(&dev->struct_mutex);
@@ -150,7 +149,7 @@ static void drm_fence_object_destroy(drm
drm_fence_usage_deref_locked(dev, fence);
}
-static int fence_signaled(drm_device_t * dev, volatile
+static int fence_signaled(drm_device_t * dev, volatile
drm_fence_object_t * fence,
uint32_t mask, int poke_flush)
{
@@ -205,15 +204,14 @@ static void drm_fence_flush_exe(drm_fenc
}
}
-int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
+int drm_fence_object_signaled(volatile drm_fence_object_t * fence,
uint32_t type)
{
return ((fence->signaled & type) == type);
}
int drm_fence_object_flush(drm_device_t * dev,
- volatile drm_fence_object_t * fence,
- uint32_t type)
+ volatile drm_fence_object_t * fence, uint32_t type)
{
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
@@ -221,7 +219,7 @@ int drm_fence_object_flush(drm_device_t
if (type & ~fence->type) {
DRM_ERROR("Flush trying to extend fence type, "
- "0x%x, 0x%x\n", type, fence->type);
+ "0x%x, 0x%x\n", type, fence->type);
return -EINVAL;
}
@@ -248,7 +246,6 @@ int drm_fence_object_flush(drm_device_t
* wrapped around and reused.
*/
-
void drm_fence_flush_old(drm_device_t * dev, uint32_t sequence)
{
drm_fence_manager_t *fm = &dev->fm;
@@ -279,7 +276,7 @@ void drm_fence_flush_old(drm_device_t *
EXPORT_SYMBOL(drm_fence_flush_old);
-int drm_fence_object_wait(drm_device_t * dev,
+int drm_fence_object_wait(drm_device_t * dev,
volatile drm_fence_object_t * fence,
int lazy, int ignore_signals, uint32_t mask)
{
@@ -328,8 +325,8 @@ int drm_fence_object_wait(drm_device_t *
do {
DRM_WAIT_ON(ret, fm->fence_queue, 3 * DRM_HZ,
- fence_signaled(dev, fence, DRM_FENCE_TYPE_EXE,
- 1));
+ fence_signaled(dev, fence,
+ DRM_FENCE_TYPE_EXE, 1));
if (time_after_eq(jiffies, _end))
break;
} while (ret == -EINTR && ignore_signals);
@@ -347,9 +344,9 @@ int drm_fence_object_wait(drm_device_t *
*/
#if 1
if (!ignore_signals)
- return -EAGAIN;
+ return -EAGAIN;
#endif
- do {
+ do {
schedule();
signaled = fence_signaled(dev, fence, mask, 1);
} while (!signaled && !time_after_eq(jiffies, _end));
@@ -387,7 +384,7 @@ int drm_fence_object_emit(drm_device_t *
return 0;
}
-static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
+static int drm_fence_object_init(drm_device_t * dev, uint32_t type,
uint32_t fence_flags,
drm_fence_object_t * fence)
{
@@ -414,7 +411,6 @@ static int drm_fence_object_init(drm_dev
return ret;
}
-
int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
int shareable)
{
@@ -441,7 +437,7 @@ int drm_fence_object_create(drm_device_t
int ret;
drm_fence_manager_t *fm = &dev->fm;
- fence = drm_ctl_cache_alloc(drm_cache.fence_object,
+ fence = drm_ctl_cache_alloc(drm_cache.fence_object,
sizeof(*fence), GFP_KERNEL);
if (!fence)
return -ENOMEM;
@@ -472,7 +468,7 @@ void drm_fence_manager_init(drm_device_t
fm->initialized = 0;
if (fed) {
fm->initialized = 1;
- atomic_set(&fm->count,0);
+ atomic_set(&fm->count, 0);
for (i = 0; i < fed->no_types; ++i) {
fm->fence_types[i] = &fm->ring;
}
@@ -523,9 +519,7 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
case drm_fence_create:
if (arg.flags & DRM_FENCE_FLAG_EMIT)
LOCK_TEST_WITH_RETURN(dev, filp);
- ret = drm_fence_object_create(dev, arg.type,
- arg.flags,
- &fence);
+ ret = drm_fence_object_create(dev, arg.type, arg.flags, &fence);
if (ret)
return ret;
ret = drm_fence_add_user_object(priv, fence,
@@ -596,7 +590,7 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
return -EINVAL;
}
LOCK_TEST_WITH_RETURN(dev, filp);
- ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
+ ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
NULL, &fence);
if (ret)
return ret;
diff --git a/linux-core/drm_hashtab.c b/linux-core/drm_hashtab.c
index 3a2aa80..6f17e11 100644
--- a/linux-core/drm_hashtab.c
+++ b/linux-core/drm_hashtab.c
@@ -36,7 +36,7 @@
#include "drm_hashtab.h"
#include <linux/hash.h>
-int drm_ht_create(drm_open_hash_t *ht, unsigned int order)
+int drm_ht_create(drm_open_hash_t * ht, unsigned int order)
{
unsigned int i;
@@ -46,24 +46,24 @@ int drm_ht_create(drm_open_hash_t *ht, u
ht->table = NULL;
ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
if (!ht->use_vmalloc) {
- ht->table = drm_calloc(ht->size, sizeof(*ht->table),
+ ht->table = drm_calloc(ht->size, sizeof(*ht->table),
DRM_MEM_HASHTAB);
- }
+ }
if (!ht->table) {
ht->use_vmalloc = 1;
- ht->table = vmalloc(ht->size*sizeof(*ht->table));
- }
+ ht->table = vmalloc(ht->size * sizeof(*ht->table));
+ }
if (!ht->table) {
DRM_ERROR("Out of memory for hash table\n");
return -ENOMEM;
}
- for (i=0; i< ht->size; ++i) {
+ for (i = 0; i < ht->size; ++i) {
INIT_HLIST_HEAD(&ht->table[i]);
}
return 0;
}
-void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key)
+void drm_ht_verbose_list(drm_open_hash_t * ht, unsigned long key)
{
drm_hash_item_t *entry;
struct hlist_head *h_list;
@@ -80,7 +80,7 @@ void drm_ht_verbose_list(drm_open_hash_t
}
}
-static struct hlist_node *drm_ht_find_key(drm_open_hash_t *ht,
+static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht,
unsigned long key)
{
drm_hash_item_t *entry;
@@ -100,8 +100,7 @@ static struct hlist_node *drm_ht_find_ke
return NULL;
}
-
-int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item)
+int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item)
{
drm_hash_item_t *entry;
struct hlist_head *h_list;
@@ -132,7 +131,7 @@ int drm_ht_insert_item(drm_open_hash_t *
* Just insert an item and return any "bits" bit key that hasn't been
* used before.
*/
-int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
+int drm_ht_just_insert_please(drm_open_hash_t * ht, drm_hash_item_t * item,
unsigned long seed, int bits, int shift,
unsigned long add)
{
@@ -147,7 +146,7 @@ int drm_ht_just_insert_please(drm_open_h
ret = drm_ht_insert_item(ht, item);
if (ret)
unshifted_key = (unshifted_key + 1) & mask;
- } while(ret && (unshifted_key != first));
+ } while (ret && (unshifted_key != first));
if (ret) {
DRM_ERROR("Available key bit space exhausted\n");
@@ -156,8 +155,8 @@ int drm_ht_just_insert_please(drm_open_h
return 0;
}
-int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key,
- drm_hash_item_t **item)
+int drm_ht_find_item(drm_open_hash_t * ht, unsigned long key,
+ drm_hash_item_t ** item)
{
struct hlist_node *list;
@@ -169,7 +168,7 @@ int drm_ht_find_item(drm_open_hash_t *ht
return 0;
}
-int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key)
+int drm_ht_remove_key(drm_open_hash_t * ht, unsigned long key)
{
struct hlist_node *list;
@@ -182,22 +181,21 @@ int drm_ht_remove_key(drm_open_hash_t *h
return -EINVAL;
}
-int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item)
+int drm_ht_remove_item(drm_open_hash_t * ht, drm_hash_item_t * item)
{
hlist_del_init(&item->head);
ht->fill--;
return 0;
}
-void drm_ht_remove(drm_open_hash_t *ht)
+void drm_ht_remove(drm_open_hash_t * ht)
{
if (ht->table) {
- if (ht->use_vmalloc)
+ if (ht->use_vmalloc)
vfree(ht->table);
else
- drm_free(ht->table, ht->size*sizeof(*ht->table),
+ drm_free(ht->table, ht->size * sizeof(*ht->table),
DRM_MEM_HASHTAB);
ht->table = NULL;
}
}
-
diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c
index 6ab13af..4af33bd 100644
--- a/linux-core/drm_mm.c
+++ b/linux-core/drm_mm.c
@@ -59,9 +59,9 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_n
return parent;
} else {
- child = (drm_mm_node_t *)
- drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
- GFP_KERNEL);
+ child = (drm_mm_node_t *)
+ drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
+ GFP_KERNEL);
if (!child)
return NULL;
@@ -111,8 +111,8 @@ void drm_mm_put_block(drm_mm_node_t * cu
prev_node->size += next_node->size;
list_del(&next_node->ml_entry);
list_del(&next_node->fl_entry);
- drm_ctl_cache_free(drm_cache.mm,
- sizeof(*next_node),
+ drm_ctl_cache_free(drm_cache.mm,
+ sizeof(*next_node),
next_node);
} else {
next_node->size += cur->size;
@@ -161,9 +161,9 @@ drm_mm_node_t *drm_mm_search_free(const
return best;
}
-int drm_mm_clean(drm_mm_t *mm)
+int drm_mm_clean(drm_mm_t * mm)
{
- struct list_head *head = &mm->root_node.ml_entry;
+ struct list_head *head = &mm->root_node.ml_entry;
return (head->next->next == head);
}
@@ -175,9 +175,8 @@ int drm_mm_init(drm_mm_t * mm, unsigned
INIT_LIST_HEAD(&mm->root_node.ml_entry);
INIT_LIST_HEAD(&mm->root_node.fl_entry);
-
- child = (drm_mm_node_t *)
- drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child), GFP_KERNEL);
+ child = (drm_mm_node_t *)
+ drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child), GFP_KERNEL);
if (!child)
return -ENOMEM;
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index 3e66319..599589f 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -32,7 +32,6 @@
* Use kmalloc if possible. Otherwise fall back to vmalloc.
*/
-
static void *ttm_alloc(unsigned long size, int type)
{
void *ret = NULL;
@@ -53,15 +52,15 @@ static void *ttm_alloc(unsigned long siz
static void ttm_free(void *pointer, unsigned long size, int type)
{
-
- if ((unsigned long) pointer >= VMALLOC_START &&
- (unsigned long) pointer <= VMALLOC_END) {
+
+ if ((unsigned long)pointer >= VMALLOC_START &&
+ (unsigned long)pointer <= VMALLOC_END) {
vfree(pointer);
} else {
drm_free(pointer, size, type);
}
drm_free_memctl(size);
-}
+}
/*
* Unmap all vma pages from vmas mapping this ttm.
@@ -155,7 +154,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
if (ttm->pages) {
drm_buffer_manager_t *bm = &ttm->dev->bm;
- if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
+ if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
drm_set_caching(ttm, 0);
for (i = 0; i < ttm->num_pages; ++i) {
@@ -184,7 +183,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
--bm->cur_pages;
}
}
- ttm_free(ttm->pages, ttm->num_pages*sizeof(*ttm->pages),
+ ttm_free(ttm->pages, ttm->num_pages * sizeof(*ttm->pages),
DRM_MEM_TTM);
ttm->pages = NULL;
}
@@ -193,20 +192,19 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
return 0;
}
-static int drm_ttm_populate(drm_ttm_t *ttm)
+static int drm_ttm_populate(drm_ttm_t * ttm)
{
struct page *page;
unsigned long i;
drm_buffer_manager_t *bm;
drm_ttm_backend_t *be;
-
- if (ttm->state != ttm_unpopulated)
+ if (ttm->state != ttm_unpopulated)
return 0;
-
+
bm = &ttm->dev->bm;
be = ttm->be;
- for (i=0; i<ttm->num_pages; ++i) {
+ for (i = 0; i < ttm->num_pages; ++i) {
page = ttm->pages[i];
if (!page) {
if (drm_alloc_memctl(PAGE_SIZE)) {
@@ -229,9 +227,7 @@ static int drm_ttm_populate(drm_ttm_t *t
be->populate(be, ttm->num_pages, ttm->pages);
ttm->state = ttm_unbound;
return 0;
-}
-
-
+}
/*
* Initialize a ttm.
@@ -266,7 +262,7 @@ static drm_ttm_t *drm_init_ttm(struct dr
* Account also for AGP module memory usage.
*/
- ttm->pages = ttm_alloc(ttm->num_pages * sizeof(*ttm->pages),
+ ttm->pages = ttm_alloc(ttm->num_pages * sizeof(*ttm->pages),
DRM_MEM_TTM);
if (!ttm->pages) {
drm_destroy_ttm(ttm);
@@ -321,13 +317,12 @@ void drm_fixup_ttm_caching(drm_ttm_t * t
ttm->state = ttm_unbound;
}
}
-
int drm_unbind_ttm(drm_ttm_t * ttm)
{
int ret = 0;
- if (ttm->state == ttm_bound)
+ if (ttm->state == ttm_bound)
ret = drm_evict_ttm(ttm);
if (ret)
@@ -337,8 +332,7 @@ int drm_unbind_ttm(drm_ttm_t * ttm)
return 0;
}
-int drm_bind_ttm(drm_ttm_t * ttm, int cached,
- unsigned long aper_offset)
+int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
{
int ret = 0;
@@ -350,7 +344,7 @@ int drm_bind_ttm(drm_ttm_t * ttm, int ca
return 0;
be = ttm->be;
-
+
ret = drm_ttm_populate(ttm);
if (ret)
return ret;
@@ -361,7 +355,7 @@ int drm_bind_ttm(drm_ttm_t * ttm, int ca
drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
}
-#ifdef DRM_ODD_MM_COMPAT
+#ifdef DRM_ODD_MM_COMPAT
else if (ttm->state == ttm_evicted && !cached) {
ret = drm_ttm_lock_mm(ttm);
if (ret)
@@ -378,18 +372,17 @@ int drm_bind_ttm(drm_ttm_t * ttm, int ca
return ret;
}
-
ttm->aper_offset = aper_offset;
ttm->state = ttm_bound;
#ifdef DRM_ODD_MM_COMPAT
if (be->needs_ub_cache_adjust(be)) {
ret = drm_ttm_remap_bound(ttm);
- if (ret)
+ if (ret)
return ret;
}
#endif
-
+
return 0;
}
@@ -448,8 +441,7 @@ void drm_ttm_object_deref_unlocked(drm_d
*/
int drm_ttm_object_create(drm_device_t * dev, unsigned long size,
- uint32_t flags,
- drm_ttm_object_t ** ttm_object)
+ uint32_t flags, drm_ttm_object_t ** ttm_object)
{
drm_ttm_object_t *object;
drm_map_list_t *list;
@@ -476,21 +468,20 @@ int drm_ttm_object_create(drm_device_t *
return -ENOMEM;
}
- map->offset = (unsigned long) ttm;
+ map->offset = (unsigned long)ttm;
map->type = _DRM_TTM;
map->flags = _DRM_REMOVABLE;
map->size = ttm->num_pages * PAGE_SIZE;
map->handle = (void *)object;
list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
- ttm->num_pages,
- 0,0);
+ ttm->num_pages, 0, 0);
if (!list->file_offset_node) {
drm_ttm_object_remove(dev, object);
return -ENOMEM;
}
list->file_offset_node = drm_mm_get_block(list->file_offset_node,
- ttm->num_pages,0);
+ ttm->num_pages, 0);
list->hash.key = list->file_offset_node->start;
diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h
index e5501d9..11a1375 100644
--- a/linux-core/drm_ttm.h
+++ b/linux-core/drm_ttm.h
@@ -52,12 +52,12 @@ typedef struct drm_ttm_backend {
unsigned long aperture_base;
void *private;
uint32_t flags;
- uint32_t drm_map_type;
+ uint32_t drm_map_type;
int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
int (*populate) (struct drm_ttm_backend * backend,
unsigned long num_pages, struct page ** pages);
void (*clear) (struct drm_ttm_backend * backend);
- int (*bind) (struct drm_ttm_backend * backend,
+ int (*bind) (struct drm_ttm_backend * backend,
unsigned long offset, int cached);
int (*unbind) (struct drm_ttm_backend * backend);
void (*destroy) (struct drm_ttm_backend * backend);
@@ -68,11 +68,11 @@ typedef struct drm_ttm {
uint32_t page_flags;
unsigned long num_pages;
unsigned long aper_offset;
- atomic_t vma_count;
+ atomic_t vma_count;
struct drm_device *dev;
int destroy;
- uint32_t mapping_offset;
- drm_ttm_backend_t *be;
+ uint32_t mapping_offset;
+ drm_ttm_backend_t *be;
enum {
ttm_bound,
ttm_evicted,
@@ -80,8 +80,8 @@ typedef struct drm_ttm {
ttm_unpopulated,
} state;
#ifdef DRM_ODD_MM_COMPAT
- struct list_head vma_list;
- struct list_head p_mm_list;
+ struct list_head vma_list;
+ struct list_head p_mm_list;
#endif
} drm_ttm_t;
@@ -93,7 +93,7 @@ typedef struct drm_ttm_object {
} drm_ttm_object_t;
extern int drm_ttm_object_create(struct drm_device *dev, unsigned long size,
- uint32_t flags,
+ uint32_t flags,
drm_ttm_object_t ** ttm_object);
extern void drm_ttm_object_deref_locked(struct drm_device *dev,
drm_ttm_object_t * to);
@@ -102,8 +102,7 @@ extern void drm_ttm_object_deref_unlocke
extern drm_ttm_object_t *drm_lookup_ttm_object(drm_file_t * priv,
uint32_t handle,
int check_owner);
-extern int drm_bind_ttm(drm_ttm_t * ttm, int cached,
- unsigned long aper_offset);
+extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
extern int drm_unbind_ttm(drm_ttm_t * ttm);
@@ -112,8 +111,7 @@ extern int drm_unbind_ttm(drm_ttm_t * tt
*/
extern int drm_evict_ttm(drm_ttm_t * ttm);
-extern void drm_fixup_ttm_caching(drm_ttm_t *ttm);
-
+extern void drm_fixup_ttm_caching(drm_ttm_t * ttm);
/*
* Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c
index 8a3d7bf..729ba4b 100644
--- a/linux-core/i915_buffer.c
+++ b/linux-core/i915_buffer.c
@@ -36,23 +36,23 @@
#define INTEL_AGP_MEM_USER 3
#define INTEL_AGP_MEM_UCACHED 4
-drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev)
+drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev)
{
- return drm_agp_init_ttm(dev, NULL, INTEL_AGP_MEM_USER, INTEL_AGP_MEM_UCACHED,
- INTEL_AGP_MEM_USER);
+ return drm_agp_init_ttm(dev, NULL, INTEL_AGP_MEM_USER,
+ INTEL_AGP_MEM_UCACHED, INTEL_AGP_MEM_USER);
}
-int i915_fence_types(uint32_t buffer_flags, uint32_t *class, uint32_t *type)
+int i915_fence_types(uint32_t buffer_flags, uint32_t * class, uint32_t * type)
{
*class = 0;
- if (buffer_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
+ if (buffer_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
*type = 3;
else
*type = 1;
return 0;
}
-int i915_invalidate_caches(drm_device_t *dev, uint32_t flags)
+int i915_invalidate_caches(drm_device_t * dev, uint32_t flags)
{
/*
* FIXME: Only emit once per batchbuffer submission.
@@ -65,6 +65,5 @@ int i915_invalidate_caches(drm_device_t
if (flags & DRM_BO_FLAG_EXE)
flush_cmd |= MI_EXE_FLUSH;
-
return i915_emit_mi_flush(dev, flush_cmd);
}
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
index 49dc254..fc8ab76 100644
--- a/linux-core/i915_fence.c
+++ b/linux-core/i915_fence.c
@@ -62,7 +62,7 @@ static void i915_perform_flush(drm_devic
diff = sequence - fm->last_exe_flush;
if (diff < driver->wrap_diff && diff != 0) {
drm_fence_handler(dev, sequence, DRM_FENCE_TYPE_EXE);
- }
+ }
diff = sequence - fm->exe_flush_sequence;
if (diff < driver->wrap_diff) {
@@ -85,7 +85,7 @@ static void i915_perform_flush(drm_devic
flush_sequence = dev_priv->flush_sequence;
dev_priv->flush_pending = 0;
drm_fence_handler(dev, flush_sequence, flush_flags);
- }
+ }
}
if (fm->pending_flush && !dev_priv->flush_pending) {
@@ -105,7 +105,7 @@ static void i915_perform_flush(drm_devic
flush_sequence = dev_priv->flush_sequence;
dev_priv->flush_pending = 0;
drm_fence_handler(dev, flush_sequence, flush_flags);
- }
+ }
}
}
@@ -121,15 +121,15 @@ void i915_poke_flush(drm_device_t * dev)
}
int i915_fence_emit_sequence(drm_device_t * dev, uint32_t flags,
- uint32_t * sequence, uint32_t *native_type)
+ uint32_t * sequence, uint32_t * native_type)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
i915_emit_irq(dev);
*sequence = (uint32_t) dev_priv->counter;
- *native_type = DRM_FENCE_TYPE_EXE;
- if (flags & DRM_I915_FENCE_FLAG_FLUSHED)
+ *native_type = DRM_FENCE_TYPE_EXE;
+ if (flags & DRM_I915_FENCE_FLAG_FLUSHED)
*native_type |= DRM_I915_FENCE_TYPE_RW;
-
+
return 0;
}
@@ -141,4 +141,3 @@ void i915_fence_handler(drm_device_t * d
i915_perform_flush(dev);
write_unlock(&fm->lock);
}
-
diff-tree f22f89e6b3c970a29197d3a53c170fb7d0340cbe (from \
d515936ea7f98f6aaa9217699796beadef9d664b)
Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Date: Tue Oct 17 19:52:34 2006 +0200
Add vma list memory usage to memory accounting.
Use byte unit for /proc printout of memory usage for small sizes to be
able to detect memory allocation bugs more easily.
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index e59322d..b10e988 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -763,7 +763,6 @@ typedef struct drm_cache {
kmem_cache_t *mm;
kmem_cache_t *fence_object;
- kmem_cache_t *ref_object;
} drm_cache_t;
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index e1ee35c..75c89c1 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -228,7 +228,7 @@ int drm_lastclose(drm_device_t * dev)
if (dev->vmalist) {
for (vma = dev->vmalist; vma; vma = vma_next) {
vma_next = vma->next;
- drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
+ drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
dev->vmalist = NULL;
}
@@ -464,14 +464,6 @@ static int drm_create_memory_caches(void
if (!drm_cache.fence_object)
return -ENOMEM;
- drm_cache.ref_object= kmem_cache_create("drm_ref_object_t",
- sizeof(drm_ref_object_t),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL,NULL);
- if (!drm_cache.ref_object)
- return -ENOMEM;
-
return 0;
}
@@ -489,8 +481,6 @@ static void drm_free_mem_cache(kmem_cach
static void drm_free_memory_caches(void )
{
- drm_free_mem_cache(drm_cache.ref_object, "ref object");
- drm_cache.ref_object = NULL;
drm_free_mem_cache(drm_cache.fence_object, "fence object");
drm_cache.fence_object = NULL;
drm_free_mem_cache(drm_cache.mm, "memory manager block");
diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c
index 4bb71ca..863cacf 100644
--- a/linux-core/drm_proc.c
+++ b/linux-core/drm_proc.c
@@ -469,8 +469,13 @@ static int drm__objects_info(char *buf,
drm_query_memctl(&used_mem, &low_mem, &high_mem);
- DRM_PROC_PRINT("Used object memory is %lu pages.\n",
- (unsigned long) (used_mem >> PAGE_SHIFT));
+ if (used_mem > 16*PAGE_SIZE) {
+ DRM_PROC_PRINT("Used object memory is %lu pages.\n",
+ (unsigned long) (used_mem >> PAGE_SHIFT));
+ } else {
+ DRM_PROC_PRINT("Used object memory is %lu bytes.\n",
+ (unsigned long) used_mem);
+ }
DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n",
(unsigned long) (low_mem >> PAGE_SHIFT));
DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n",
diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c
index 6631056..8413fb4 100644
--- a/linux-core/drm_stub.c
+++ b/linux-core/drm_stub.c
@@ -56,8 +56,7 @@ struct proc_dir_entry *drm_proc_root;
drm_cache_t drm_cache =
{ .mm = NULL,
- .fence_object = NULL,
- .ref_object = NULL
+ .fence_object = NULL
};
static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index 1654236..aa11275 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -315,7 +315,7 @@ static void drm_vm_shm_close(struct vm_a
} else {
dev->vmalist = pt->next;
}
- drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
+ drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
} else {
prev = pt;
}
@@ -567,7 +567,7 @@ static void drm_vm_open(struct vm_area_s
vma->vm_start, vma->vm_end - vma->vm_start);
atomic_inc(&dev->vma_count);
- vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
+ vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
if (vma_entry) {
mutex_lock(&dev->struct_mutex);
vma_entry->vma = vma;
@@ -627,7 +627,7 @@ static void drm_vm_close(struct vm_area_
} else {
dev->vmalist = pt->next;
}
- drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
+ drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
break;
}
}
diff-tree d515936ea7f98f6aaa9217699796beadef9d664b (from \
5443dbe35f182b9286a96d24d29037d5cb625e3d)
Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Date: Tue Oct 17 19:40:57 2006 +0200
Add memory usage accounting to avoid DOS problems.
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index ff95225..e59322d 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -1129,6 +1129,14 @@ extern int drm_free_agp(DRM_AGP_MEM * ha
extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
extern int drm_unbind_agp(DRM_AGP_MEM * handle);
+extern void drm_free_memctl(size_t size);
+extern int drm_alloc_memctl(size_t size);
+extern void drm_query_memctl(drm_u64_t *cur_used,
+ drm_u64_t *low_threshold,
+ drm_u64_t *high_threshold);
+extern void drm_init_memctl(size_t low_threshold,
+ size_t high_threshold);
+
/* Misc. IOCTL support (drm_ioctl.h) */
extern int drm_irq_by_busid(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
@@ -1527,6 +1535,58 @@ extern void *drm_alloc(size_t size, int
extern void drm_free(void *pt, size_t size, int area);
#endif
+/*
+ * Accounting variants of standard calls.
+ */
+
+static inline void *drm_ctl_alloc(size_t size, int area)
+{
+ void *ret;
+ if (drm_alloc_memctl(size))
+ return NULL;
+ ret = drm_alloc(size, area);
+ if (!ret)
+ drm_free_memctl(size);
+ return ret;
+}
+
+static inline void *drm_ctl_calloc(size_t nmemb, size_t size, int area)
+{
+ void *ret;
+
+ if (drm_alloc_memctl(nmemb*size))
+ return NULL;
+ ret = drm_calloc(nmemb, size, area);
+ if (!ret)
+ drm_free_memctl(nmemb*size);
+ return ret;
+}
+
+static inline void drm_ctl_free(void *pt, size_t size, int area)
+{
+ drm_free(pt, size, area);
+ drm_free_memctl(size);
+}
+
+static inline void *drm_ctl_cache_alloc(kmem_cache_t *cache, size_t size,
+ int flags)
+{
+ void *ret;
+ if (drm_alloc_memctl(size))
+ return NULL;
+ ret = kmem_cache_alloc(cache, flags);
+ if (!ret)
+ drm_free_memctl(size);
+ return ret;
+}
+
+static inline void drm_ctl_cache_free(kmem_cache_t *cache, size_t size,
+ void *obj)
+{
+ kmem_cache_free(cache, obj);
+ drm_free_memctl(size);
+}
+
/*@}*/
#endif /* __KERNEL__ */
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index d9fd9c9..ffbe04f 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -570,14 +570,19 @@ static int drm_agp_populate(drm_ttm_back
struct page **cur_page, **last_page = pages + num_pages;
DRM_AGP_MEM *mem;
+ if (drm_alloc_memctl(num_pages * sizeof(void *)))
+ return -1;
+
DRM_DEBUG("drm_agp_populate_ttm\n");
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
mem = drm_agp_allocate_memory(num_pages, agp_priv->alloc_type);
#else
mem = drm_agp_allocate_memory(agp_priv->bridge, num_pages, agp_priv->alloc_type);
#endif
- if (!mem)
+ if (!mem) {
+ drm_free_memctl(num_pages *sizeof(void *));
return -1;
+ }
DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
mem->page_count = 0;
@@ -626,8 +631,10 @@ static void drm_agp_clear_ttm(drm_ttm_ba
DRM_DEBUG("drm_agp_clear_ttm\n");
if (mem) {
+ unsigned long num_pages = mem->page_count;
backend->unbind(backend);
agp_free_memory(mem);
+ drm_free_memctl(num_pages *sizeof(void *));
}
agp_priv->mem = NULL;
@@ -644,10 +651,12 @@ static void drm_agp_destroy_ttm(drm_ttm_
if (agp_priv->mem) {
backend->clear(backend);
}
- drm_free(agp_priv, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
+ drm_ctl_free(agp_priv, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
+ backend->private = NULL;
+ }
+ if (backend->flags & DRM_BE_FLAG_NEEDS_FREE) {
+ drm_ctl_free(backend, sizeof(*backend), DRM_MEM_MAPPINGS);
}
- if (backend->flags & DRM_BE_FLAG_NEEDS_FREE)
- drm_free(backend, sizeof(*backend), DRM_MEM_MAPPINGS);
}
}
@@ -662,15 +671,15 @@ drm_ttm_backend_t *drm_agp_init_ttm(stru
drm_agp_ttm_priv *agp_priv;
agp_be = (backend != NULL) ? backend:
- drm_calloc(1, sizeof(*agp_be), DRM_MEM_MAPPINGS);
+ drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_MAPPINGS);
if (!agp_be)
return NULL;
- agp_priv = drm_calloc(1, sizeof(agp_priv), DRM_MEM_MAPPINGS);
+ agp_priv = drm_ctl_calloc(1, sizeof(*agp_priv), DRM_MEM_MAPPINGS);
if (!agp_priv) {
- drm_free(agp_be, sizeof(*agp_be), DRM_MEM_MAPPINGS);
+ drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_MAPPINGS);
return NULL;
}
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index f671a04..fb90098 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -237,7 +237,7 @@ static void drm_bo_destroy_locked(drm_de
drm_ttm_object_deref_locked(dev, bo->ttm_object);
}
atomic_dec(&bm->count);
- drm_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
+ drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
}
static void drm_bo_delayed_delete(drm_device_t * dev)
@@ -1390,7 +1390,7 @@ int drm_buffer_object_create(drm_file_t
return -EINVAL;
}
- bo = drm_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
+ bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
if (!bo)
return -ENOMEM;
@@ -1752,6 +1752,12 @@ static int drm_bo_lock_mm(drm_device_t *
{
int ret;
drm_buffer_manager_t *bm = &dev->bm;
+
+ if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
+ DRM_ERROR("Illegal memory manager memory type %u,\n",
+ mem_type);
+ return -EINVAL;
+ }
ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1);
if (ret)
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 5287614..4a035f4 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -239,8 +239,13 @@ struct page *drm_vm_ttm_nopage(struct vm
page = NOPAGE_OOM;
goto out;
}
+ if (drm_alloc_memctl(PAGE_SIZE)) {
+ page = NOPAGE_OOM;
+ goto out;
+ }
page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
if (!page) {
+ drm_free_memctl(PAGE_SIZE);
page = NOPAGE_OOM;
goto out;
}
@@ -284,7 +289,7 @@ int drm_ttm_add_vma(drm_ttm_t * ttm, str
vma->vm_private_data;
struct mm_struct *mm = vma->vm_mm;
- v_entry = drm_alloc(sizeof(*v_entry), DRM_MEM_TTM);
+ v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_TTM);
if (!v_entry) {
DRM_ERROR("Allocation of vma pointer entry failed\n");
return -ENOMEM;
@@ -300,7 +305,7 @@ int drm_ttm_add_vma(drm_ttm_t * ttm, str
} else if ((unsigned long)mm < (unsigned long)entry->mm) ;
}
- n_entry = drm_alloc(sizeof(*n_entry), DRM_MEM_TTM);
+ n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_TTM);
if (!n_entry) {
DRM_ERROR("Allocation of process mm pointer entry failed\n");
return -ENOMEM;
@@ -325,7 +330,7 @@ void drm_ttm_delete_vma(drm_ttm_t * ttm,
if (v_entry->vma == vma) {
found = 1;
list_del(&v_entry->head);
- drm_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
+ drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
break;
}
}
@@ -336,7 +341,7 @@ void drm_ttm_delete_vma(drm_ttm_t * ttm,
if (atomic_add_negative(-1, &entry->refcount)) {
list_del(&entry->head);
BUG_ON(entry->locked);
- drm_free(entry, sizeof(*entry), DRM_MEM_TTM);
+ drm_ctl_free(entry, sizeof(*entry), DRM_MEM_TTM);
}
return;
}
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 43b4f8d..e1ee35c 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -130,7 +130,6 @@ static drm_ioctl_desc_t drm_ioctls[] = {
#define DRIVER_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
-
/**
* Take down the DRM device.
*
@@ -502,7 +501,10 @@ static void drm_free_memory_caches(void
static int __init drm_core_init(void)
{
int ret;
+ struct sysinfo si;
+ si_meminfo(&si);
+ drm_init_memctl(si.totalram/2, si.totalram*3/4);
ret = drm_create_memory_caches();
if (ret)
goto err_p1;
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index 3e20f12..c9a2a06 100644
--- a/linux-core/drm_fence.c
+++ b/linux-core/drm_fence.c
@@ -118,7 +118,8 @@ void drm_fence_usage_deref_locked(drm_de
DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
fence->base.hash.key);
atomic_dec(&fm->count);
- kmem_cache_free(drm_cache.fence_object, fence);
+ drm_ctl_cache_free(drm_cache.fence_object, sizeof(*fence),
+ fence);
}
}
@@ -132,7 +133,8 @@ void drm_fence_usage_deref_unlocked(drm_
if (atomic_read(&fence->usage) == 0) {
drm_fence_unring(dev, &fence->ring);
atomic_dec(&fm->count);
- kmem_cache_free(drm_cache.fence_object, fence);
+ drm_ctl_cache_free(drm_cache.fence_object,
+ sizeof(*fence), fence);
}
mutex_unlock(&dev->struct_mutex);
}
@@ -439,7 +441,8 @@ int drm_fence_object_create(drm_device_t
int ret;
drm_fence_manager_t *fm = &dev->fm;
- fence = kmem_cache_alloc(drm_cache.fence_object, GFP_KERNEL);
+ fence = drm_ctl_cache_alloc(drm_cache.fence_object,
+ sizeof(*fence), GFP_KERNEL);
if (!fence)
return -ENOMEM;
ret = drm_fence_object_init(dev, type, flags, fence);
diff --git a/linux-core/drm_hashtab.c b/linux-core/drm_hashtab.c
index 63ee5f9..3a2aa80 100644
--- a/linux-core/drm_hashtab.c
+++ b/linux-core/drm_hashtab.c
@@ -44,7 +44,7 @@ int drm_ht_create(drm_open_hash_t *ht, u
ht->order = order;
ht->fill = 0;
ht->table = NULL;
- ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > 4*PAGE_SIZE);
+ ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
if (!ht->use_vmalloc) {
ht->table = drm_calloc(ht->size, sizeof(*ht->table),
DRM_MEM_HASHTAB);
diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c
index 9125cd4..ba65136 100644
--- a/linux-core/drm_memory.c
+++ b/linux-core/drm_memory.c
@@ -37,6 +37,75 @@
#include <linux/highmem.h>
#include "drmP.h"
+static struct {
+ spinlock_t lock;
+ drm_u64_t cur_used;
+ drm_u64_t low_threshold;
+ drm_u64_t high_threshold;
+} drm_memctl = {
+ .lock = SPIN_LOCK_UNLOCKED
+};
+
+static inline size_t drm_size_align(size_t size) {
+
+ register size_t tmpSize = 4;
+ if (size > PAGE_SIZE)
+ return PAGE_ALIGN(size);
+
+ while(tmpSize < size)
+ tmpSize <<= 1;
+
+ return (size_t) tmpSize;
+}
+
+int drm_alloc_memctl(size_t size)
+{
+ int ret;
+ unsigned long a_size = drm_size_align(size);
+
+ spin_lock(&drm_memctl.lock);
+ ret = ((drm_memctl.cur_used + a_size) > drm_memctl.high_threshold) ?
+ -ENOMEM : 0;
+ if (!ret)
+ drm_memctl.cur_used += a_size;
+ spin_unlock(&drm_memctl.lock);
+ return ret;
+}
+EXPORT_SYMBOL(drm_alloc_memctl);
+
+void drm_free_memctl(size_t size)
+{
+ unsigned long a_size = drm_size_align(size);
+
+ spin_lock(&drm_memctl.lock);
+ drm_memctl.cur_used -= a_size;
+ spin_unlock(&drm_memctl.lock);
+}
+EXPORT_SYMBOL(drm_free_memctl);
+
+void drm_query_memctl(drm_u64_t *cur_used,
+ drm_u64_t *low_threshold,
+ drm_u64_t *high_threshold)
+{
+ spin_lock(&drm_memctl.lock);
+ *cur_used = drm_memctl.cur_used;
+ *low_threshold = drm_memctl.low_threshold;
+ *high_threshold = drm_memctl.high_threshold;
+ spin_unlock(&drm_memctl.lock);
+}
+EXPORT_SYMBOL(drm_query_memctl);
+
+void drm_init_memctl(size_t p_low_threshold,
+ size_t p_high_threshold)
+{
+ spin_lock(&drm_memctl.lock);
+ drm_memctl.cur_used = 0;
+ drm_memctl.low_threshold = p_low_threshold << PAGE_SHIFT;
+ drm_memctl.high_threshold = p_high_threshold << PAGE_SHIFT;
+ spin_unlock(&drm_memctl.lock);
+}
+
+
#ifndef DEBUG_MEMORY
/** No-op. */
diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c
index ef8bd7e..6ab13af 100644
--- a/linux-core/drm_mm.c
+++ b/linux-core/drm_mm.c
@@ -59,8 +59,9 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_n
return parent;
} else {
- child = (drm_mm_node_t *) kmem_cache_alloc(drm_cache.mm,
- GFP_KERNEL);
+ child = (drm_mm_node_t *)
+ drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child),
+ GFP_KERNEL);
if (!child)
return NULL;
@@ -110,8 +111,9 @@ void drm_mm_put_block(drm_mm_node_t * cu
prev_node->size += next_node->size;
list_del(&next_node->ml_entry);
list_del(&next_node->fl_entry);
- kmem_cache_free(drm_cache.mm, next_node);
-
+ drm_ctl_cache_free(drm_cache.mm,
+ sizeof(*next_node),
+ next_node);
} else {
next_node->size += cur->size;
next_node->start = cur->start;
@@ -124,7 +126,7 @@ void drm_mm_put_block(drm_mm_node_t * cu
list_add(&cur->fl_entry, &list_root->fl_entry);
} else {
list_del(&cur->ml_entry);
- kmem_cache_free(drm_cache.mm, cur);
+ drm_ctl_cache_free(drm_cache.mm, sizeof(*cur), cur);
}
}
@@ -174,7 +176,8 @@ int drm_mm_init(drm_mm_t * mm, unsigned
INIT_LIST_HEAD(&mm->root_node.fl_entry);
- child = (drm_mm_node_t *) kmem_cache_alloc(drm_cache.mm, GFP_KERNEL);
+ child = (drm_mm_node_t *)
+ drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child), GFP_KERNEL);
if (!child)
return -ENOMEM;
@@ -210,7 +213,7 @@ void drm_mm_takedown(drm_mm_t * mm)
list_del(&entry->fl_entry);
list_del(&entry->ml_entry);
- kmem_cache_free(drm_cache.mm, entry);
+ drm_ctl_cache_free(drm_cache.mm, sizeof(*entry), entry);
}
EXPORT_SYMBOL(drm_mm_takedown);
diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c
index e1b7910..0157329 100644
--- a/linux-core/drm_object.c
+++ b/linux-core/drm_object.c
@@ -152,7 +152,7 @@ int drm_add_ref_object(drm_file_t * priv
ref_action);
}
- item = drm_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
+ item = drm_ctl_calloc(1, sizeof(*item), DRM_MEM_OBJECTS);
if (item == NULL) {
DRM_ERROR("Could not allocate reference object\n");
return -ENOMEM;
@@ -218,7 +218,7 @@ void drm_remove_ref_object(drm_file_t *
list_del_init(&item->list);
if (unref_action == _DRM_REF_USE)
drm_remove_other_references(priv, user_object);
- drm_free(item, sizeof(*item), DRM_MEM_OBJECTS);
+ drm_ctl_free(item, sizeof(*item), DRM_MEM_OBJECTS);
}
switch (unref_action) {
diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c
index b0b1748..4bb71ca 100644
--- a/linux-core/drm_proc.c
+++ b/linux-core/drm_proc.c
@@ -439,6 +439,10 @@ static int drm__objects_info(char *buf,
int len = 0;
drm_buffer_manager_t *bm = &dev->bm;
drm_fence_manager_t *fm = &dev->fm;
+ drm_u64_t used_mem;
+ drm_u64_t low_mem;
+ drm_u64_t high_mem;
+
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
@@ -459,12 +463,18 @@ static int drm__objects_info(char *buf,
DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n",
atomic_read(&bm->count));
DRM_PROC_PRINT("Number of locked GATT pages: %lu.\n", bm->cur_pages);
- DRM_PROC_PRINT("Max allowed number of locked GATT pages %lu\n",
- bm->max_pages);
} else {
DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n\n");
}
+ drm_query_memctl(&used_mem, &low_mem, &high_mem);
+
+ DRM_PROC_PRINT("Used object memory is %lu pages.\n",
+ (unsigned long) (used_mem >> PAGE_SHIFT));
+ DRM_PROC_PRINT("Soft object memory usage threshold is %lu pages.\n",
+ (unsigned long) (low_mem >> PAGE_SHIFT));
+ DRM_PROC_PRINT("Hard object memory usage threshold is %lu pages.\n",
+ (unsigned long) (high_mem >> PAGE_SHIFT));
DRM_PROC_PRINT("\n");
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index 0f9cb11..3e66319 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -37,12 +37,17 @@ static void *ttm_alloc(unsigned long siz
{
void *ret = NULL;
- if (size <= 4*PAGE_SIZE) {
+ if (drm_alloc_memctl(size))
+ return NULL;
+ if (size <= PAGE_SIZE) {
ret = drm_alloc(size, type);
}
if (!ret) {
ret = vmalloc(size);
}
+ if (!ret) {
+ drm_free_memctl(size);
+ }
return ret;
}
@@ -55,6 +60,7 @@ static void ttm_free(void *pointer, unsi
} else {
drm_free(pointer, size, type);
}
+ drm_free_memctl(size);
}
/*
@@ -174,6 +180,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
*/
drm_free_gatt_pages(*cur_page, 0);
+ drm_free_memctl(PAGE_SIZE);
--bm->cur_pages;
}
}
@@ -182,8 +189,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
ttm->pages = NULL;
}
- drm_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
-
+ drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
return 0;
}
@@ -203,13 +209,14 @@ static int drm_ttm_populate(drm_ttm_t *t
for (i=0; i<ttm->num_pages; ++i) {
page = ttm->pages[i];
if (!page) {
- if (bm->cur_pages >= bm->max_pages) {
- DRM_ERROR("Maximum locked page count exceeded\n");
+ if (drm_alloc_memctl(PAGE_SIZE)) {
return -ENOMEM;
}
page = drm_alloc_gatt_pages(0);
- if (!page)
+ if (!page) {
+ drm_free_memctl(PAGE_SIZE);
return -ENOMEM;
+ }
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
SetPageLocked(page);
#else
@@ -238,7 +245,7 @@ static drm_ttm_t *drm_init_ttm(struct dr
if (!bo_driver)
return NULL;
- ttm = drm_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
+ ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
if (!ttm)
return NULL;
@@ -254,6 +261,11 @@ static drm_ttm_t *drm_init_ttm(struct dr
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
ttm->page_flags = 0;
+
+ /*
+ * Account also for AGP module memory usage.
+ */
+
ttm->pages = ttm_alloc(ttm->num_pages * sizeof(*ttm->pages),
DRM_MEM_TTM);
if (!ttm->pages) {
@@ -403,14 +415,14 @@ static void drm_ttm_object_remove(drm_de
drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
if (ttm) {
if (drm_destroy_ttm(ttm) != -EBUSY) {
- drm_free(map, sizeof(*map), DRM_MEM_TTM);
+ drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
}
} else {
- drm_free(map, sizeof(*map), DRM_MEM_TTM);
+ drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
}
}
- drm_free(object, sizeof(*object), DRM_MEM_TTM);
+ drm_ctl_free(object, sizeof(*object), DRM_MEM_TTM);
}
void drm_ttm_object_deref_locked(drm_device_t * dev, drm_ttm_object_t * to)
@@ -444,13 +456,13 @@ int drm_ttm_object_create(drm_device_t *
drm_local_map_t *map;
drm_ttm_t *ttm;
- object = drm_calloc(1, sizeof(*object), DRM_MEM_TTM);
+ object = drm_ctl_calloc(1, sizeof(*object), DRM_MEM_TTM);
if (!object)
return -ENOMEM;
object->flags = flags;
list = &object->map_list;
- list->map = drm_calloc(1, sizeof(*map), DRM_MEM_TTM);
+ list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_TTM);
if (!list->map) {
drm_ttm_object_remove(dev, object);
return -ENOMEM;
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index 091b43f..1654236 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -202,13 +202,13 @@ struct page *drm_vm_ttm_fault(struct vm_
page = ttm->pages[page_offset];
if (!page) {
- if (bm->cur_pages >= bm->max_pages) {
- DRM_ERROR("Maximum locked page count exceeded\n");
+ if (drm_alloc_memctl(PAGE_SIZE)) {
data->type = VM_FAULT_OOM;
goto out;
}
page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0);
if (!page) {
+ drm_free_memctl(PAGE_SIZE);
data->type = VM_FAULT_OOM;
goto out;
}
@@ -654,7 +654,7 @@ static void drm_vm_ttm_close(struct vm_a
if (ttm->destroy) {
ret = drm_destroy_ttm(ttm);
BUG_ON(ret);
- drm_free(map, sizeof(*map), DRM_MEM_TTM);
+ drm_ctl_free(map, sizeof(*map), DRM_MEM_TTM);
}
}
mutex_unlock(&dev->struct_mutex);
diff-tree 5443dbe35f182b9286a96d24d29037d5cb625e3d (from \
db5c671e86c3db8c99ce5a4954632248e6f849aa)
Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Date: Tue Oct 17 16:00:25 2006 +0200
Implement mm_lock and mm_unlock functions.
The mm_lock function is used when leaving vt. It evicts _all_ buffers.
Buffers with the DRM_BO_NO_MOVE attribute set will be guaranteed to
get the same offset when / if they are rebound.
diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index 039c9b3..253ba69 100644
--- a/libdrm/xf86drm.c
+++ b/libdrm/xf86drm.c
@@ -3202,4 +3202,37 @@ int drmMMMaxLockedSize(int fd, unsigned
return 0;
}
+int drmMMLock(int fd, unsigned memType)
+{
+ drm_mm_init_arg_t arg;
+ int ret;
+
+ memset(&arg, 0, sizeof(arg));
+ arg.req.op = mm_lock;
+ arg.req.mem_type = memType;
+
+ do{
+ ret = ioctl(fd, DRM_IOCTL_MM_INIT, &arg);
+ } while (ret && errno == EAGAIN);
+
+ return ret;
+}
+
+int drmMMUnlock(int fd, unsigned memType)
+{
+ drm_mm_init_arg_t arg;
+ int ret;
+
+ memset(&arg, 0, sizeof(arg));
+ arg.req.op = mm_unlock;
+ arg.req.mem_type = memType;
+
+ do{
+ ret = ioctl(fd, DRM_IOCTL_MM_INIT, &arg);
+ } while (ret && errno == EAGAIN);
+
+ return ret;
+}
+
+
#endif
diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h
index b2ba24d..c3112c9 100644
--- a/libdrm/xf86mm.h
+++ b/libdrm/xf86mm.h
@@ -199,7 +199,8 @@ extern int drmMMInit(int fd, unsigned lo
unsigned memType);
extern int drmMMTakedown(int fd, unsigned memType);
extern int drmMMMaxLockedSize(int fd, unsigned long maxLockedSize);
-
+extern int drmMMLock(int fd, unsigned memType);
+extern int drmMMUnlock(int fd, unsigned memType);
#endif
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 7ae001b..ff95225 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -1351,6 +1351,11 @@ extern drm_mm_node_t *drm_mm_search_free
extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size);
extern void drm_mm_takedown(drm_mm_t *mm);
extern int drm_mm_clean(drm_mm_t *mm);
+static inline drm_mm_t *drm_get_mm(drm_mm_node_t *block)
+{
+ return block->mm;
+}
+
/*
* User space object bookkeeping (drm_object.c)
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 67e9024..f671a04 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -514,8 +514,8 @@ static int drm_bo_evict(drm_buffer_objec
if (ret)
goto out;
mutex_lock(&dev->struct_mutex);
- list_del(&bo->lru_ttm);
- list_add_tail(&bo->lru_ttm, &bm->lru[DRM_BO_MEM_LOCAL]);
+ list_del_init(&bo->lru_ttm);
+ drm_bo_add_to_lru(bo, bm);
mutex_unlock(&dev->struct_mutex);
}
#if 0
@@ -658,6 +658,11 @@ static int drm_bo_new_flags(drm_device_t
new_mask &= ~drm_bo_type_flags(i);
}
+ if ((new_mask & DRM_BO_FLAG_NO_EVICT ) && !DRM_SUSER(DRM_CURPROC)) {
+ DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
+ "processes\n");
+ return -EPERM;
+ }
if (new_mask & DRM_BO_FLAG_BIND_CACHED) {
if (((new_mask & DRM_BO_FLAG_MEM_TT) &&
!driver->cached[DRM_BO_MEM_TT]) &&
@@ -1085,7 +1090,7 @@ static void drm_buffer_user_object_unmap
*/
static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags,
- int no_wait)
+ int no_wait, int force_no_move)
{
int ret = 0;
@@ -1118,7 +1123,7 @@ static int drm_bo_move_buffer(drm_buffer
if (ret)
return ret;
} else {
- drm_move_tt_to_local(bo, 0, 1);
+ drm_move_tt_to_local(bo, 0, force_no_move);
}
return 0;
@@ -1153,8 +1158,6 @@ static int drm_buffer_object_validate(dr
/*
* Move out if we need to change caching policy.
- * FIXME: Failing is strictly not needed for NO_MOVE buffers.
- * We just have to implement NO_MOVE buffers.
*/
if ((flag_diff & DRM_BO_FLAG_BIND_CACHED) &&
@@ -1164,7 +1167,7 @@ static int drm_buffer_object_validate(dr
"pinned buffer.\n");
return -EINVAL;
}
- ret = drm_bo_move_buffer(bo, DRM_BO_FLAG_MEM_LOCAL, no_wait);
+ ret = drm_bo_move_buffer(bo, DRM_BO_FLAG_MEM_LOCAL, no_wait, 0);
if (ret) {
if (ret != -EAGAIN)
DRM_ERROR("Failed moving buffer.\n");
@@ -1175,11 +1178,30 @@ static int drm_buffer_object_validate(dr
flag_diff = (new_flags ^ bo->flags);
/*
+ * Check whether we dropped no_move policy, and in that case,
+ * release reserved manager regions.
+ */
+
+ if ((flag_diff & DRM_BO_FLAG_NO_MOVE) &&
+ !(new_flags & DRM_BO_FLAG_NO_MOVE)) {
+ mutex_lock(&dev->struct_mutex);
+ if (bo->node_ttm) {
+ drm_mm_put_block(bo->node_ttm);
+ bo->node_ttm = NULL;
+ }
+ if (bo->node_card) {
+ drm_mm_put_block(bo->node_card);
+ bo->node_card = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ /*
* Check whether we need to move buffer.
*/
if ((bo->type != drm_bo_type_fake) && (flag_diff & DRM_BO_MASK_MEM)) {
- ret = drm_bo_move_buffer(bo, new_flags, no_wait);
+ ret = drm_bo_move_buffer(bo, new_flags, no_wait, 1);
if (ret) {
if (ret != -EAGAIN)
DRM_ERROR("Failed moving buffer.\n");
@@ -1207,7 +1229,6 @@ static int drm_buffer_object_validate(dr
list_del_init(&bo->lru_card);
drm_bo_add_to_lru(bo, bm);
mutex_unlock(&dev->struct_mutex);
- DRM_FLAG_MASKED(bo->flags, new_flags, DRM_BO_FLAG_NO_EVICT);
}
bo->flags = new_flags;
@@ -1586,34 +1607,51 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
* dev->struct_sem locked.
*/
-static void drm_bo_force_list_clean(drm_device_t *dev,
- struct list_head *head,
- unsigned mem_type)
+static int drm_bo_force_list_clean(drm_device_t *dev,
+ struct list_head *head,
+ unsigned mem_type,
+ int force_no_move,
+ int allow_errors)
{
drm_buffer_manager_t *bm = &dev->bm;
- struct list_head *l;
+ struct list_head *list, *next, *prev;
drm_buffer_object_t *entry;
int ret;
+ int clean;
- l = head->next;
- while (l != head) {
- entry = drm_bo_entry(l, mem_type);
-
+ retry:
+ clean = 1;
+ list_for_each_safe(list, next, head) {
+ prev = list->prev;
+ entry = drm_bo_entry(list, mem_type);
atomic_inc(&entry->usage);
mutex_unlock(&dev->struct_mutex);
mutex_lock(&entry->mutex);
+ mutex_lock(&dev->struct_mutex);
- /*
- * Expire the fence.
- */
+ if (prev != list->prev || next != list->next) {
+ mutex_unlock(&entry->mutex);
+ goto retry;
+ }
+ if (drm_bo_mm_node(entry, mem_type)) {
+ clean = 0;
- if (entry->fence) {
- if (bm->nice_mode) {
+ /*
+ * Expire the fence.
+ */
+
+ mutex_unlock(&dev->struct_mutex);
+ if (entry->fence && bm->nice_mode) {
unsigned long _end = jiffies + 3*DRM_HZ;
do {
ret = drm_bo_wait(entry, 0, 1, 0);
+ if (ret && allow_errors) {
+ if (ret == -EINTR)
+ ret = -EAGAIN;
+ goto out_err;
+ }
} while (ret && !time_after_eq(jiffies, _end));
-
+
if (entry->fence) {
bm->nice_mode = 0;
DRM_ERROR("Detected GPU hang or "
@@ -1621,23 +1659,47 @@ static void drm_bo_force_list_clean(drm_
"Evicting waiting buffers\n");
}
}
-
if (entry->fence) {
- drm_fence_usage_deref_unlocked(dev,
- entry->fence);
+ drm_fence_usage_deref_unlocked(dev, entry->fence);
entry->fence = NULL;
}
- }
- ret = drm_bo_evict(entry, mem_type, 0, 1);
- if (ret) {
- DRM_ERROR("Aargh. Eviction failed.\n");
+
+ DRM_MASK_VAL(entry->priv_flags, _DRM_BO_FLAG_UNFENCED, 0);
+
+ if (force_no_move) {
+ DRM_MASK_VAL(entry->flags, DRM_BO_FLAG_NO_MOVE, 0);
+ }
+ if (entry->flags & DRM_BO_FLAG_NO_EVICT) {
+ DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
+ "cleanup. Removing flag and evicting.\n");
+ entry->flags &= ~DRM_BO_FLAG_NO_EVICT;
+ entry->mask &= ~DRM_BO_FLAG_NO_EVICT;
+ }
+
+ ret = drm_bo_evict(entry, mem_type, 1, force_no_move);
+ if (ret) {
+ if (allow_errors) {
+ goto out_err;
+ } else {
+ DRM_ERROR("Aargh. Eviction failed.\n");
+ }
+ }
+ mutex_lock(&dev->struct_mutex);
}
mutex_unlock(&entry->mutex);
- mutex_lock(&dev->struct_mutex);
-
drm_bo_usage_deref_locked(dev, entry);
- l = head->next;
+ if (prev != list->prev || next != list->next) {
+ goto retry;
+ }
}
+ if (!clean)
+ goto retry;
+ return 0;
+ out_err:
+ mutex_unlock(&entry->mutex);
+ drm_bo_usage_deref_unlocked(dev, entry);
+ mutex_lock(&dev->struct_mutex);
+ return ret;
}
int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
@@ -1660,8 +1722,21 @@ int drm_bo_clean_mm(drm_device_t * dev,
ret = 0;
if (mem_type > 0) {
- drm_bo_force_list_clean(dev, &bm->lru[mem_type], 1);
- drm_bo_force_list_clean(dev, &bm->pinned[mem_type], 1);
+
+ /*
+ * Throw out unfenced buffers.
+ */
+
+ drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 1, 0);
+
+ /*
+ * Throw out evicted no-move buffers.
+ */
+
+ drm_bo_force_list_clean(dev, &bm->pinned[DRM_BO_MEM_LOCAL],
+ mem_type, 1, 0);
+ drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 1, 0);
+ drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 1, 0);
if (drm_mm_clean(&bm->manager[mem_type])) {
drm_mm_takedown(&bm->manager[mem_type]);
@@ -1673,6 +1748,22 @@ int drm_bo_clean_mm(drm_device_t * dev,
return ret;
}
+static int drm_bo_lock_mm(drm_device_t *dev, unsigned mem_type)
+{
+ int ret;
+ drm_buffer_manager_t *bm = &dev->bm;
+
+ ret = drm_bo_force_list_clean(dev, &bm->unfenced, mem_type, 0, 1);
+ if (ret)
+ return ret;
+ ret = drm_bo_force_list_clean(dev, &bm->lru[mem_type], mem_type, 0, 1);
+ if (ret)
+ return ret;
+ ret = drm_bo_force_list_clean(dev, &bm->pinned[mem_type], mem_type, 0, 1);
+ return ret;
+}
+
+
static int drm_bo_init_mm(drm_device_t *dev,
unsigned type,
unsigned long p_offset,
@@ -1711,20 +1802,6 @@ static int drm_bo_init_mm(drm_device_t *
}
-/*
- * call dev->struct_mutex locked;
- */
-
-static void drm_bo_release_unfenced(drm_buffer_manager_t *bm)
-{
- struct list_head *list, *next;
-
- list_for_each_safe(list, next, &bm->unfenced) {
- list_del(list);
- list_add_tail(list, &bm->lru[0]);
- }
-}
-
int drm_bo_driver_finish(drm_device_t *dev)
{
drm_buffer_manager_t *bm = &dev->bm;
@@ -1736,7 +1813,6 @@ int drm_bo_driver_finish(drm_device_t *d
if (!bm->initialized)
goto out;
- drm_bo_release_unfenced(bm);
while(i--) {
if (bm->has_type[i]) {
@@ -1877,6 +1953,18 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
}
bm->max_pages = arg.req.p_size;
}
+ case mm_lock:
+ LOCK_TEST_WITH_RETURN(dev, filp);
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_bo_lock_mm(dev, arg.req.mem_type);
+ break;
+ case mm_unlock:
+ LOCK_TEST_WITH_RETURN(dev, filp);
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+ ret = 0;
+ break;
default:
DRM_ERROR("Function not implemented yet\n");
return -EINVAL;
diff-tree db5c671e86c3db8c99ce5a4954632248e6f849aa (from 5881ce1b91034fbdf81dda37a23215cfc1310cdf)
Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Date: Tue Oct 17 11:28:48 2006 +0200
Remove the memory manager parameter from the put_block function, as this
makes the client code a lot cleaner. Prepare buffer manager for lock and
unlock calls.
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 7e95569..7ae001b 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -548,6 +548,7 @@ typedef struct drm_mm_node {
int free;
unsigned long start;
unsigned long size;
+ struct drm_mm *mm;
void *private;
} drm_mm_node_t;
@@ -1344,7 +1345,7 @@ extern void drm_sysfs_device_remove(stru
extern drm_mm_node_t * drm_mm_get_block(drm_mm_node_t * parent, unsigned long size,
unsigned alignment);
-extern void drm_mm_put_block(drm_mm_t *mm, drm_mm_node_t *cur);
+extern void drm_mm_put_block(drm_mm_node_t *cur);
extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size,
unsigned alignment, int best_match);
extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size);
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 20c58f2..67e9024 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -126,10 +126,10 @@ static void drm_bo_add_to_lru(drm_buffer
* bo locked.
*/
-static int drm_move_tt_to_local(drm_buffer_object_t * buf, int evict)
+static int drm_move_tt_to_local(drm_buffer_object_t * buf, int evict,
+ int force_no_move)
{
drm_device_t *dev = buf->dev;
- drm_buffer_manager_t *bm = &dev->bm;
int ret;
if (buf->node_ttm) {
@@ -146,8 +146,11 @@ static int drm_move_tt_to_local(drm_buff
return ret;
}
- drm_mm_put_block(&bm->manager[DRM_BO_MEM_TT], buf->node_ttm);
- buf->node_ttm = NULL;
+ if (!(buf->flags & DRM_BO_FLAG_NO_MOVE) ||
+ force_no_move) {
+ drm_mm_put_block(buf->node_ttm);
+ buf->node_ttm = NULL;
+ }
mutex_unlock(&dev->struct_mutex);
}
@@ -223,13 +226,11 @@ static void drm_bo_destroy_locked(drm_de
}
if (bo->node_ttm) {
- drm_mm_put_block(&bm->manager[DRM_BO_MEM_TT],
- bo->node_ttm);
+ drm_mm_put_block(bo->node_ttm);
bo->node_ttm = NULL;
}
if (bo->node_card) {
- drm_mm_put_block(&bm->manager[DRM_BO_MEM_VRAM],
- bo->node_card);
+ drm_mm_put_block(bo->node_card);
bo->node_card = NULL;
}
if (bo->ttm_object) {
@@ -484,7 +485,7 @@ static int drm_bo_wait(drm_buffer_object
*/
static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
- int no_wait)
+ int no_wait, int force_no_move)
{
int ret = 0;
drm_device_t *dev = bo->dev;
@@ -509,7 +510,7 @@ static int drm_bo_evict(drm_buffer_objec
}
if (mem_type == DRM_BO_MEM_TT) {
- ret = drm_move_tt_to_local(bo, 1);
+ ret = drm_move_tt_to_local(bo, 1, force_no_move);
if (ret)
goto out;
mutex_lock(&dev->struct_mutex);
@@ -565,7 +566,8 @@ int drm_bo_alloc_space(drm_buffer_object
atomic_inc(&bo->usage);
mutex_unlock(&dev->struct_mutex);
mutex_lock(&bo->mutex);
- ret = drm_bo_evict(bo, mem_type, no_wait);
+ BUG_ON(bo->flags & DRM_BO_FLAG_NO_MOVE);
+ ret = drm_bo_evict(bo, mem_type, no_wait, 0);
mutex_unlock(&bo->mutex);
drm_bo_usage_deref_unlocked(dev, bo);
if (ret)
@@ -596,22 +598,23 @@ int drm_bo_alloc_space(drm_buffer_object
static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
{
drm_device_t *dev = bo->dev;
- drm_buffer_manager_t *bm = &dev->bm;
drm_ttm_backend_t *be;
int ret;
- BUG_ON(bo->node_ttm);
- ret = drm_bo_alloc_space(bo, 1, no_wait);
+ if (!(bo->node_ttm && (bo->flags & DRM_BO_FLAG_NO_MOVE))) {
+ BUG_ON(bo->node_ttm);
+ ret = drm_bo_alloc_space(bo, DRM_BO_MEM_TT, no_wait);
+ if (ret)
+ return ret;
+ }
- if (ret)
- return ret;
DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->node_ttm->start);
mutex_lock(&dev->struct_mutex);
ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED,
bo->node_ttm->start);
if (ret) {
- drm_mm_put_block(&bm->manager[DRM_BO_MEM_TT], bo->node_ttm);
+ drm_mm_put_block(bo->node_ttm);
bo->node_ttm = NULL;
}
mutex_unlock(&dev->struct_mutex);
@@ -824,11 +827,11 @@ static int drm_bo_read_cached(drm_buffer
BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
if (bo->node_card)
- ret = drm_bo_evict(bo, DRM_BO_MEM_VRAM, 1);
+ ret = drm_bo_evict(bo, DRM_BO_MEM_VRAM, 1, 0);
if (ret)
return ret;
if (bo->node_ttm)
- ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
+ ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1, 0);
return ret;
}
@@ -1115,7 +1118,7 @@ static int drm_bo_move_buffer(drm_buffer
if (ret)
return ret;
} else {
- drm_move_tt_to_local(bo, 0);
+ drm_move_tt_to_local(bo, 0, 1);
}
return 0;
@@ -1625,7 +1628,7 @@ static void drm_bo_force_list_clean(drm_
entry->fence = NULL;
}
}
- ret = drm_bo_evict(entry, mem_type, 0);
+ ret = drm_bo_evict(entry, mem_type, 0, 1);
if (ret) {
DRM_ERROR("Aargh. Eviction failed.\n");
}
diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c
index debac9d..ef8bd7e 100644
--- a/linux-core/drm_mm.c
+++ b/linux-core/drm_mm.c
@@ -70,6 +70,7 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_n
child->free = 0;
child->size = size;
child->start = parent->start;
+ child->mm = parent->mm;
list_add_tail(&child->ml_entry, &parent->ml_entry);
parent->size -= size;
@@ -83,9 +84,10 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_n
* Otherwise add to the free stack.
*/
-void drm_mm_put_block(drm_mm_t * mm, drm_mm_node_t * cur)
+void drm_mm_put_block(drm_mm_node_t * cur)
{
+ drm_mm_t *mm = cur->mm;
drm_mm_node_t *list_root = &mm->root_node;
struct list_head *cur_head = &cur->ml_entry;
struct list_head *root_head = &list_root->ml_entry;
@@ -183,6 +185,7 @@ int drm_mm_init(drm_mm_t * mm, unsigned
child->start = start;
child->size = size;
child->free = 1;
+ child->mm = mm;
list_add(&child->fl_entry, &mm->root_node.fl_entry);
list_add(&child->ml_entry, &mm->root_node.ml_entry);
diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c
index 425c823..19a13f3 100644
--- a/linux-core/drm_sman.c
+++ b/linux-core/drm_sman.c
@@ -101,10 +101,9 @@ static void *drm_sman_mm_allocate(void *
static void drm_sman_mm_free(void *private, void *ref)
{
- drm_mm_t *mm = (drm_mm_t *) private;
drm_mm_node_t *node = (drm_mm_node_t *) ref;
- drm_mm_put_block(mm, node);
+ drm_mm_put_block(node);
}
static void drm_sman_mm_destroy(void *private)
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index f1fe1c8..0f9cb11 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -393,7 +393,7 @@ static void drm_ttm_object_remove(drm_de
drm_ht_remove_item(&dev->map_hash, &list->hash);
if (list->file_offset_node) {
- drm_mm_put_block(&dev->offset_manager, list->file_offset_node);
+ drm_mm_put_block(list->file_offset_node);
list->file_offset_node = NULL;
}
diff-tree 5881ce1b91034fbdf81dda37a23215cfc1310cdf (from 5b2a60f550090a41c13483ceaaa1a84d3a9257f8)
Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Date: Tue Oct 17 11:05:37 2006 +0200
Extend generality for more memory types.
Fix up init and destruction code.
diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index c768318..039c9b3 100644
--- a/libdrm/xf86drm.c
+++ b/libdrm/xf86drm.c
@@ -3155,19 +3155,16 @@ int drmBOFenceList(int fd, drmBOList *li
return 0;
}
-int drmMMInit(int fd, unsigned long vramPOffset, unsigned long vramPSize,
- unsigned long ttPOffset, unsigned long ttPSize,
- unsigned long max_locked_size)
+int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
+ unsigned memType)
{
drm_mm_init_arg_t arg;
memset(&arg, 0, sizeof(arg));
arg.req.op = mm_init;
- arg.req.vr_p_offset = vramPOffset;
- arg.req.vr_p_size = vramPSize;
- arg.req.tt_p_offset = ttPOffset;
- arg.req.tt_p_size = ttPSize;
- arg.req.max_locked_pages = max_locked_size / getpagesize();
+ arg.req.p_offset = pOffset;
+ arg.req.p_size = pSize;
+ arg.req.mem_type = memType;
if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
return -errno;
@@ -3175,13 +3172,29 @@ int drmMMInit(int fd, unsigned long vram
return 0;
}
-int drmMMTakedown(int fd)
+int drmMMTakedown(int fd, unsigned memType)
{
drm_mm_init_arg_t arg;
memset(&arg, 0, sizeof(arg));
arg.req.op = mm_takedown;
+ arg.req.mem_type = memType;
+
+ if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
+ return -errno;
+
+ return 0;
+}
+
+int drmMMMaxLockedSize(int fd, unsigned long maxLockedSize)
+{
+ drm_mm_init_arg_t arg;
+
+
+ memset(&arg, 0, sizeof(arg));
+ arg.req.op = mm_set_max_pages;
+ arg.req.p_size = maxLockedSize / getpagesize();
if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
return -errno;
diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h
index 78df37c..b2ba24d 100644
--- a/libdrm/xf86mm.h
+++ b/libdrm/xf86mm.h
@@ -195,10 +195,11 @@ extern int drmBOWaitIdle(int fd, drmBO *
* Initialization functions.
*/
-extern int drmMMInit(int fd, unsigned long vramPOffset, unsigned long vramPSize,
- unsigned long ttPOffset, unsigned long ttPSize,
- unsigned long max_locked_size);
-extern int drmMMTakedown(int fd);
+extern int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
+ unsigned memType);
+extern int drmMMTakedown(int fd, unsigned memType);
+extern int drmMMMaxLockedSize(int fd, unsigned long maxLockedSize);
+
#endif
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index f706d4d..7e95569 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -167,6 +167,7 @@
#define DRM_OBJECT_HASH_ORDER 12
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+#define DRM_MM_INIT_MAX_PAGES 256
/*@}*/
@@ -660,9 +661,8 @@ typedef struct drm_ref_object {
*/
typedef struct drm_bo_driver{
- int cached_tt;
- int cached_vram;
- drm_local_map_t *vram_map;
+ int cached[DRM_BO_MEM_TYPES];
+ drm_local_map_t *iomap[DRM_BO_MEM_TYPES];
drm_ttm_backend_t *(*create_ttm_backend_entry)
(struct drm_device *dev);
int (*fence_type)(uint32_t flags, uint32_t *class, uint32_t *type);
@@ -801,21 +801,17 @@ typedef struct drm_fence_manager{
} drm_fence_manager_t;
typedef struct drm_buffer_manager{
+ struct mutex init_mutex;
+ int nice_mode;
int initialized;
drm_file_t *last_to_validate;
- int has_vram;
- int has_tt;
- int use_vram;
- int use_tt;
- drm_mm_t tt_manager;
- drm_mm_t vram_manager;
- struct list_head tt_lru;
- struct list_head vram_lru;
- struct list_head tt_pinned;
- struct list_head vram_pinned;
+ int has_type[DRM_BO_MEM_TYPES];
+ int use_type[DRM_BO_MEM_TYPES];
+ drm_mm_t manager[DRM_BO_MEM_TYPES];
+ struct list_head lru[DRM_BO_MEM_TYPES];
+ struct list_head pinned[DRM_BO_MEM_TYPES];
struct list_head unfenced;
struct list_head ddestroy;
- struct list_head other;
struct work_struct wq;
uint32_t fence_type;
unsigned long max_pages;
@@ -1024,10 +1020,10 @@ typedef struct drm_buffer_object{
uint32_t flags;
uint32_t mask;
- drm_mm_node_t *vram;
- drm_mm_node_t *tt;
- struct list_head tt_lru;
- struct list_head vram_lru;
+ drm_mm_node_t *node_ttm; /* MM node for on-card RAM */
+ drm_mm_node_t *node_card; /* MM node for ttm*/
+ struct list_head lru_ttm; /* LRU for the ttm pages*/
+ struct list_head lru_card; /* For memory types with on-card RAM */
struct list_head ddestroy;
uint32_t fence_type;
@@ -1447,7 +1443,8 @@ extern int drm_fence_ioctl(DRM_IOCTL_ARG
extern int drm_bo_ioctl(DRM_IOCTL_ARGS);
extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
-extern int drm_bo_clean_mm(drm_device_t *dev);
+extern int drm_bo_driver_finish(drm_device_t *dev);
+extern int drm_bo_driver_init(drm_device_t *dev);
extern int drm_fence_buffer_objects(drm_file_t * priv,
struct list_head *list,
uint32_t fence_flags,
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index c24f8d5..20c58f2 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -59,6 +59,69 @@
(_old) ^= (((_old) ^ (_new)) & (_mask)); \
}
+static inline uint32_t drm_bo_type_flags(unsigned type)
+{
+ return (1 << (24 + type));
+}
+
+static inline drm_buffer_object_t *drm_bo_entry(struct list_head *list,
+ unsigned type)
+{
+ switch(type) {
+ case DRM_BO_MEM_LOCAL:
+ case DRM_BO_MEM_TT:
+ return list_entry(list, drm_buffer_object_t, lru_ttm);
+ case DRM_BO_MEM_VRAM:
+ case DRM_BO_MEM_VRAM_NM:
+ return list_entry(list, drm_buffer_object_t, lru_card);
+ default:
+ BUG_ON(1);
+ }
+ return NULL;
+}
+
+static inline drm_mm_node_t *drm_bo_mm_node(drm_buffer_object_t *bo,
+ unsigned type)
+{
+ switch(type) {
+ case DRM_BO_MEM_LOCAL:
+ case DRM_BO_MEM_TT:
+ return bo->node_ttm;
+ case DRM_BO_MEM_VRAM:
+ case DRM_BO_MEM_VRAM_NM:
+ return bo->node_card;
+ default:
+ BUG_ON(1);
+ }
+ return NULL;
+}
+
+/*
+ * bo locked. dev->struct_mutex locked.
+ */
+
+static void drm_bo_add_to_lru(drm_buffer_object_t *buf,
+ drm_buffer_manager_t *bm)
+{
+ struct list_head *list;
+ unsigned mem_type;
+
+ if (buf->flags & DRM_BO_FLAG_MEM_TT) {
+ mem_type = DRM_BO_MEM_TT;
+ list = (buf->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? &bm->pinned[mem_type] : &bm->lru[mem_type];
+ list_add_tail(&buf->lru_ttm, list);
+ } else {
+ mem_type = DRM_BO_MEM_LOCAL;
+ list = (buf->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? &bm->pinned[mem_type] : &bm->lru[mem_type];
+ list_add_tail(&buf->lru_ttm, list);
+ }
+ if (buf->flags & DRM_BO_FLAG_MEM_VRAM) {
+ mem_type = DRM_BO_MEM_VRAM;
+ list = (buf->flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) ? &bm->pinned[mem_type] : &bm->lru[mem_type];
+ list_add_tail(&buf->lru_card, list);
+ }
+}
+
/*
* bo locked.
*/
@@ -69,27 +132,27 @@ static int drm_move_tt_to_local(drm_buff
drm_buffer_manager_t *bm = &dev->bm;
int ret;
- BUG_ON(!buf->tt);
-
- mutex_lock(&dev->struct_mutex);
- if (evict)
- ret = drm_evict_ttm(buf->ttm);
- else
- ret = drm_unbind_ttm(buf->ttm);
+ if (buf->node_ttm) {
+ mutex_lock(&dev->struct_mutex);
+ if (evict)
+ ret = drm_evict_ttm(buf->ttm);
+ else
+ ret = drm_unbind_ttm(buf->ttm);
- if (ret) {
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ if (ret == -EAGAIN)
+ schedule();
+ return ret;
+ }
+
+ drm_mm_put_block(&bm->manager[DRM_BO_MEM_TT], buf->node_ttm);
+ buf->node_ttm = NULL;
mutex_unlock(&dev->struct_mutex);
- if (ret == -EAGAIN)
- schedule();
- return ret;
}
-
- drm_mm_put_block(&bm->tt_manager, buf->tt);
- buf->tt = NULL;
- buf->flags &= ~DRM_BO_MASK_MEM;
+ buf->flags &= ~DRM_BO_FLAG_MEM_TT;
buf->flags |= DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
- mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -103,6 +166,7 @@ static void drm_bo_destroy_locked(drm_de
drm_buffer_manager_t *bm = &dev->bm;
+
DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
/*
@@ -132,8 +196,8 @@ static void drm_bo_destroy_locked(drm_de
* Take away from lru lists.
*/
- list_del_init(&bo->tt_lru);
- list_del_init(&bo->vram_lru);
+ list_del_init(&bo->lru_ttm);
+ list_del_init(&bo->lru_card);
if (bo->ttm) {
unsigned long _end = jiffies + DRM_HZ;
@@ -158,14 +222,15 @@ static void drm_bo_destroy_locked(drm_de
}
}
- if (bo->tt) {
-
- drm_mm_put_block(&bm->tt_manager, bo->tt);
- bo->tt = NULL;
- }
- if (bo->vram) {
- drm_mm_put_block(&bm->vram_manager, bo->vram);
- bo->vram = NULL;
+ if (bo->node_ttm) {
+ drm_mm_put_block(&bm->manager[DRM_BO_MEM_TT],
+ bo->node_ttm);
+ bo->node_ttm = NULL;
+ }
+ if (bo->node_card) {
+ drm_mm_put_block(&bm->manager[DRM_BO_MEM_VRAM],
+ bo->node_card);
+ bo->node_card = NULL;
}
if (bo->ttm_object) {
drm_ttm_object_deref_locked(dev, bo->ttm_object);
@@ -246,7 +311,7 @@ static void drm_bo_delayed_workqueue(voi
drm_bo_delayed_delete(dev);
mutex_lock(&dev->struct_mutex);
- if (!list_empty(&bm->ddestroy)) {
+ if (bm->initialized && !list_empty(&bm->ddestroy)) {
schedule_delayed_work(&bm->wq,
((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
}
@@ -296,14 +361,13 @@ int drm_fence_buffer_objects(drm_file_t
int count = 0;
int ret = 0;
struct list_head f_list, *l;
- struct list_head *q;
mutex_lock(&dev->struct_mutex);
if (!list)
list = &bm->unfenced;
- list_for_each_entry(entry, list, tt_lru) {
+ list_for_each_entry(entry, list, lru_ttm) {
BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
fence_type |= entry->fence_type;
if (entry->fence_class != 0) {
@@ -316,7 +380,6 @@ int drm_fence_buffer_objects(drm_file_t
}
if (!count) {
- DRM_ERROR("No buffers to fence\n");
ret = -EINVAL;
goto out;
}
@@ -350,13 +413,13 @@ int drm_fence_buffer_objects(drm_file_t
count = 0;
l = f_list.next;
while (l != &f_list) {
- entry = list_entry(l, drm_buffer_object_t, tt_lru);
+ entry = list_entry(l, drm_buffer_object_t, lru_ttm);
atomic_inc(&entry->usage);
mutex_unlock(&dev->struct_mutex);
mutex_lock(&entry->mutex);
mutex_lock(&dev->struct_mutex);
list_del_init(l);
- list_del_init(&entry->vram_lru);
+ list_del_init(&entry->lru_card);
if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
count++;
if (entry->fence)
@@ -365,17 +428,7 @@ int drm_fence_buffer_objects(drm_file_t
DRM_FLAG_MASKED(entry->priv_flags, 0,
_DRM_BO_FLAG_UNFENCED);
DRM_WAKEUP(&entry->event_queue);
- if (entry->flags & DRM_BO_FLAG_MEM_TT) {
- q = (entry->flags & DRM_BO_FLAG_NO_EVICT) ?
- &bm->tt_pinned : &bm->tt_lru;
- list_add_tail(&entry->tt_lru, q);
- } else if (entry->flags & DRM_BO_FLAG_MEM_VRAM) {
- q = (entry->flags & DRM_BO_FLAG_NO_EVICT) ?
- &bm->vram_pinned : &bm->vram_lru;
- list_add_tail(&entry->vram_lru, q);
- } else {
- list_add_tail(&entry->tt_lru, &bm->other);
- }
+ drm_bo_add_to_lru(entry, bm);
}
mutex_unlock(&entry->mutex);
drm_bo_usage_deref_locked(dev, entry);
@@ -430,23 +483,24 @@ static int drm_bo_wait(drm_buffer_object
* bo->mutex locked
*/
-static int drm_bo_evict(drm_buffer_object_t * bo, int tt, int no_wait)
+static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
+ int no_wait)
{
int ret = 0;
drm_device_t *dev = bo->dev;
drm_buffer_manager_t *bm = &dev->bm;
+
/*
* Someone might have modified the buffer before we took the buffer mutex.
*/
- if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
- goto out;
- if (tt && !bo->tt)
+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
goto out;
- if (!tt && !bo->vram)
+ if (!(bo->flags & drm_bo_type_flags(mem_type)))
goto out;
ret = drm_bo_wait(bo, 0, 0, no_wait);
+
if (ret) {
if (ret != -EAGAIN)
DRM_ERROR("Failed to expire fence before "
@@ -454,22 +508,26 @@ static int drm_bo_evict(drm_buffer_objec
goto out;
}
- if (tt) {
+ if (mem_type == DRM_BO_MEM_TT) {
ret = drm_move_tt_to_local(bo, 1);
+ if (ret)
+ goto out;
+ mutex_lock(&dev->struct_mutex);
+ list_del(&bo->lru_ttm);
+ list_add_tail(&bo->lru_ttm, &bm->lru[DRM_BO_MEM_LOCAL]);
+ mutex_unlock(&dev->struct_mutex);
}
#if 0
else {
ret = drm_move_vram_to_local(bo);
+ mutex_lock(&dev->struct_mutex);
+ list_del_init(&bo->lru_card);
+ mutex_unlock(&dev->struct_mutex);
}
#endif
if (ret)
goto out;
- mutex_lock(&dev->struct_mutex);
- list_del_init((tt) ? &bo->tt_lru : &bo->vram_lru);
- if (list_empty((tt) ? &bo->vram_lru : &bo->tt_lru))
- list_add_tail(&bo->tt_lru, &bm->other);
- mutex_unlock(&dev->struct_mutex);
DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
_DRM_BO_FLAG_EVICTED);
out:
@@ -480,13 +538,14 @@ static int drm_bo_evict(drm_buffer_objec
* buf->mutex locked.
*/
-int drm_bo_alloc_space(drm_buffer_object_t * buf, int tt, int no_wait)
+int drm_bo_alloc_space(drm_buffer_object_t * buf, unsigned mem_type,
+ int no_wait)
{
drm_device_t *dev = buf->dev;
drm_mm_node_t *node;
drm_buffer_manager_t *bm = &dev->bm;
drm_buffer_object_t *bo;
- drm_mm_t *mm = (tt) ? &bm->tt_manager : &bm->vram_manager;
+ drm_mm_t *mm = &bm->manager[mem_type];
struct list_head *lru;
unsigned long size = buf->num_pages;
int ret;
@@ -497,20 +556,16 @@ int drm_bo_alloc_space(drm_buffer_object
if (node)
break;
- lru = (tt) ? &bm->tt_lru : &bm->vram_lru;
+ lru = &bm->lru[mem_type];
if (lru->next == lru)
break;
- if (tt) {
- bo = list_entry(lru->next, drm_buffer_object_t, tt_lru);
- } else {
- bo = list_entry(lru->next, drm_buffer_object_t, vram_lru);
- }
+ bo = drm_bo_entry(lru->next, mem_type);
atomic_inc(&bo->usage);
mutex_unlock(&dev->struct_mutex);
mutex_lock(&bo->mutex);
- ret = drm_bo_evict(bo, tt, no_wait);
+ ret = drm_bo_evict(bo, mem_type, no_wait);
mutex_unlock(&bo->mutex);
drm_bo_usage_deref_unlocked(dev, bo);
if (ret)
@@ -529,10 +584,10 @@ int drm_bo_alloc_space(drm_buffer_object
BUG_ON(!node);
node->private = (void *)buf;
- if (tt) {
- buf->tt = node;
+ if (mem_type == DRM_BO_MEM_TT) {
+ buf->node_ttm = node;
} else {
- buf->vram = node;
+ buf->node_card = node;
}
buf->offset = node->start * PAGE_SIZE;
return 0;
@@ -545,19 +600,19 @@ static int drm_move_local_to_tt(drm_buff
drm_ttm_backend_t *be;
int ret;
- BUG_ON(bo->tt);
+ BUG_ON(bo->node_ttm);
ret = drm_bo_alloc_space(bo, 1, no_wait);
if (ret)
return ret;
- DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->tt->start);
+ DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->node_ttm->start);
mutex_lock(&dev->struct_mutex);
ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED,
- bo->tt->start);
+ bo->node_ttm->start);
if (ret) {
- drm_mm_put_block(&bm->tt_manager, bo->tt);
- bo->tt = NULL;
+ drm_mm_put_block(&bm->manager[DRM_BO_MEM_TT], bo->node_ttm);
+ bo->node_ttm = NULL;
}
mutex_unlock(&dev->struct_mutex);
@@ -589,25 +644,27 @@ static int drm_bo_new_flags(drm_device_t
uint32_t new_props;
drm_bo_driver_t *driver = dev->driver->bo_driver;
drm_buffer_manager_t *bm = &dev->bm;
+ unsigned i;
/*
- * First adjust the mask.
+ * First adjust the mask to take away nonexistant memory types.
*/
- if (!bm->use_vram)
- new_mask &= ~DRM_BO_FLAG_MEM_VRAM;
- if (!bm->use_tt)
- new_mask &= ~DRM_BO_FLAG_MEM_TT;
+ for (i=0; i<DRM_BO_MEM_TYPES; ++i) {
+ if (!bm->use_type[i])
+ new_mask &= ~drm_bo_type_flags(i);
+ }
if (new_mask & DRM_BO_FLAG_BIND_CACHED) {
- if (((new_mask & DRM_BO_FLAG_MEM_TT) && !driver->cached_tt) &&
+ if (((new_mask & DRM_BO_FLAG_MEM_TT) &&
+ !driver->cached[DRM_BO_MEM_TT]) &&
((new_mask & DRM_BO_FLAG_MEM_VRAM)
- && !driver->cached_vram)) {
+ && !driver->cached[DRM_BO_MEM_VRAM])) {
new_mask &= ~DRM_BO_FLAG_BIND_CACHED;
} else {
- if (!driver->cached_tt)
+ if (!driver->cached[DRM_BO_MEM_TT])
new_flags &= DRM_BO_FLAG_MEM_TT;
- if (!driver->cached_vram)
+ if (!driver->cached[DRM_BO_MEM_VRAM])
new_flags &= DRM_BO_FLAG_MEM_VRAM;
}
}
@@ -766,12 +823,12 @@ static int drm_bo_read_cached(drm_buffer
int ret = 0;
BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
- if (bo->vram)
- ret = drm_bo_evict(bo, 0, 1);
+ if (bo->node_card)
+ ret = drm_bo_evict(bo, DRM_BO_MEM_VRAM, 1);
if (ret)
return ret;
- if (bo->tt)
- ret = drm_bo_evict(bo, 1, 1);
+ if (bo->node_ttm)
+ ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
return ret;
}
@@ -1136,28 +1193,16 @@ static int drm_buffer_object_validate(dr
DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
_DRM_BO_FLAG_UNFENCED);
mutex_lock(&dev->struct_mutex);
- list_del(&bo->tt_lru);
- list_add_tail(&bo->tt_lru, &bm->unfenced);
- list_del_init(&bo->vram_lru);
+ list_del(&bo->lru_ttm);
+ list_add_tail(&bo->lru_ttm, &bm->unfenced);
+ list_del_init(&bo->lru_card);
mutex_unlock(&dev->struct_mutex);
} else {
- struct list_head *q;
mutex_lock(&dev->struct_mutex);
- list_del_init(&bo->tt_lru);
- list_del_init(&bo->vram_lru);
-
- if (new_flags & DRM_BO_FLAG_MEM_TT) {
- q = (new_flags & DRM_BO_FLAG_NO_EVICT) ?
- &bm->tt_pinned : &bm->tt_lru;
- list_add_tail(&bo->tt_lru, q);
- } else if (new_flags & DRM_BO_FLAG_MEM_VRAM) {
- q = (new_flags & DRM_BO_FLAG_NO_EVICT) ?
- &bm->vram_pinned : &bm->vram_lru;
- list_add_tail(&bo->vram_lru, q);
- } else {
- list_add_tail(&bo->tt_lru, &bm->other);
- }
+ list_del_init(&bo->lru_ttm);
+ list_del_init(&bo->lru_card);
+ drm_bo_add_to_lru(bo, bm);
mutex_unlock(&dev->struct_mutex);
DRM_FLAG_MASKED(bo->flags, new_flags, DRM_BO_FLAG_NO_EVICT);
}
@@ -1332,15 +1377,14 @@ int drm_buffer_object_create(drm_file_t
atomic_set(&bo->usage, 1);
atomic_set(&bo->mapped, -1);
DRM_INIT_WAITQUEUE(&bo->event_queue);
- INIT_LIST_HEAD(&bo->tt_lru);
- INIT_LIST_HEAD(&bo->vram_lru);
+ INIT_LIST_HEAD(&bo->lru_ttm);
+ INIT_LIST_HEAD(&bo->lru_card);
INIT_LIST_HEAD(&bo->ddestroy);
- list_add_tail(&bo->tt_lru, &bm->other);
bo->dev = dev;
bo->type = type;
bo->num_pages = num_pages;
- bo->vram = NULL;
- bo->tt = NULL;
+ bo->node_card = NULL;
+ bo->node_ttm = NULL;
if (bo->type == drm_bo_type_fake) {
bo->offset = buffer_start;
bo->buffer_start = 0;
@@ -1540,22 +1584,17 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
*/
static void drm_bo_force_list_clean(drm_device_t *dev,
- struct list_head *head, int tt)
+ struct list_head *head,
+ unsigned mem_type)
{
+ drm_buffer_manager_t *bm = &dev->bm;
struct list_head *l;
drm_buffer_object_t *entry;
- int nice_mode = 1;
int ret;
l = head->next;
while (l != head) {
- if (tt) {
- entry = list_entry(l, drm_buffer_object_t,
- tt_lru);
- } else {
- entry = list_entry(l, drm_buffer_object_t,
- vram_lru);
- }
+ entry = drm_bo_entry(l, mem_type);
atomic_inc(&entry->usage);
mutex_unlock(&dev->struct_mutex);
@@ -1566,89 +1605,200 @@ static void drm_bo_force_list_clean(drm_
*/
if (entry->fence) {
- if (nice_mode) {
+ if (bm->nice_mode) {
unsigned long _end = jiffies + 3*DRM_HZ;
do {
ret = drm_bo_wait(entry, 0, 1, 0);
} while (ret && !time_after_eq(jiffies, _end));
if (entry->fence) {
- nice_mode = 0;
- DRM_ERROR("Detected GPU hang. "
+ bm->nice_mode = 0;
+ DRM_ERROR("Detected GPU hang or "
+ "fence manager was taken down. "
"Evicting waiting buffers\n");
}
}
+
if (entry->fence) {
- drm_fence_usage_deref_unlocked(dev, entry->fence);
+ drm_fence_usage_deref_unlocked(dev,
+ entry->fence);
entry->fence = NULL;
}
}
- ret = drm_bo_evict(entry, tt, 0);
+ ret = drm_bo_evict(entry, mem_type, 0);
if (ret) {
DRM_ERROR("Aargh. Eviction failed.\n");
}
mutex_unlock(&entry->mutex);
mutex_lock(&dev->struct_mutex);
- if (!list_empty(l)) {
- list_del_init(l);
- if (list_empty(&entry->tt_lru) &&
- list_empty(&entry->vram_lru)) {
- list_add_tail(l, &dev->bm.other);
- }
- }
-
drm_bo_usage_deref_locked(dev, entry);
l = head->next;
}
}
-int drm_bo_clean_mm(drm_device_t * dev)
+int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
{
drm_buffer_manager_t *bm = &dev->bm;
- int ret = 0;
+ int ret = -EINVAL;
- mutex_lock(&dev->struct_mutex);
+ if (mem_type >= DRM_BO_MEM_TYPES) {
+ DRM_ERROR("Illegal memory type %d\n", mem_type);
+ return ret;
+ }
- if (!bm->initialized)
- goto out;
+ if (!bm->has_type[mem_type]) {
+ DRM_ERROR("Trying to take down uninitialized "
+ "memory manager type\n");
+ return ret;
+ }
+ bm->use_type[mem_type] = 0;
+ bm->has_type[mem_type] = 0;
+
+ ret = 0;
+ if (mem_type > 0) {
+ drm_bo_force_list_clean(dev, &bm->lru[mem_type], 1);
+ drm_bo_force_list_clean(dev, &bm->pinned[mem_type], 1);
- bm->use_vram = 0;
- bm->use_tt = 0;
+ if (drm_mm_clean(&bm->manager[mem_type])) {
+ drm_mm_takedown(&bm->manager[mem_type]);
+ } else {
+ ret = -EBUSY;
+ }
+ }
- /*
- * FIXME: Need to handle unfenced list.
- */
+ return ret;
+}
- drm_bo_force_list_clean(dev, &bm->tt_lru, 1);
- drm_bo_force_list_clean(dev, &bm->tt_pinned, 1);
- drm_bo_force_list_clean(dev, &bm->vram_lru, 1);
- drm_bo_force_list_clean(dev, &bm->vram_pinned, 1);
-
- if (bm->has_vram) {
- if (drm_mm_clean(&bm->vram_manager)) {
- drm_mm_takedown(&bm->vram_manager);
- bm->has_vram = 0;
- } else
- ret = -EBUSY;
+static int drm_bo_init_mm(drm_device_t *dev,
+ unsigned type,
+ unsigned long p_offset,
+ unsigned long p_size)
+{
+ drm_buffer_manager_t *bm = &dev->bm;
+ int ret = -EINVAL;
+
+ if (type >= DRM_BO_MEM_TYPES) {
+ DRM_ERROR("Illegal memory type %d\n", type);
+ return ret;
+ }
+ if (bm->has_type[type]) {
+ DRM_ERROR("Memory manager already initialized for type %d\n",
+ type);
+ return ret;
}
- if (bm->has_tt) {
- if (drm_mm_clean(&bm->tt_manager)) {
- drm_mm_takedown(&bm->tt_manager);
- bm->has_tt = 0;
- } else
- ret = -EBUSY;
+ ret = 0;
+ if (type != DRM_BO_MEM_LOCAL) {
+ if (!p_size) {
+ DRM_ERROR("Zero size memory manager type %d\n", type);
+ return ret;
+ }
+ ret = drm_mm_init(&bm->manager[type],p_offset, p_size);
+ if (ret)
+ return ret;
+ }
+ bm->has_type[type] = 1;
+ bm->use_type[type] = 1;
+
+ INIT_LIST_HEAD(&bm->lru[type]);
+ INIT_LIST_HEAD(&bm->pinned[type]);
+
+ return 0;
+}
+
+
+/*
+ * call dev->struct_mutex locked;
+ */
+
+static void drm_bo_release_unfenced(drm_buffer_manager_t *bm)
+{
+ struct list_head *list, *next;
- if (!ret)
- bm->initialized = 0;
+ list_for_each_safe(list, next, &bm->unfenced) {
+ list_del(list);
+ list_add_tail(list, &bm->lru[0]);
}
+}
- out:
+int drm_bo_driver_finish(drm_device_t *dev)
+{
+ drm_buffer_manager_t *bm = &dev->bm;
+ int ret = 0;
+ unsigned i = DRM_BO_MEM_TYPES;
+
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+
+ if (!bm->initialized)
+ goto out;
+ drm_bo_release_unfenced(bm);
+
+ while(i--) {
+ if (bm->has_type[i]) {
+ bm->use_type[i] = 0;
+ if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
+ ret = -EBUSY;
+ DRM_ERROR("DRM memory manager type %d "
+ "is not clean.\n", i);
+ }
+ bm->has_type[i] = 0;
+ }
+ }
+ mutex_unlock(&dev->struct_mutex);
+ drm_bo_delayed_delete(dev);
+ mutex_lock(&dev->struct_mutex);
+ bm->initialized = 0;
mutex_unlock(&dev->struct_mutex);
+ if (!cancel_delayed_work(&bm->wq)) {
+ flush_scheduled_work();
+ }
+ mutex_lock(&dev->struct_mutex);
+ out:
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->bm.init_mutex);
+ return ret;
+}
+
+int drm_bo_driver_init(drm_device_t *dev)
+{
+ drm_bo_driver_t *driver = dev->driver->bo_driver;
+ drm_buffer_manager_t *bm = &dev->bm;
+ int ret = -EINVAL;
+ struct sysinfo si;
+
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+ if (!driver)
+ goto out_unlock;
+
+ /*
+ * Initialize the system memory buffer type.
+ * Other types need to be driver / IOCTL initialized.
+ */
+
+ ret = drm_bo_init_mm(dev, 0, 0, 0);
+ if (ret)
+ goto out_unlock;
+
+ INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
+ bm->initialized = 1;
+ bm->nice_mode = 1;
+ atomic_set(&bm->count, 0);
+ bm->cur_pages = 0;
+ si_meminfo(&si);
+ bm->max_pages = si.totalram >> 1;
+ INIT_LIST_HEAD(&bm->unfenced);
+ INIT_LIST_HEAD(&bm->ddestroy);
+ out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->bm.init_mutex);
return ret;
}
+EXPORT_SYMBOL(drm_bo_driver_init);
+
int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
{
@@ -1668,71 +1818,69 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
switch (arg.req.op) {
case mm_init:
- if (bm->initialized) {
- DRM_ERROR("Memory manager already initialized\n");
- return -EINVAL;
- }
+ ret = -EINVAL;
+ mutex_lock(&dev->bm.init_mutex);
mutex_lock(&dev->struct_mutex);
- bm->has_vram = 0;
- bm->has_tt = 0;
-
- if (arg.req.vr_p_size) {
- ret = drm_mm_init(&bm->vram_manager,
- arg.req.vr_p_offset,
- arg.req.vr_p_size);
- bm->has_vram = 1;
- /*
- * VRAM not supported yet.
- */
-
- bm->use_vram = 0;
- if (ret)
- break;
+ if (!bm->initialized) {
+ DRM_ERROR("DRM memory manager was not initialized.\n");
+ break;
}
-
- if (arg.req.tt_p_size) {
- ret = drm_mm_init(&bm->tt_manager,
- arg.req.tt_p_offset,
- arg.req.tt_p_size);
- bm->has_tt = 1;
- bm->use_tt = 1;
-
- if (ret) {
- if (bm->has_vram)
- drm_mm_takedown(&bm->vram_manager);
- break;
- }
+ if (arg.req.mem_type == 0) {
+ DRM_ERROR("System memory buffers already initialized.\n");
+ break;
}
- arg.rep.mm_sarea = 0;
-
- INIT_LIST_HEAD(&bm->vram_lru);
- INIT_LIST_HEAD(&bm->tt_lru);
- INIT_LIST_HEAD(&bm->vram_pinned);
- INIT_LIST_HEAD(&bm->tt_pinned);
- INIT_LIST_HEAD(&bm->unfenced);
- INIT_LIST_HEAD(&bm->ddestroy);
- INIT_LIST_HEAD(&bm->other);
-
- INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
- bm->initialized = 1;
- atomic_set(&bm->count, 0);
- bm->cur_pages = 0;
- bm->max_pages = arg.req.max_locked_pages;
- mutex_unlock(&dev->struct_mutex);
+ ret = drm_bo_init_mm(dev, arg.req.mem_type,
+ arg.req.p_offset,
+ arg.req.p_size);
break;
case mm_takedown:
LOCK_TEST_WITH_RETURN(dev, filp);
- if (drm_bo_clean_mm(dev)) {
- DRM_ERROR("Memory manager not clean. "
- "Delaying takedown\n");
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+ ret = -EINVAL;
+ if (!bm->initialized) {
+ DRM_ERROR("DRM memory manager was not initialized\n");
+ break;
+ }
+ if (arg.req.mem_type == 0) {
+ DRM_ERROR("No takedown for System memory buffers.\n");
+ break;
+ }
+ ret = 0;
+ if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
+ DRM_ERROR("Memory manager type %d not clean. "
+ "Delaying takedown\n", arg.req.mem_type);
}
- DRM_DEBUG("We have %ld still locked pages\n", bm->cur_pages);
break;
+ case mm_set_max_pages: {
+ struct sysinfo si;
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+ if (arg.req.p_size < bm->cur_pages) {
+ DRM_ERROR("Cannot currently decrease max number of "
+ "locked pages below the number currently "
+ "locked.\n");
+ ret = -EINVAL;
+ break;
+ }
+ si_meminfo(&si);
+ if (arg.req.p_size > si.totalram) {
+ DRM_ERROR("Cannot set max number of locked pages "
+ "to %lu since the total number of RAM pages "
+ "is %lu.\n", (unsigned long) arg.req.p_size,
+ (unsigned long) si.totalram);
+ ret = -EINVAL;
+ break;
+ }
+ bm->max_pages = arg.req.p_size;
+ }
default:
DRM_ERROR("Function not implemented yet\n");
return -EINVAL;
}
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->bm.init_mutex);
if (ret)
return ret;
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index c7f0f48..43b4f8d 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -149,6 +149,11 @@ int drm_lastclose(drm_device_t * dev)
DRM_DEBUG("\n");
+ if (drm_bo_driver_finish(dev)) {
+ DRM_ERROR("DRM memory manager still busy. "
+ "System is unstable. Please reboot.\n");
+ }
+
if (dev->driver->lastclose)
dev->driver->lastclose(dev);
DRM_DEBUG("driver lastclose completed\n");
@@ -265,10 +270,6 @@ int drm_lastclose(drm_device_t * dev)
dev->dev_mapping = NULL;
mutex_unlock(&dev->struct_mutex);
- if (drm_bo_clean_mm(dev)) {
- DRM_ERROR("DRM memory manager still busy. "
- "System is unstable. Please reboot.\n");
- }
DRM_DEBUG("lastclose completed\n");
return 0;
}
diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c
index e3db07d..6631056 100644
--- a/linux-core/drm_stub.c
+++ b/linux-core/drm_stub.c
@@ -71,6 +71,7 @@ static int drm_fill_in_dev(drm_device_t
init_timer(&dev->timer);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
+ mutex_init(&dev->bm.init_mutex);
dev->pdev = pdev;
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index 1263bca..2c5b43d 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -51,9 +51,8 @@ static drm_fence_driver_t i915_fence_dri
#endif
#ifdef I915_HAVE_BUFFER
static drm_bo_driver_t i915_bo_driver = {
- .vram_map = NULL,
- .cached_vram = 0,
- .cached_tt = 1,
+ .iomap = {NULL, NULL},
+ .cached = {1, 1},
.create_ttm_backend_entry = i915_create_ttm_backend_entry,
.fence_type = i915_fence_types,
.invalidate_caches = i915_invalidate_caches
diff --git a/shared-core/drm.h b/shared-core/drm.h
index 32cad3b..915befb 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -680,6 +680,7 @@ typedef struct drm_fence_arg {
unsigned type;
unsigned flags;
unsigned signaled;
+ unsigned expand_pad[4]; /*Future expansion */
enum {
drm_fence_create,
drm_fence_destroy,
@@ -732,12 +733,14 @@ typedef struct drm_fence_arg {
/* Bind this buffer cached if the hardware supports it. */
#define DRM_BO_FLAG_BIND_CACHED 0x0002000
-/* Translation table aperture */
-#define DRM_BO_FLAG_MEM_TT 0x01000000
-/* On-card VRAM */
-#define DRM_BO_FLAG_MEM_VRAM 0x02000000
-/* System memory */
-#define DRM_BO_FLAG_MEM_LOCAL 0x04000000
+/* System Memory */
+#define DRM_BO_FLAG_MEM_LOCAL 0x01000000
+/* Translation table memory */
+#define DRM_BO_FLAG_MEM_TT 0x02000000
+/* Vram memory */
+#define DRM_BO_FLAG_MEM_VRAM 0x04000000
+/* Unmappable Vram memory */
+#define DRM_BO_FLAG_MEM_VRAM_NM 0x08000000
/* Memory flag mask */
#define DRM_BO_MASK_MEM 0xFF000000
@@ -769,6 +772,7 @@ typedef struct drm_bo_arg_request {
drm_bo_type_t type;
unsigned arg_handle;
drm_u64_t buffer_start;
+ unsigned expand_pad[4]; /*Future expansion */
enum {
drm_bo_create,
drm_bo_validate,
@@ -802,6 +806,7 @@ typedef struct drm_bo_arg_reply {
drm_u64_t buffer_start;
unsigned fence_flags;
unsigned rep_flags;
+ unsigned expand_pad[4]; /*Future expansion */
}drm_bo_arg_reply_t;
@@ -814,23 +819,30 @@ typedef struct drm_bo_arg{
} d;
} drm_bo_arg_t;
+#define DRM_BO_MEM_LOCAL 0
+#define DRM_BO_MEM_TT 1
+#define DRM_BO_MEM_VRAM 2
+#define DRM_BO_MEM_VRAM_NM 3
+#define DRM_BO_MEM_TYPES 2 /* For now. */
+
typedef union drm_mm_init_arg{
struct {
enum {
mm_init,
+ mm_set_max_pages,
mm_takedown,
mm_query,
mm_lock,
mm_unlock
} op;
- drm_u64_t vr_p_offset;
- drm_u64_t vr_p_size;
- drm_u64_t tt_p_offset;
- drm_u64_t tt_p_size;
- drm_u64_t max_locked_pages;
+ drm_u64_t p_offset;
+ drm_u64_t p_size;
+ unsigned mem_type;
+ unsigned expand_pad[8]; /*Future expansion */
} req;
struct {
drm_handle_t mm_sarea;
+ unsigned expand_pad[8]; /*Future expansion */
} rep;
} drm_mm_init_arg_t;
#endif
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index 841761c..8c701b4 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -197,7 +197,9 @@ static int i915_initialize(drm_device_t
I915_WRITE(0x02080, dev_priv->dma_status_page);
DRM_DEBUG("Enabled hardware status page\n");
dev->dev_private = (void *)dev_priv;
-
+#ifdef I915_HAVE_BUFFER
+ drm_bo_driver_init(dev);
+#endif
return 0;
}
diff-tree 5b2a60f550090a41c13483ceaaa1a84d3a9257f8 (from \
540c64c378daafaad1c3f63faf5af81f39388665)
Author: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Date: Mon Oct 16 14:22:27 2006 +0200
Change Intel AGP memory type numbers.
diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c
deleted file mode 120000
index d64bbe1..0000000
--- a/linux-core/drm_drawable.c
+++ /dev/null
@@ -1 +0,0 @@
-../shared-core/drm_drawable.c
\ No newline at end of file
diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c
index 8016bb1..8a3d7bf 100644
--- a/linux-core/i915_buffer.c
+++ b/linux-core/i915_buffer.c
@@ -33,8 +33,8 @@
#include "i915_drm.h"
#include "i915_drv.h"
-#define INTEL_AGP_MEM_USER (1 << 16)
-#define INTEL_AGP_MEM_UCACHED (2 << 16)
+#define INTEL_AGP_MEM_USER 3
+#define INTEL_AGP_MEM_UCACHED 4
drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev)
{
-------------------------------------------------------------------------
Using Tomcat but need to do more? Need to support web services, security?
Get stuff done quickly with pre-integrated technology to make your job easier
Download IBM WebSphere Application Server v.1.0.1 based on Apache Geronimo
http://sel.as-us.falkag.net/sel?cmd=lnk&kid=120709&bid=263057&dat=121642
--
_______________________________________________
Dri-patches mailing list
Dri-patches@lists.sourceforge.net
https://lists.sourceforge.net/lists/listinfo/dri-patches
[prev in list] [next in list] [prev in thread] [next in thread]
Configure |
About |
News |
Add a list |
Sponsored by KoreLogic