/**
- * \file drm_memory.h
+ * \file drm_memory_debug.h
* Memory management wrappers for DRM.
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
unsigned long bytes_freed;
} drm_mem_stats_t;
-static DEFINE_SPINLOCK(DRM(mem_lock));
-static unsigned long DRM(ram_available) = 0; /* In pages */
-static unsigned long DRM(ram_used) = 0;
-static drm_mem_stats_t DRM(mem_stats)[] =
+static DEFINE_SPINLOCK(drm_mem_lock);
+static unsigned long drm_ram_available = 0; /* In pages */
+static unsigned long drm_ram_used = 0;
+static drm_mem_stats_t drm_mem_stats[] =
{
- [DRM_MEM_DMA] = {
- "dmabufs"},[DRM_MEM_SAREA] = {
- "sareas"},[DRM_MEM_DRIVER] = {
- "driver"},[DRM_MEM_MAGIC] = {
- "magic"},[DRM_MEM_IOCTLS] = {
- "ioctltab"},[DRM_MEM_MAPS] = {
- "maplist"},[DRM_MEM_VMAS] = {
- "vmalist"},[DRM_MEM_BUFS] = {
- "buflist"},[DRM_MEM_SEGS] = {
- "seglist"},[DRM_MEM_PAGES] = {
- "pagelist"},[DRM_MEM_FILES] = {
- "files"},[DRM_MEM_QUEUES] = {
- "queues"},[DRM_MEM_CMDS] = {
- "commands"},[DRM_MEM_MAPPINGS] = {
- "mappings"},[DRM_MEM_BUFLISTS] = {
- "buflists"},[DRM_MEM_AGPLISTS] = {
- "agplist"},[DRM_MEM_SGLISTS] = {
- "sglist"},[DRM_MEM_TOTALAGP] = {
- "totalagp"},[DRM_MEM_BOUNDAGP] = {
- "boundagp"},[DRM_MEM_CTXBITMAP] = {
- "ctxbitmap"},[DRM_MEM_CTXLIST] = {
- "ctxlist"},[DRM_MEM_STUB] = {
- "stub"}, {
- NULL, 0,} /* Last entry must be null */
+ [DRM_MEM_DMA] = {"dmabufs"},
+ [DRM_MEM_SAREA] = {"sareas"},
+ [DRM_MEM_DRIVER] = {"driver"},
+ [DRM_MEM_MAGIC] = {"magic"},
+ [DRM_MEM_IOCTLS] = {"ioctltab"},
+ [DRM_MEM_MAPS] = {"maplist"},
+ [DRM_MEM_VMAS] = {"vmalist"},
+ [DRM_MEM_BUFS] = {"buflist"},
+ [DRM_MEM_SEGS] = {"seglist"},
+ [DRM_MEM_PAGES] = {"pagelist"},
+ [DRM_MEM_FILES] = {"files"},
+ [DRM_MEM_QUEUES] = {"queues"},
+ [DRM_MEM_CMDS] = {"commands"},
+ [DRM_MEM_MAPPINGS] = {"mappings"},
+ [DRM_MEM_BUFLISTS] = {"buflists"},
+ [DRM_MEM_AGPLISTS] = {"agplist"},
+ [DRM_MEM_SGLISTS] = {"sglist"},
+ [DRM_MEM_TOTALAGP] = {"totalagp"},
+ [DRM_MEM_BOUNDAGP] = {"boundagp"},
+ [DRM_MEM_CTXBITMAP] = {"ctxbitmap"},
+ [DRM_MEM_CTXLIST] = {"ctxlist"},
+ [DRM_MEM_STUB] = {"stub"},
+ {NULL, 0,} /* Last entry must be null */
};
-void DRM(mem_init) (void) {
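+/* Reset every per-area counter and sample total system RAM (in pages). */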
+void drm_mem_init(void) {
drm_mem_stats_t *mem;
struct sysinfo si;
- for (mem = DRM(mem_stats); mem->name; ++mem) {
+ for (mem = drm_mem_stats; mem->name; ++mem) {
mem->succeed_count = 0;
mem->free_count = 0;
mem->fail_count = 0;
}
si_meminfo(&si);
- DRM(ram_available) = si.totalram;
- DRM(ram_used) = 0;
+ drm_ram_available = si.totalram;
+ drm_ram_used = 0;
}
/* drm_mem_info is called whenever a process reads /dev/drm/mem. */
-static int DRM(_mem_info) (char *buf, char **start, off_t offset,
+static int drm__mem_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data) {
drm_mem_stats_t *pt;
int len = 0;
" | allocs bytes\n\n");
DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
"system", 0, 0, 0,
- DRM(ram_available) << (PAGE_SHIFT - 10));
+ drm_ram_available << (PAGE_SHIFT - 10));
DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB |\n",
- "locked", 0, 0, 0, DRM(ram_used) >> 10);
+ "locked", 0, 0, 0, drm_ram_used >> 10);
DRM_PROC_PRINT("\n");
- for (pt = DRM(mem_stats); pt->name; pt++) {
+ for (pt = drm_mem_stats; pt->name; pt++) {
DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
pt->name,
pt->succeed_count,
return len - offset;
}
-int DRM(mem_info) (char *buf, char **start, off_t offset,
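+/* Serialized entry point: takes drm_mem_lock around drm__mem_info(). */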
+int drm_mem_info(char *buf, char **start, off_t offset,
int len, int *eof, void *data) {
int ret;
- spin_lock(&DRM(mem_lock));
- ret = DRM(_mem_info) (buf, start, offset, len, eof, data);
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ret = drm__mem_info(buf, start, offset, len, eof, data);
+ spin_unlock(&drm_mem_lock);
return ret;
}
-void *DRM(alloc) (size_t size, int area) {
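+/* kmalloc() wrapper that records successes, failures and allocated bytes per area. */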
+void *drm_alloc(size_t size, int area) {
void *pt;
if (!size) {
}
if (!(pt = kmalloc(size, GFP_KERNEL))) {
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[area].fail_count;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[area].fail_count;
+ spin_unlock(&drm_mem_lock);
return NULL;
}
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[area].succeed_count;
- DRM(mem_stats)[area].bytes_allocated += size;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[area].succeed_count;
+ drm_mem_stats[area].bytes_allocated += size;
+ spin_unlock(&drm_mem_lock);
return pt;
}
-void *DRM(calloc) (size_t nmemb, size_t size, int area) {
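+/* Zeroing allocator built on top of drm_alloc(). */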
+void *drm_calloc(size_t nmemb, size_t size, int area) {
void *addr;
- addr = DRM(alloc) (nmemb * size, area);
+ addr = drm_alloc(nmemb * size, area);
if (addr != NULL)
memset((void *)addr, 0, size * nmemb);
return addr;
}
-void *DRM(realloc) (void *oldpt, size_t oldsize, size_t size, int area) {
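+/* Allocates a new block, copies the old contents, then frees the old block. */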
+void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area) {
void *pt;
- if (!(pt = DRM(alloc) (size, area)))
+ if (!(pt = drm_alloc(size, area)))
return NULL;
if (oldpt && oldsize) {
memcpy(pt, oldpt, oldsize);
- DRM(free) (oldpt, oldsize, area);
+ drm_free(oldpt, oldsize, area);
}
return pt;
}
-void DRM(free) (void *pt, size_t size, int area) {
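+/* kfree() wrapper; warns when an area frees more blocks than it allocated. */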
+void drm_free(void *pt, size_t size, int area) {
int alloc_count;
int free_count;
DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
else
kfree(pt);
- spin_lock(&DRM(mem_lock));
- DRM(mem_stats)[area].bytes_freed += size;
- free_count = ++DRM(mem_stats)[area].free_count;
- alloc_count = DRM(mem_stats)[area].succeed_count;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ drm_mem_stats[area].bytes_freed += size;
+ free_count = ++drm_mem_stats[area].free_count;
+ alloc_count = drm_mem_stats[area].succeed_count;
+ spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
free_count, alloc_count);
}
}
-unsigned long DRM(alloc_pages) (int order, int area) {
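+/* __get_free_pages() wrapper; refuses to push usage past DRM_RAM_PERCENT of system RAM. */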
+unsigned long drm_alloc_pages(int order, int area) {
unsigned long address;
unsigned long bytes = PAGE_SIZE << order;
unsigned long addr;
unsigned int sz;
- spin_lock(&DRM(mem_lock));
- if ((DRM(ram_used) >> PAGE_SHIFT)
- > (DRM_RAM_PERCENT * DRM(ram_available)) / 100) {
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ if ((drm_ram_used >> PAGE_SHIFT)
+ > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
+ spin_unlock(&drm_mem_lock);
return 0;
}
- spin_unlock(&DRM(mem_lock));
+ spin_unlock(&drm_mem_lock);
address = __get_free_pages(GFP_KERNEL, order);
if (!address) {
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[area].fail_count;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[area].fail_count;
+ spin_unlock(&drm_mem_lock);
return 0;
}
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[area].succeed_count;
- DRM(mem_stats)[area].bytes_allocated += bytes;
- DRM(ram_used) += bytes;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[area].succeed_count;
+ drm_mem_stats[area].bytes_allocated += bytes;
+ drm_ram_used += bytes;
+ spin_unlock(&drm_mem_lock);
/* Zero outside the lock */
memset((void *)address, 0, bytes);
return address;
}
-void DRM(free_pages) (unsigned long address, int order, int area) {
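+/* Releases pages obtained from drm_alloc_pages() and updates the RAM accounting. */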
+void drm_free_pages(unsigned long address, int order, int area) {
unsigned long bytes = PAGE_SIZE << order;
int alloc_count;
int free_count;
free_pages(address, order);
}
- spin_lock(&DRM(mem_lock));
- free_count = ++DRM(mem_stats)[area].free_count;
- alloc_count = DRM(mem_stats)[area].succeed_count;
- DRM(mem_stats)[area].bytes_freed += bytes;
- DRM(ram_used) -= bytes;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ free_count = ++drm_mem_stats[area].free_count;
+ alloc_count = drm_mem_stats[area].succeed_count;
+ drm_mem_stats[area].bytes_freed += bytes;
+ drm_ram_used -= bytes;
+ spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(area,
"Excess frees: %d frees, %d allocs\n",
}
}
-void *DRM(ioremap) (unsigned long offset, unsigned long size,
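+/* I/O remapping with accounting in the DRM_MEM_MAPPINGS area. */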
+void *drm_ioremap(unsigned long offset, unsigned long size,
drm_device_t * dev) {
void *pt;
}
if (!(pt = drm_ioremap(offset, size, dev))) {
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
+ spin_unlock(&drm_mem_lock);
return NULL;
}
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
- DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
+ drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
+ spin_unlock(&drm_mem_lock);
return pt;
}
-void *DRM(ioremap_nocache) (unsigned long offset, unsigned long size,
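+/* Uncached variant of the accounted I/O remapping above. */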
+void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
drm_device_t * dev) {
void *pt;
}
if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
+ spin_unlock(&drm_mem_lock);
return NULL;
}
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
- DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
+ drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
+ spin_unlock(&drm_mem_lock);
return pt;
}
-void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) {
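+/* Tears down an accounted I/O mapping and updates the DRM_MEM_MAPPINGS counters. */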
+void drm_ioremapfree(void *pt, unsigned long size, drm_device_t * dev) {
int alloc_count;
int free_count;
else
drm_ioremapfree(pt, size, dev);
- spin_lock(&DRM(mem_lock));
- DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size;
- free_count = ++DRM(mem_stats)[DRM_MEM_MAPPINGS].free_count;
- alloc_count = DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
+ free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
+ alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
+ spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
"Excess frees: %d frees, %d allocs\n",
#if __OS_HAS_AGP
-DRM_AGP_MEM *DRM(alloc_agp) (int pages, u32 type) {
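+/* AGP allocation with accounting in the DRM_MEM_TOTALAGP area. */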
+DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type) {
DRM_AGP_MEM *handle;
if (!pages) {
return NULL;
}
- if ((handle = DRM(agp_allocate_memory) (pages, type))) {
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
- DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_allocated
+ if ((handle = drm_agp_allocate_memory(pages, type))) {
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+ drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
+= pages << PAGE_SHIFT;
- spin_unlock(&DRM(mem_lock));
+ spin_unlock(&drm_mem_lock);
return handle;
}
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[DRM_MEM_TOTALAGP].fail_count;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
+ spin_unlock(&drm_mem_lock);
return NULL;
}
-int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) {
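+/* Frees AGP memory via drm_agp_free_memory() and updates DRM_MEM_TOTALAGP. */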
+int drm_free_agp(DRM_AGP_MEM * handle, int pages) {
int alloc_count;
int free_count;
int retval = -EINVAL;
return retval;
}
- if (DRM(agp_free_memory) (handle)) {
- spin_lock(&DRM(mem_lock));
- free_count = ++DRM(mem_stats)[DRM_MEM_TOTALAGP].free_count;
- alloc_count = DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
- DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_freed
+ if (drm_agp_free_memory(handle)) {
+ spin_lock(&drm_mem_lock);
+ free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
+ alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+ drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
+= pages << PAGE_SHIFT;
- spin_unlock(&DRM(mem_lock));
+ spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
"Excess frees: %d frees, %d allocs\n",
return retval;
}
-int DRM(bind_agp) (DRM_AGP_MEM * handle, unsigned int start) {
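+/* Binds AGP memory and accounts the bound pages under DRM_MEM_BOUNDAGP. */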
+int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) {
int retcode = -EINVAL;
if (!handle) {
return retcode;
}
- if (!(retcode = DRM(agp_bind_memory) (handle, start))) {
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
- DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_allocated
+ if (!(retcode = drm_agp_bind_memory(handle, start))) {
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+ drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
+= handle->page_count << PAGE_SHIFT;
- spin_unlock(&DRM(mem_lock));
+ spin_unlock(&drm_mem_lock);
return retcode;
}
- spin_lock(&DRM(mem_lock));
- ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].fail_count;
- spin_unlock(&DRM(mem_lock));
+ spin_lock(&drm_mem_lock);
+ ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
+ spin_unlock(&drm_mem_lock);
return retcode;
}
-int DRM(unbind_agp) (DRM_AGP_MEM * handle) {
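+/* Unbinds AGP memory and updates the DRM_MEM_BOUNDAGP counters. */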
+int drm_unbind_agp(DRM_AGP_MEM * handle) {
int alloc_count;
int free_count;
int retcode = -EINVAL;
return retcode;
}
- if ((retcode = DRM(agp_unbind_memory) (handle)))
+ if ((retcode = drm_agp_unbind_memory(handle)))
return retcode;
- spin_lock(&DRM(mem_lock));
- free_count = ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].free_count;
- alloc_count = DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
- DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_freed
+ spin_lock(&drm_mem_lock);
+ free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
+ alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+ drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed
+= handle->page_count << PAGE_SHIFT;
- spin_unlock(&DRM(mem_lock));
+ spin_unlock(&drm_mem_lock);
if (free_count > alloc_count) {
DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
"Excess frees: %d frees, %d allocs\n",