return NULL;
}
-/*
- * Used to allocate 32-bit handles for mappings.
- */
-#define START_RANGE 0x10000000
-#define END_RANGE 0x40000000
-
-#ifdef _LP64
-static __inline__ unsigned int HandleID(unsigned long lhandle,
- drm_device_t *dev)
+static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
+ unsigned long user_token, int hashed_handle)
{
- static unsigned int map32_handle = START_RANGE;
- unsigned int hash;
-
- if (lhandle & 0xffffffff00000000) {
- hash = map32_handle;
- map32_handle += PAGE_SIZE;
- if (map32_handle > END_RANGE)
- map32_handle = START_RANGE;
- } else
- hash = lhandle;
-
- while (1) {
- drm_map_list_t *_entry;
- list_for_each_entry(_entry, &dev->maplist->head, head) {
- if (_entry->user_token == hash)
- break;
- }
- if (&_entry->head == &dev->maplist->head)
- return hash;
+ int use_hashed_handle;
+#if (BITS_PER_LONG == 64)
+ use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
+#elif (BITS_PER_LONG == 32)
+ use_hashed_handle = hashed_handle;
+#else
+#error Unsupported long size. Neither 64 nor 32 bits.
+#endif
- hash += PAGE_SIZE;
- map32_handle += PAGE_SIZE;
+ if (use_hashed_handle) {
+ return drm_ht_just_insert_please(&dev->map_hash, hash,
+ user_token, 32 - PAGE_SHIFT - 3,
+ PAGE_SHIFT, DRM_MAP_HASH_OFFSET);
+ } else {
+ hash->key = user_token;
+ return drm_ht_insert_item(&dev->map_hash, hash);
}
}
-#else
-# define HandleID(x,dev) (unsigned int)(x)
-#endif
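
The 64-bit branch above replaces the removed linear scan: any user token that does not fit in 32 bits is swapped for a synthetic, page-aligned key from the map hash. Assuming the drm_hashtab helper probes for a free key of the form (hash << shift) + add, and assuming the usual DRM_MAP_HASH_OFFSET of 0x10000000UL (neither definition is shown in this hunk), the arguments chosen here bound every hashed handle to a 32-bit range:

	/* Sketch only: the key range implied by the arguments above. */
	unsigned long lo = DRM_MAP_HASH_OFFSET;
	unsigned long hi = DRM_MAP_HASH_OFFSET +
			   (((1UL << (32 - PAGE_SHIFT - 3)) - 1) << PAGE_SHIFT);
	/*
	 * With PAGE_SHIFT == 12: lo == 0x10000000, hi == 0x2ffff000.
	 * Every handle stays page aligned and fits in 32 bits, so a
	 * 32-bit client can still pass it back as an mmap offset.
	 */
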
/**
* Ioctl to specify a range of memory that is available for mapping by a non-root process.
drm_map_t *map;
drm_map_list_t *list;
drm_dma_handle_t *dmah;
+ unsigned long user_token;
+ int ret;
map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
if (!map)
memset(list, 0, sizeof(*list));
list->map = map;
- down(&dev->struct_sem);
+ mutex_lock(&dev->struct_mutex);
list_add(&list->head, &dev->maplist->head);
+
/* Assign a 32-bit handle */
- /* We do it here so that dev->struct_sem protects the increment */
- list->user_token = HandleID(map->type == _DRM_SHM
- ? (unsigned long)map->handle
- : map->offset, dev);
- up(&dev->struct_sem);
+ /* We do it here so that dev->struct_mutex protects the hash insertion */
+ user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
+ map->offset;
+ ret = drm_map_handle(dev, &list->hash, user_token, 0);
+ if (ret) {
+ list_del(&list->head);
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ drm_free(list, sizeof(*list), DRM_MEM_MAPS);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
+
+ list->user_token = list->hash.key;
+ mutex_unlock(&dev->struct_mutex);
*maplist = list;
return 0;
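
For context (not part of this patch): the token stored in list->user_token is what the addmap ioctl hands back to userspace, which then uses it as the mmap offset. A hypothetical client-side sketch, with fd, regs_base and regs_size standing in for real values, and headers such as <sys/mman.h>, <sys/ioctl.h> and the libdrm drm.h assumed:

	drm_map_t map = {0};
	map.offset = regs_base;		/* physical base of the region */
	map.size = regs_size;
	map.type = _DRM_REGISTERS;
	ioctl(fd, DRM_IOCTL_ADD_MAP, &map);	/* kernel rewrites map.handle */
	void *regs = mmap(NULL, map.size, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, (off_t)(unsigned long)map.handle);

This is why the handle must be unique per device and page aligned, whether it is the raw token or a hashed stand-in.
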
if (r_list->map == map) {
list_del(list);
+ drm_ht_remove_key(&dev->map_hash, r_list->user_token);
drm_free(list, sizeof(*list), DRM_MEM_MAPS);
break;
}
return 0;
}
-EXPORT_SYMBOL(drm_rmmap_locked);
int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
int ret;
- down(&dev->struct_sem);
+ mutex_lock(&dev->struct_mutex);
ret = drm_rmmap_locked(dev, map);
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
return ret;
}
-EXPORT_SYMBOL(drm_rmmap);
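
Throughout the file, down()/up() on dev->struct_sem becomes mutex_lock()/mutex_unlock() on dev->struct_mutex: the same sleeping-lock semantics, but with the mutex debugging code able to catch owner and double-unlock mistakes. A minimal sketch of the pattern, assuming only <linux/mutex.h>:

	mutex_lock(&dev->struct_mutex);		/* was: down(&dev->struct_sem); */
	/* ... walk or modify dev->maplist, dev->dma ... */
	mutex_unlock(&dev->struct_mutex);	/* was: up(&dev->struct_sem); */

The conversion also flushes out unlock-on-error bugs: note the !map hunk below, where the old code returned -EINVAL with the lock still held.
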
/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
* the last close of the device, and this is necessary for cleanup when things
return -EFAULT;
}
- down(&dev->struct_sem);
+ mutex_lock(&dev->struct_mutex);
list_for_each(list, &dev->maplist->head) {
drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
* find anything.
*/
if (list == (&dev->maplist->head)) {
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
- if (!map)
+ if (!map) {
+ mutex_unlock(&dev->struct_mutex);
return -EINVAL;
+ }
/* Register and framebuffer maps are permanent */
if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
ret = drm_rmmap_locked(dev, map);
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
return ret;
}
if (entry->seg_count) {
for (i = 0; i < entry->seg_count; i++) {
if (entry->seglist[i]) {
- drm_free_pages(entry->seglist[i],
- entry->page_order, DRM_MEM_DMA);
+ drm_pci_free(dev, entry->seglist[i]);
}
}
drm_free(entry->seglist,
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->count_lock);
- down(&dev->struct_sem);
+ mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
if (entry->buf_count) {
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
if (count < 0 || count > 4096) {
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -EINVAL;
}
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
drm_cleanup_buf_error(dev, entry);
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
if (!temp_buflist) {
/* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
request->count = entry->buf_count;
request->size = size;
int total;
int page_order;
drm_buf_entry_t *entry;
- unsigned long page;
+ drm_dma_handle_t *dmah;
drm_buf_t *buf;
int alignment;
unsigned long offset;
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->count_lock);
- down(&dev->struct_sem);
+ mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
if (entry->buf_count) {
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
if (count < 0 || count > 4096) {
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -EINVAL;
}
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
if (!entry->seglist) {
drm_free(entry->buflist,
count * sizeof(*entry->buflist), DRM_MEM_BUFS);
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
count * sizeof(*entry->buflist), DRM_MEM_BUFS);
drm_free(entry->seglist,
count * sizeof(*entry->seglist), DRM_MEM_SEGS);
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
page_count = 0;
while (entry->buf_count < count) {
- page = drm_alloc_pages(page_order, DRM_MEM_DMA);
- if (!page) {
+
+ dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
+
+ if (!dmah) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
entry->seg_count = count;
drm_free(temp_pagelist,
(dma->page_count + (count << page_order))
* sizeof(*dma->pagelist), DRM_MEM_PAGES);
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- entry->seglist[entry->seg_count++] = page;
+ entry->seglist[entry->seg_count++] = dmah;
for (i = 0; i < (1 << page_order); i++) {
DRM_DEBUG("page %d @ 0x%08lx\n",
dma->page_count + page_count,
- page + PAGE_SIZE * i);
+ (unsigned long)dmah->vaddr + PAGE_SIZE * i);
temp_pagelist[dma->page_count + page_count++]
- = page + PAGE_SIZE * i;
+ = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
}
for (offset = 0;
offset + size <= total && entry->buf_count < count;
buf->order = order;
buf->used = 0;
buf->offset = (dma->byte_count + byte_count + offset);
- buf->address = (void *)(page + offset);
+ buf->address = (void *)(dmah->vaddr + offset);
+ buf->bus_address = dmah->busaddr + offset;
buf->next = NULL;
buf->waiting = 0;
buf->pending = 0;
(count << page_order))
* sizeof(*dma->pagelist),
DRM_MEM_PAGES);
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
drm_free(temp_pagelist,
(dma->page_count + (count << page_order))
* sizeof(*dma->pagelist), DRM_MEM_PAGES);
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
dma->page_count += entry->seg_count << page_order;
dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
request->count = entry->buf_count;
request->size = size;
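
The move from drm_alloc_pages() to drm_pci_alloc() is what makes buf->bus_address available above: the helper allocates DMA-coherent memory and returns a handle carrying both the CPU and the device view of the buffer. For reference, this is assumed to match the drmP.h definition of the period:

	typedef struct drm_dma_handle {
		dma_addr_t busaddr;	/* bus address, as seen by the device */
		void *vaddr;		/* kernel virtual address, for the CPU */
		size_t size;
	} drm_dma_handle_t;

Freeing goes through drm_pci_free(dev, dmah), matching the drm_cleanup_buf_error() hunk earlier in this patch.
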
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->count_lock);
- down(&dev->struct_sem);
+ mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
if (entry->buf_count) {
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
if (count < 0 || count > 4096) {
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -EINVAL;
}
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
drm_cleanup_buf_error(dev, entry);
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
if (!temp_buflist) {
/* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
request->count = entry->buf_count;
request->size = size;
return 0;
}
-int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
+static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
{
drm_device_dma_t *dma = dev->dma;
drm_buf_entry_t *entry;
atomic_inc(&dev->buf_alloc);
spin_unlock(&dev->count_lock);
- down(&dev->struct_sem);
+ mutex_lock(&dev->struct_mutex);
entry = &dma->bufs[order];
if (entry->buf_count) {
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM; /* May only call once for each order */
}
if (count < 0 || count > 4096) {
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -EINVAL;
}
entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
DRM_MEM_BUFS);
if (!entry->buflist) {
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
drm_cleanup_buf_error(dev, entry);
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
if (!temp_buflist) {
/* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
- up(&dev->struct_sem);
+ mutex_unlock(&dev->struct_mutex);
request->count = entry->buf_count;
request->size = size;
atomic_dec(&dev->buf_alloc);
return 0;
}
-EXPORT_SYMBOL(drm_addbufs_fb);
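
Dropping the export and making drm_addbufs_fb() static go together: its only caller is the drm_addbufs() dispatcher in this same file, which picks a backend from the request flags. Roughly, as a sketch of the existing dispatch rather than anything this hunk changes:

	if (request.flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, &request);
	else if (request.flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, &request);
	else if (request.flags & _DRM_FB_BUFFER)
		ret = drm_addbufs_fb(dev, &request);
	else
		ret = drm_addbufs_pci(dev, &request);
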
/**