/* We do it here so that dev->struct_mutex protects the increment */
user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
map->offset;
- ret = drm_map_handle(dev, &list->hash, user_token, FALSE);
+ ret = drm_map_handle(dev, &list->hash, user_token, 0);
if (ret) {
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
drm_free(list, sizeof(*list), DRM_MEM_MAPS);
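/*
 * For reference, not part of this diff: the last argument of drm_map_handle()
 * is a plain int flag in the drm_bufs.c of this era, which is why 0 can stand
 * in for FALSE directly. Presumed prototype (recalled, names may differ):
 *
 *	static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
 *				  unsigned long user_token, int hashed_handle);
 */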
if (parent->size == size) {
list_del_init(&parent->fl_entry);
- parent->free = FALSE;
+ parent->free = 0;
return parent;
} else {
child = (drm_mm_node_t *) drm_alloc(sizeof(*child), DRM_MEM_MM);
INIT_LIST_HEAD(&child->ml_entry);
INIT_LIST_HEAD(&child->fl_entry);
- child->free = FALSE;
+ child->free = 0;
child->size = size;
child->start = parent->start;
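/*
 * Illustration, not part of this diff: in the drm_mm.c of this era the else
 * branch above presumably goes on to carve the new node out of the front of
 * the free block and leave the remainder in "parent", roughly:
 *
 *	list_add_tail(&child->ml_entry, &parent->ml_entry);
 *	parent->size -= size;
 *	parent->start += size;
 *	return child;
 */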
drm_mm_node_t *prev_node = NULL;
drm_mm_node_t *next_node;
- int merged = FALSE;
+ int merged = 0;
if (cur_head->prev != root_head) {
prev_node = list_entry(cur_head->prev, drm_mm_node_t, ml_entry);
if (prev_node->free) {
prev_node->size += cur->size;
- merged = TRUE;
+ merged = 1;
}
}
if (cur_head->next != root_head) {
} else {
next_node->size += cur->size;
next_node->start = cur->start;
- merged = TRUE;
+ merged = 1;
}
}
}
if (!merged) {
- cur->free = TRUE;
+ cur->free = 1;
list_add(&cur->fl_entry, &list_root->fl_entry);
} else {
list_del(&cur->ml_entry);
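/*
 * Not part of this diff: once the block has been merged into a free
 * neighbour, the now-redundant node is presumably released right after
 * being unlinked, e.g.:
 *
 *	drm_free(cur, sizeof(*cur), DRM_MEM_MM);
 */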
child->start = start;
child->size = size;
- child->free = TRUE;
+ child->free = 1;
list_add(&child->fl_entry, &mm->root_node.fl_entry);
list_add(&child->ml_entry, &mm->root_node.ml_entry);
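/*
 * Usage sketch, not from this patch: a driver brings the manager up over a
 * single range and then allocates and frees blocks from it. Prototypes are
 * recalled from the drm_mm of this era and may differ slightly;
 * "size_in_pages" and "num_pages" are illustrative names:
 *
 *	drm_mm_t mm;
 *	drm_mm_node_t *node;
 *
 *	if (drm_mm_init(&mm, 0, size_in_pages))
 *		return DRM_ERR(ENOMEM);
 *	node = drm_mm_search_free(&mm, num_pages, 0, 1);
 *	if (node)
 *		node = drm_mm_get_block(node, num_pages, 0);
 *	...
 *	drm_mm_put_block(&mm, node);
 *	drm_mm_takedown(&mm);
 */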
drm_mm_t *mm = (drm_mm_t *) private;
drm_mm_node_t *tmp;
- tmp = drm_mm_search_free(mm, size, alignment, TRUE);
+ tmp = drm_mm_search_free(mm, size, alignment, 1);
if (!tmp) {
return NULL;
}
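/*
 * Not part of this diff: the helper above presumably finishes by claiming
 * the block that was found, along the lines of:
 *
 *	tmp = drm_mm_get_block(tmp, size, alignment);
 *	return tmp;
 */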
extern int drm_sman_free_key(drm_sman_t * sman, unsigned int key);
/*
- * returns TRUE iff there are no stale memory blocks associated with this owner.
+ * returns 1 iff there are no stale memory blocks associated with this owner.
* Typically called to determine if we need to idle the hardware and call
* drm_sman_owner_cleanup. If there are no stale memory blocks, it removes all
* resources associated with owner.
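/*
 * Usage sketch, not from this patch: a typical caller of the routine the
 * comment above documents looks roughly like the via driver's reclaim path,
 * where "priv" stands for the per-file drm_file_t pointer used as the owner
 * token (illustrative, details may differ):
 *
 *	mutex_lock(&dev->struct_mutex);
 *	if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) {
 *		mutex_unlock(&dev->struct_mutex);
 *		return;
 *	}
 *	if (dev->driver->dma_quiescent)
 *		dev->driver->dma_quiescent(dev);
 *	drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)priv);
 *	mutex_unlock(&dev->struct_mutex);
 */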
return ret;
}
- dev_priv->vram_initialized = TRUE;
+ dev_priv->vram_initialized = 1;
dev_priv->vram_offset = fb.offset;
mutex_unlock(&dev->struct_mutex);
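/*
 * Context, not from this patch: the "return ret" above is presumably the
 * failure path of the drm_sman_set_range() call that sets up the VRAM pool
 * just before this hunk, roughly:
 *
 *	ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
 *				 fb.size >> VIA_MM_ALIGN_SHIFT);
 */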
mutex_lock(&dev->struct_mutex);
- if (FALSE == ((pool == 0) ? dev_priv->vram_initialized :
+ if (0 == ((pool == 0) ? dev_priv->vram_initialized :
dev_priv->agp_initialized)) {
DRM_ERROR
("Attempt to allocate from uninitialized memory manager.\n");
return ret;
}
- dev_priv->agp_initialized = TRUE;
+ dev_priv->agp_initialized = 1;
dev_priv->agp_offset = agp.offset;
mutex_unlock(&dev->struct_mutex);
if (time_after_eq(jiffies, end)) {
DRM_ERROR("Graphics engine idle timeout. "
"Disabling idle check\n");
- dev_priv->idle_fault = TRUE;
+ dev_priv->idle_fault = 1;
}
/*
mutex_lock(&dev->struct_mutex);
drm_sman_cleanup(&dev_priv->sman);
- dev_priv->vram_initialized = FALSE;
- dev_priv->agp_initialized = FALSE;
+ dev_priv->vram_initialized = 0;
+ dev_priv->agp_initialized = 0;
dev_priv->mmio = NULL;
mutex_unlock(&dev->struct_mutex);
}
mutex_lock(&dev->struct_mutex);
drm_sman_cleanup(&dev_priv->sman);
- dev_priv->vram_initialized = FALSE;
- dev_priv->agp_initialized = FALSE;
+ dev_priv->vram_initialized = 0;
+ dev_priv->agp_initialized = 0;
mutex_unlock(&dev->struct_mutex);
}
return DRM_ERR(EINVAL);
}
mutex_lock(&dev->struct_mutex);
- if (FALSE == ((mem.type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
+ if (0 == ((mem.type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
dev_priv->agp_initialized)) {
DRM_ERROR
("Attempt to allocate from uninitialized memory manager.\n");