This patch allows drm to populate an agpgart memory structure (struct
agp_memory) with pages of its own. It's needed for the new drm memory
manager, which dynamically flips pages in and out of AGP.

The patch modifies the generic functions as well as the intel agp driver;
the intel drm driver is currently the only one supporting the new memory
manager. Other agp drivers may need some minor fixing up once they have a
corresponding memory-manager-enabled drm driver.

AGP memory types >= AGP_USER_TYPES are not populated by the agpgart
driver; the drm is expected to do that itself, as well as take care of
cache and TLB flushing when needed. It's not possible to request these
types from user space through the agpgart ioctls.

The Intel driver also gets a new memory type for pages that can be bound
cached to the intel GTT.
Signed-off-by: Thomas Hellstrom <thomas@tungstengraphics.com>
Signed-off-by: Dave Jones <davej@redhat.com>
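
From the drm side, the expected calling sequence for the new user types
looks roughly like this. This is a hedged sketch, not part of the patch:
bridge, pages[], num_pages and pg_start are hypothetical caller state,
the phys_to_gart()/page_to_phys() combination is assumed from the agp
drivers of this era, and error handling is trimmed.

	struct agp_memory *mem;
	int i;

	mem = agp_allocate_memory(bridge, num_pages, AGP_USER_CACHED_MEMORY);
	if (mem == NULL)
		return -ENOMEM;

	/* agpgart left mem->memory empty; the drm supplies its own pages */
	for (i = 0; i < num_pages; i++)
		mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
	mem->page_count = num_pages;

	/* for user types, cache and tlb flushing is the drm's job */
	global_cache_flush();

	return agp_bind_memory(mem, pg_start);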
void (*free_by_type)(struct agp_memory *);
void *(*agp_alloc_page)(struct agp_bridge_data *);
void (*agp_destroy_page)(void *);
+ int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
};
struct agp_bridge_data {
#define I810_PTE_MAIN_UNCACHED 0x00000000
#define I810_PTE_LOCAL 0x00000002
#define I810_PTE_VALID 0x00000001
+#define I830_PTE_SYSTEM_CACHED 0x00000006
#define I810_SMRAM_MISCC 0x70
#define I810_GFX_MEM_WIN_SIZE 0x00010000
#define I810_GFX_MEM_WIN_32M 0x00010000
void get_agp_version(struct agp_bridge_data *bridge);
unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
unsigned long addr, int type);
+int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
+ int type);
struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev);
+/* generic functions for user-populated AGP memory types */
+struct agp_memory *agp_generic_alloc_user(size_t page_count, int type);
+void agp_alloc_page_array(size_t size, struct agp_memory *mem);
+void agp_free_page_array(struct agp_memory *mem);
+
+
/* generic routines for agp>=3 */
int agp3_generic_fetch_size(void);
void agp3_generic_tlbflush(struct agp_memory *mem);
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = ali_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver ali_m1541_bridge = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = m1541_alloc_page,
.agp_destroy_page = m1541_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
int num_entries, status;
void *temp;
+ if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
+ return -EINVAL;
+
temp = agp_bridge->current_size;
num_entries = A_SIZE_FIX(temp)->num_entries;
if ((pg_start + mem->page_count) > num_entries)
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
struct agp_bridge_data *alpha_bridge;
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_device_ids amd_agp_device_ids[] __devinitdata =
{
int i, j, num_entries;
long long tmp;
+ int mask_type;
+ struct agp_bridge_data *bridge = mem->bridge;
u32 pte;
num_entries = agp_num_entries();
- if (type != 0 || mem->type != 0)
+ if (type != mem->type)
return -EINVAL;
+ mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+ if (mask_type != 0)
+ return -EINVAL;
+
/* Make sure we can fit the range in the gatt table. */
/* FIXME: could wrap */
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
tmp = agp_bridge->driver->mask_memory(agp_bridge,
- mem->memory[i], mem->type);
+ mem->memory[i], mask_type);
BUG_ON(tmp & 0xffffff0000000ffcULL);
pte = (tmp & 0x000000ff00000000ULL) >> 28;
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
/* Some basic sanity checks for the aperture. */
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
* fix some real stupidity. It's only by chance we can bump
* past 0.99 at all due to some boolean logic error. */
#define AGPGART_VERSION_MAJOR 0
-#define AGPGART_VERSION_MINOR 101
+#define AGPGART_VERSION_MINOR 102
static const struct agp_version agp_current_version =
{
.major = AGPGART_VERSION_MAJOR,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static int __devinit agp_efficeon_probe(struct pci_dev *pdev,
if (copy_from_user(&alloc, arg, sizeof(struct agp_allocate)))
return -EFAULT;
+ if (alloc.type >= AGP_USER_TYPES)
+ return -EINVAL;
+
memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
if (memory == NULL)
return -1;
}
+/*
+ * Use kmalloc if possible for the page list. Otherwise fall back to
+ * vmalloc. This speeds things up and also saves memory for small AGP
+ * regions.
+ */
+
+void agp_alloc_page_array(size_t size, struct agp_memory *mem)
+{
+ mem->memory = NULL;
+ mem->vmalloc_flag = 0;
+
+ if (size <= 2*PAGE_SIZE) {
+ mem->memory = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
+ }
+ if (mem->memory == NULL) {
+ mem->memory = vmalloc(size);
+ mem->vmalloc_flag = 1;
+ }
+}
+EXPORT_SYMBOL(agp_alloc_page_array);
+
+void agp_free_page_array(struct agp_memory *mem)
+{
+ if (mem->vmalloc_flag) {
+ vfree(mem->memory);
+ } else {
+ kfree(mem->memory);
+ }
+}
+EXPORT_SYMBOL(agp_free_page_array);
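
To illustrate the threshold: assuming 4 KiB pages and 8-byte page
pointers, arrays of up to 1024 entries normally stay on the kmalloc path
(the __GFP_NORETRY flag means a busy allocator still falls back to
vmalloc):

	struct agp_memory mem;

	/* 64 pointers = 512 bytes <= 2*PAGE_SIZE: normally kmalloc'ed */
	agp_alloc_page_array(64 * sizeof(struct page *), &mem);
	/* mem.vmalloc_flag records which allocator succeeded */
	agp_free_page_array(&mem);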
+
+
+static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
+{
+ struct agp_memory *new;
+ unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
+
+ new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
+
+ if (new == NULL)
+ return NULL;
+
+ new->key = agp_get_key();
+
+ if (new->key < 0) {
+ kfree(new);
+ return NULL;
+ }
+
+ agp_alloc_page_array(alloc_size, new);
+
+ if (new->memory == NULL) {
+ agp_free_key(new->key);
+ kfree(new);
+ return NULL;
+ }
+ new->num_scratch_pages = 0;
+ return new;
+}
+
struct agp_memory *agp_create_memory(int scratch_pages)
{
kfree(new);
return NULL;
}
- new->memory = vmalloc(PAGE_SIZE * scratch_pages);
+
+ agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);
if (new->memory == NULL) {
agp_free_key(new->key);
return NULL;
}
new->num_scratch_pages = scratch_pages;
+ new->type = AGP_NORMAL_MEMORY;
return new;
}
EXPORT_SYMBOL(agp_create_memory);
if (curr->is_bound == TRUE)
agp_unbind_memory(curr);
+ if (curr->type >= AGP_USER_TYPES) {
+ agp_generic_free_by_type(curr);
+ return;
+ }
+
if (curr->type != 0) {
curr->bridge->driver->free_by_type(curr);
return;
flush_agp_mappings();
}
agp_free_key(curr->key);
- vfree(curr->memory);
+ agp_free_page_array(curr);
kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);
if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
return NULL;
+ if (type >= AGP_USER_TYPES) {
+ new = agp_generic_alloc_user(page_count, type);
+ if (new)
+ new->bridge = bridge;
+ return new;
+ }
+
if (type != 0) {
new = bridge->driver->alloc_by_type(page_count, type);
if (new)
off_t j;
void *temp;
struct agp_bridge_data *bridge;
+ int mask_type;
bridge = mem->bridge;
if (!bridge)
num_entries -= agp_memory_reserved/PAGE_SIZE;
if (num_entries < 0) num_entries = 0;
- if (type != 0 || mem->type != 0) {
+ if (type != mem->type) {
+ return -EINVAL;
+ }
+
+ mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+ if (mask_type != 0) {
/* The generic routines know nothing of memory types */
return -EINVAL;
}
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(bridge->driver->mask_memory(bridge, mem->memory[i], mem->type), bridge->gatt_table+j);
+ writel(bridge->driver->mask_memory(bridge, mem->memory[i], mask_type),
+ bridge->gatt_table+j);
}
readl(bridge->gatt_table+j-1); /* PCI Posting. */
{
size_t i;
struct agp_bridge_data *bridge;
+ int mask_type;
bridge = mem->bridge;
if (!bridge)
if (mem->page_count == 0)
return 0;
- if (type != 0 || mem->type != 0) {
+ if (type != mem->type)
+ return -EINVAL;
+
+ mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+ if (mask_type != 0) {
/* The generic routines know nothing of memory types */
return -EINVAL;
}
void agp_generic_free_by_type(struct agp_memory *curr)
{
- vfree(curr->memory);
+ agp_free_page_array(curr);
agp_free_key(curr->key);
kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);
+struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
+{
+ struct agp_memory *new;
+ int i;
+ int pages;
+
+ pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
+ new = agp_create_user_memory(page_count);
+ if (new == NULL)
+ return NULL;
+
+ for (i = 0; i < page_count; i++)
+ new->memory[i] = 0;
+ new->page_count = 0;
+ new->type = type;
+ new->num_scratch_pages = pages;
+
+ return new;
+}
+EXPORT_SYMBOL(agp_generic_alloc_user);
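
Note that new->page_count is deliberately left at 0 here; it only becomes
nonzero once the drm has actually populated the array. The scratch-page
arithmetic rounds up to whole pages of entries; a worked example, assuming
4 KiB pages so that ENTRIES_PER_PAGE is 512:

	/* 1000 user pages round up to two whole pages of entries */
	pages = (1000 + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;	/* == 2 */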
+
/*
* Basic Page Allocation Routines -
}
EXPORT_SYMBOL(agp_generic_mask_memory);
+int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
+ int type)
+{
+ if (type >= AGP_USER_TYPES)
+ return 0;
+ return type;
+}
+EXPORT_SYMBOL(agp_generic_type_to_mask_type);
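
For bridges without special hardware types, this collapses the whole user
range onto mask type 0 (AGP_NORMAL_MEMORY), while driver types below
AGP_USER_TYPES pass through unchanged; return values shown as comments:

	agp_generic_type_to_mask_type(bridge, AGP_USER_MEMORY);	/* 0 */
	agp_generic_type_to_mask_type(bridge, AGP_USER_CACHED_MEMORY);	/* 0 */
	agp_generic_type_to_mask_type(bridge, AGP_PHYS_MEMORY);	/* 2 */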
+
/*
* These functions are implemented according to the AGPv3 spec,
* which covers implementation details that had previously been
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = 1,
};
pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
mem, pg_start, type, mem->memory[0]);
+ if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
+ return -EINVAL;
+
io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
temp = agp_bridge->current_size;
struct lp_desc *start, *end, *lp;
void *temp;
+ if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
+ return -EINVAL;
+
temp = agp_bridge->current_size;
num_entries = A_SIZE_8(temp)->num_entries;
#endif
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = 1,
};
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB)
+extern int agp_memory_reserved;
+
+
/* Intel 815 register */
#define INTEL_815_APCONT 0x51
#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
#define AGP_DCACHE_MEMORY 1
#define AGP_PHYS_MEMORY 2
+#define INTEL_AGP_CACHED_MEMORY 3
static struct gatt_mask intel_i810_masks[] =
{
{.mask = I810_PTE_VALID, .type = 0},
{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
- {.mask = I810_PTE_VALID, .type = 0}
+ {.mask = I810_PTE_VALID, .type = 0},
+ {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
+ .type = INTEL_AGP_CACHED_MEMORY}
};
static struct _intel_i810_private {
int num_dcache_entries;
} intel_i810_private;
+
static int intel_i810_fetch_size(void)
{
u32 smram_miscc;
atomic_dec(&agp_bridge->current_memory_agp);
}
+static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
+ int type)
+{
+ if (type < AGP_USER_TYPES)
+ return type;
+ else if (type == AGP_USER_CACHED_MEMORY)
+ return INTEL_AGP_CACHED_MEMORY;
+ else
+ return 0;
+}
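
On intel, AGP_USER_CACHED_MEMORY is the one user type with a hardware
meaning; everything else in the user range degrades to the uncached mask
type 0. The resulting GTT entry picks up the cached PTE bits from the
intel_i810_masks table above:

	intel_i830_type_to_mask_type(bridge, AGP_USER_MEMORY);	/* 0, uncached */
	mask_type = intel_i830_type_to_mask_type(bridge, AGP_USER_CACHED_MEMORY);
	/* mask_type == INTEL_AGP_CACHED_MEMORY (3); its gatt mask is
	 * I810_PTE_VALID | I830_PTE_SYSTEM_CACHED == 0x00000007 */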
+
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
int type)
{
int i, j, num_entries;
void *temp;
+ int ret = -EINVAL;
+ int mask_type;
if (mem->page_count == 0)
- return 0;
+ goto out;
temp = agp_bridge->current_size;
num_entries = A_SIZE_FIX(temp)->num_entries;
if ((pg_start + mem->page_count) > num_entries)
- return -EINVAL;
+ goto out_err;
- for (j = pg_start; j < (pg_start + mem->page_count); j++) {
- if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
- return -EBUSY;
- }
- if (type != 0 || mem->type != 0) {
- if ((type == AGP_DCACHE_MEMORY) && (mem->type == AGP_DCACHE_MEMORY)) {
- /* special insert */
- if (!mem->is_flushed) {
- global_cache_flush();
- mem->is_flushed = TRUE;
- }
-
- for (i = pg_start; i < (pg_start + mem->page_count); i++) {
- writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID, intel_i810_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_i810_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
-
- agp_bridge->driver->tlb_flush(mem);
- return 0;
+ for (j = pg_start; j < (pg_start + mem->page_count); j++) {
+ if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
+ ret = -EBUSY;
+ goto out_err;
}
- if ((type == AGP_PHYS_MEMORY) && (mem->type == AGP_PHYS_MEMORY))
- goto insert;
- return -EINVAL;
}
-insert:
- if (!mem->is_flushed) {
- global_cache_flush();
- mem->is_flushed = TRUE;
- }
+ if (type != mem->type)
+ goto out_err;
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- mem->memory[i], mem->type),
- intel_i810_private.registers+I810_PTE_BASE+(j*4));
+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
+
+ switch (mask_type) {
+ case AGP_DCACHE_MEMORY:
+ if (!mem->is_flushed)
+ global_cache_flush();
+ for (i = pg_start; i < (pg_start + mem->page_count); i++) {
+ writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
+ intel_i810_private.registers+I810_PTE_BASE+(i*4));
+ }
+ readl(intel_i810_private.registers+I810_PTE_BASE+((i-1)*4));
+ break;
+ case AGP_PHYS_MEMORY:
+ case AGP_NORMAL_MEMORY:
+ if (!mem->is_flushed)
+ global_cache_flush();
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ mem->memory[i],
+ mask_type),
+ intel_i810_private.registers+I810_PTE_BASE+(j*4));
+ }
+ readl(intel_i810_private.registers+I810_PTE_BASE+((j-1)*4));
+ break;
+ default:
+ goto out_err;
}
- readl(intel_i810_private.registers+I810_PTE_BASE+((j-1)*4)); /* PCI Posting. */
agp_bridge->driver->tlb_flush(mem);
- return 0;
+out:
+ ret = 0;
+out_err:
+ mem->is_flushed = TRUE;
+ return ret;
}
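
The rewritten insert path funnels every exit through a pair of labels,
with "out" falling through into "out_err" so the flush state is recorded
on all paths. A stand-alone sketch of the idiom (demo_insert() and its
arguments are hypothetical, not part of the patch):

	static int demo_insert(int page_count, int type_ok)
	{
		int ret = -EINVAL;

		if (page_count == 0)
			goto out;	/* empty request: succeed, bind nothing */
		if (!type_ok)
			goto out_err;	/* ret is still -EINVAL */
		/* ... write the PTEs here ... */
	out:
		ret = 0;
	out_err:
		/* shared tail, reached from both the success and error paths */
		return ret;
	}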
static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
new->type = AGP_DCACHE_MEMORY;
new->page_count = pg_count;
new->num_scratch_pages = 0;
- vfree(new->memory);
+ agp_free_page_array(new);
return new;
}
if (type == AGP_PHYS_MEMORY)
return alloc_agpphysmem_i8xx(pg_count, type);
-
return NULL;
}
gart_to_virt(curr->memory[0]));
global_flush_tlb();
}
- vfree(curr->memory);
+ agp_free_page_array(curr);
}
kfree(curr);
}
{
int i,j,num_entries;
void *temp;
+ int ret = -EINVAL;
+ int mask_type;
if (mem->page_count == 0)
- return 0;
+ goto out;
temp = agp_bridge->current_size;
num_entries = A_SIZE_FIX(temp)->num_entries;
pg_start,intel_i830_private.gtt_entries);
printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
- return -EINVAL;
+ goto out_err;
}
if ((pg_start + mem->page_count) > num_entries)
- return -EINVAL;
+ goto out_err;
/* The i830 can't check the GTT for entries since its read only,
* depend on the caller to make the correct offset decisions.
*/
- if ((type != 0 && type != AGP_PHYS_MEMORY) ||
- (mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
- return -EINVAL;
+ if (type != mem->type)
+ goto out_err;
+
+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
- if (!mem->is_flushed) {
+ if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+ mask_type != INTEL_AGP_CACHED_MEMORY)
+ goto out_err;
+
+ if (!mem->is_flushed)
global_cache_flush();
- mem->is_flushed = TRUE;
- }
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
- mem->memory[i], mem->type),
- intel_i830_private.registers+I810_PTE_BASE+(j*4));
+ mem->memory[i], mask_type),
+ intel_i830_private.registers+I810_PTE_BASE+(j*4));
}
readl(intel_i830_private.registers+I810_PTE_BASE+((j-1)*4));
-
agp_bridge->driver->tlb_flush(mem);
- return 0;
+
+out:
+ ret = 0;
+out_err:
+ mem->is_flushed = TRUE;
+ return ret;
}
static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start,
{
if (type == AGP_PHYS_MEMORY)
return alloc_agpphysmem_i8xx(pg_count, type);
-
/* always return NULL for other allocation types for now */
return NULL;
}
{
int i,j,num_entries;
void *temp;
+ int ret = -EINVAL;
+ int mask_type;
if (mem->page_count == 0)
- return 0;
+ goto out;
temp = agp_bridge->current_size;
num_entries = A_SIZE_FIX(temp)->num_entries;
pg_start,intel_i830_private.gtt_entries);
printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
- return -EINVAL;
+ goto out_err;
}
if ((pg_start + mem->page_count) > num_entries)
- return -EINVAL;
+ goto out_err;
- /* The i830 can't check the GTT for entries since its read only,
+ /* The i915 can't check the GTT for entries since it's read-only; we
* depend on the caller to make the correct offset decisions.
*/
- if ((type != 0 && type != AGP_PHYS_MEMORY) ||
- (mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
- return -EINVAL;
+ if (type != mem->type)
+ goto out_err;
+
+ mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
- if (!mem->is_flushed) {
+ if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
+ mask_type != INTEL_AGP_CACHED_MEMORY)
+ goto out_err;
+
+ if (!mem->is_flushed)
global_cache_flush();
- mem->is_flushed = TRUE;
- }
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
- mem->memory[i], mem->type), intel_i830_private.gtt+j);
+ mem->memory[i], mask_type), intel_i830_private.gtt+j);
}
- readl(intel_i830_private.gtt+j-1);
+ readl(intel_i830_private.gtt+j-1);
agp_bridge->driver->tlb_flush(mem);
- return 0;
+
+out:
+ ret = 0;
+out_err:
+ mem->is_flushed = TRUE;
+ return ret;
}
static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver intel_810_driver = {
.free_by_type = intel_i810_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver intel_815_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver intel_830_driver = {
.free_by_type = intel_i810_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = intel_i830_type_to_mask_type,
};
static struct agp_bridge_driver intel_820_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver intel_830mp_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver intel_840_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver intel_845_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver intel_850_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver intel_860_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver intel_915_driver = {
.free_by_type = intel_i810_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = intel_i830_type_to_mask_type,
};
static struct agp_bridge_driver intel_i965_driver = {
.free_by_type = intel_i810_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = intel_i830_type_to_mask_type,
};
static struct agp_bridge_driver intel_7505_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static int find_i810(u16 device)
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static int __devinit agp_nvidia_probe(struct pci_dev *pdev,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = sgi_tioca_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = 1,
.needs_scratch_page = 0,
.num_aperture_sizes = 1,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = 1,
};
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = 1,
.needs_scratch_page = 1,
};
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver via_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
+ .agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_device_ids via_agp_device_ids[] __devinitdata =
u32 physical;
u8 is_bound;
u8 is_flushed;
+ u8 vmalloc_flag;
};
#define AGP_NORMAL_MEMORY 0
+#define AGP_USER_TYPES (1 << 16)
+#define AGP_USER_MEMORY (AGP_USER_TYPES)
+#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
+
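The user range starts high enough (1 << 16) that it can never collide
with driver-private types such as AGP_DCACHE_MEMORY or AGP_PHYS_MEMORY;
a hypothetical helper (not part of the patch) makes the split explicit:

	static inline int agp_type_is_user(int type)
	{
		return type >= AGP_USER_TYPES;	/* 0x10000 and up */
	}
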
extern struct agp_bridge_data *agp_bridge;
extern struct list_head agp_bridges;