ffb-objs := ffb_drv.o ffb_context.o
sis-objs := sis_drv.o sis_ds.o sis_mm.o
savage-objs := savage_drv.o savage_bci.o savage_state.o
-via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o
+via-objs := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o
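+# via_dmablit.o implements the PCI DMA blit support added by this update.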
ifeq ($(CONFIG_COMPAT),y)
drm-objs += drm_ioc32.o
#define DRM_ERR(d) -(d)
/** Current process ID */
#define DRM_CURRENTPID current->pid
+#define DRM_SUSER(p) capable(CAP_SYS_ADMIN)
#define DRM_UDELAY(d) udelay(d)
/** Read a byte from a MMIO region */
#define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset))
dev_priv->dma_wrap = init->size;
dev_priv->dma_offset = init->offset;
dev_priv->last_pause_ptr = NULL;
- dev_priv->hw_addr_ptr = dev_priv->mmio->handle + init->reg_pause_addr;
+ dev_priv->hw_addr_ptr =
+ (volatile uint32_t *)((char *)dev_priv->mmio->handle +
+ init->reg_pause_addr);
via_cmdbuf_start(dev_priv);
switch (init.func) {
case VIA_INIT_DMA:
- if (!capable(CAP_SYS_ADMIN))
+ if (!DRM_SUSER(DRM_CURPROC))
retcode = DRM_ERR(EPERM);
else
retcode = via_initialize(dev, dev_priv, &init);
break;
case VIA_CLEANUP_DMA:
- if (!capable(CAP_SYS_ADMIN))
+ if (!DRM_SUSER(DRM_CURPROC))
retcode = DRM_ERR(EPERM);
else
retcode = via_dma_cleanup(dev);
return 0;
}
-extern int
-via_parse_command_stream(drm_device_t * dev, const uint32_t * buf,
- unsigned int size);
static int via_dispatch_pci_cmdbuffer(drm_device_t * dev,
drm_via_cmdbuffer_t * cmd)
{
if ((count <= 8) && (count >= 0)) {
uint32_t rgtr, ptr;
rgtr = *(dev_priv->hw_addr_ptr);
- ptr = ((char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
- dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4 -
- CMDBUF_ALIGNMENT_SIZE;
+ ptr = ((volatile char *)dev_priv->last_pause_ptr -
+ dev_priv->dma_ptr) + dev_priv->dma_offset +
+ (uint32_t) dev_priv->agpAddr + 4 - CMDBUF_ALIGNMENT_SIZE;
if (rgtr <= ptr) {
DRM_ERROR
("Command regulator\npaused at count %d, address %x, "
&& count--) ;
rgtr = *(dev_priv->hw_addr_ptr);
- ptr = ((char *)paused_at - dev_priv->dma_ptr) +
+ ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
ptr_low = (ptr > 3 * CMDBUF_ALIGNMENT_SIZE) ?
sizeof(d_siz));
return ret;
}
+
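+/*
+ * Ioctl dispatch table, indexed by driver-private ioctl number; the two
+ * flags following each handler are the auth-required and root-only bits
+ * checked by the DRM core before dispatch.
+ */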
+drm_ioctl_desc_t via_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, 1, 0},
+ [DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, 1, 0},
+ [DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, 1, 0},
+ [DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, 1, 0},
+ [DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, 1, 0},
+ [DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, 1, 0},
+ [DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, 1, 0},
+ [DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, 1, 0},
+ [DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, 1, 0},
+ [DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, 1, 0},
+ [DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, 1, 0},
+ [DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, 1, 0}
+};
+
+int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
#define DRM_VIA_CMDBUF_SIZE 0x0b
#define NOT_USED
#define DRM_VIA_WAIT_IRQ 0x0d
+#define DRM_VIA_DMA_BLIT 0x0e
+#define DRM_VIA_BLIT_SYNC 0x0f
#define DRM_IOCTL_VIA_ALLOCMEM DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t)
#define DRM_IOCTL_VIA_FREEMEM DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t)
#define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \
drm_via_cmdbuf_size_t)
#define DRM_IOCTL_VIA_WAIT_IRQ DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t)
+#define DRM_IOCTL_VIA_DMA_BLIT DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t)
+#define DRM_IOCTL_VIA_BLIT_SYNC DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t)
/* Indices into buf.Setup where various bits of state are mirrored per
* context and per buffer. These can be fired at the card as a unit,
#define VIA_BACK 0x2
#define VIA_DEPTH 0x4
#define VIA_STENCIL 0x8
-#define VIDEO 0
-#define AGP 1
+#define VIA_MEM_VIDEO 0 /* matches drm constant */
+#define VIA_MEM_AGP 1 /* matches drm constant */
+#define VIA_MEM_SYSTEM 2
+#define VIA_MEM_MIXED 3
+#define VIA_MEM_UNKNOWN 4
+
typedef struct {
uint32_t offset;
uint32_t size;
unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
unsigned int XvMCCtxNoGrabbed; /* Last context to hold decoder */
+ /* Used by the 3d driver only at this point, for pageflipping:
+ */
+ unsigned int pfCurrentOffset;
} drm_via_sarea_t;
typedef struct _drm_via_cmdbuf_size {
#define VIA_IRQ_FLAGS_MASK 0xF0000000
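+/*
+ * Logical interrupt sources exposed to user space.  The driver maps each of
+ * these onto a chip-specific IRQ slot, or rejects the request if the chip
+ * does not provide that interrupt.
+ */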
+enum drm_via_irqs {
+ drm_via_irq_hqv0 = 0,
+ drm_via_irq_hqv1,
+ drm_via_irq_dma0_dd,
+ drm_via_irq_dma0_td,
+ drm_via_irq_dma1_dd,
+ drm_via_irq_dma1_td,
+ drm_via_irq_num
+};
+
struct drm_via_wait_irq_request {
unsigned irq;
via_irq_seq_type_t type;
struct drm_wait_vblank_reply reply;
} drm_via_irqwait_t;
-#ifdef __KERNEL__
-
-int via_fb_init(DRM_IOCTL_ARGS);
-int via_mem_alloc(DRM_IOCTL_ARGS);
-int via_mem_free(DRM_IOCTL_ARGS);
-int via_agp_init(DRM_IOCTL_ARGS);
-int via_map_init(DRM_IOCTL_ARGS);
-int via_decoder_futex(DRM_IOCTL_ARGS);
-int via_dma_init(DRM_IOCTL_ARGS);
-int via_cmdbuffer(DRM_IOCTL_ARGS);
-int via_flush_ioctl(DRM_IOCTL_ARGS);
-int via_pci_cmdbuffer(DRM_IOCTL_ARGS);
-int via_cmdbuf_size(DRM_IOCTL_ARGS);
-int via_wait_irq(DRM_IOCTL_ARGS);
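+/*
+ * Argument blocks for the DMA blit ioctls.  A blit copies num_lines lines of
+ * line_length bytes between the frame buffer (fb_addr, fb_stride) and system
+ * memory (mem_addr, mem_stride); the embedded sync structure returns the
+ * handle that DRM_VIA_BLIT_SYNC waits on.
+ */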
+typedef struct drm_via_blitsync {
+ uint32_t sync_handle;
+ unsigned engine;
+} drm_via_blitsync_t;
+
+typedef struct drm_via_dmablit {
+ uint32_t num_lines;
+ uint32_t line_length;
+
+ uint32_t fb_addr;
+ uint32_t fb_stride;
+
+ unsigned char *mem_addr;
+ uint32_t mem_stride;
+
+ int bounce_buffer;
+ int to_fb;
+
+ drm_via_blitsync_t sync;
+} drm_via_dmablit_t;
-#endif
#endif /* _VIA_DRM_H_ */
viadrv_PCI_IDS
};
-static drm_ioctl_desc_t ioctls[] = {
- [DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, 1, 0},
- [DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, 1, 0}
-};
-
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
+ .load = via_driver_load,
+ .unload = via_driver_unload,
.context_ctor = via_init_context,
.context_dtor = via_final_context,
.vblank_wait = via_driver_vblank_wait,
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
- .ioctls = ioctls,
- .num_ioctls = DRM_ARRAY_SIZE(ioctls),
+ .ioctls = via_ioctls,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
static int __init via_init(void)
{
+ driver.num_ioctls = via_max_ioctl;
via_init_command_verifier();
return drm_init(&driver);
}
#ifndef _VIA_DRV_H_
#define _VIA_DRV_H_
-#define DRIVER_AUTHOR "VIA"
+#define DRIVER_AUTHOR "Various"
#define DRIVER_NAME "via"
#define DRIVER_DESC "VIA Unichrome / Pro"
-#define DRIVER_DATE "20050523"
+#define DRIVER_DATE "20051022"
#define DRIVER_MAJOR 2
-#define DRIVER_MINOR 6
-#define DRIVER_PATCHLEVEL 3
+#define DRIVER_MINOR 7
+#define DRIVER_PATCHLEVEL 2
#include "via_verifier.h"
+#include "via_dmablit.h"
+
#define VIA_PCI_BUF_SIZE 60000
#define VIA_FIRE_BUF_SIZE 1024
-#define VIA_NUM_IRQS 2
+#define VIA_NUM_IRQS 4
typedef struct drm_via_ring_buffer {
- drm_map_t map;
+ drm_local_map_t map;
char *virtual_start;
} drm_via_ring_buffer_t;
typedef struct drm_via_private {
drm_via_sarea_t *sarea_priv;
- drm_map_t *sarea;
- drm_map_t *fb;
- drm_map_t *mmio;
+ drm_local_map_t *sarea;
+ drm_local_map_t *fb;
+ drm_local_map_t *mmio;
unsigned long agpAddr;
wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
char *dma_ptr;
maskarray_t *irq_masks;
uint32_t irq_enable_mask;
uint32_t irq_pending_mask;
+ int *irq_map;
+ drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
} drm_via_private_t;
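+/*
+ * Coarse chip classification handed to via_driver_load() as the chipset
+ * argument; Pro Group A parts get the HQV interrupt sources in addition to
+ * the DMA engine interrupts.
+ */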
+enum via_family {
+ VIA_OTHER = 0,
+ VIA_PRO_GROUP_A,
+};
+
/* VIA MMIO register access */
#define VIA_BASE ((dev_priv->mmio))
#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
#define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val)
+extern drm_ioctl_desc_t via_ioctls[];
+extern int via_max_ioctl;
+
+extern int via_fb_init(DRM_IOCTL_ARGS);
+extern int via_mem_alloc(DRM_IOCTL_ARGS);
+extern int via_mem_free(DRM_IOCTL_ARGS);
+extern int via_agp_init(DRM_IOCTL_ARGS);
+extern int via_map_init(DRM_IOCTL_ARGS);
+extern int via_decoder_futex(DRM_IOCTL_ARGS);
+extern int via_dma_init(DRM_IOCTL_ARGS);
+extern int via_cmdbuffer(DRM_IOCTL_ARGS);
+extern int via_flush_ioctl(DRM_IOCTL_ARGS);
+extern int via_pci_cmdbuffer(DRM_IOCTL_ARGS);
+extern int via_cmdbuf_size(DRM_IOCTL_ARGS);
+extern int via_wait_irq(DRM_IOCTL_ARGS);
+extern int via_dma_blit_sync( DRM_IOCTL_ARGS );
+extern int via_dma_blit( DRM_IOCTL_ARGS );
+
+extern int via_driver_load(drm_device_t *dev, unsigned long chipset);
+extern int via_driver_unload(drm_device_t *dev);
+
extern int via_init_context(drm_device_t * dev, int context);
extern int via_final_context(drm_device_t * dev, int context);
extern int via_do_cleanup_map(drm_device_t * dev);
-extern int via_map_init(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
extern int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
extern void via_init_futex(drm_via_private_t * dev_priv);
extern void via_cleanup_futex(drm_via_private_t * dev_priv);
extern void via_release_futex(drm_via_private_t * dev_priv, int context);
+extern int via_driver_irq_wait(drm_device_t * dev, unsigned int irq,
+ int force_sequence, unsigned int *sequence);
-extern int via_parse_command_stream(drm_device_t * dev, const uint32_t * buf,
- unsigned int size);
+extern void via_dmablit_handler(drm_device_t *dev, int engine, int from_irq);
+extern void via_init_dmablit(drm_device_t *dev);
#endif
#define VIA_IRQ_HQV1_ENABLE (1 << 25)
#define VIA_IRQ_HQV0_PENDING (1 << 9)
#define VIA_IRQ_HQV1_PENDING (1 << 10)
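+/* Enable and pending bits for the DD and TD interrupts of the two DMA engines. */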
+#define VIA_IRQ_DMA0_DD_ENABLE (1 << 20)
+#define VIA_IRQ_DMA0_TD_ENABLE (1 << 21)
+#define VIA_IRQ_DMA1_DD_ENABLE (1 << 22)
+#define VIA_IRQ_DMA1_TD_ENABLE (1 << 23)
+#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
+#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
+#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
+#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
+
/*
* Device-specific IRQs go here. This type might need to be extended with
{VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
0x00000000},
{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
- 0x00000000}
+ 0x00000000},
+ {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
+ VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
+ {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
+ VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
};
static int via_num_pro_group_a =
sizeof(via_pro_group_a_irqs) / sizeof(maskarray_t);
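+/*
+ * Maps drm_via_irqs enum values to indices in the mask arrays above;
+ * -1 marks an interrupt the chip does not provide.
+ */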
+static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
-static maskarray_t via_unichrome_irqs[] = { };
+static maskarray_t via_unichrome_irqs[] = {
+ {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
+ VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
+ {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
+ VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
+};
static int via_num_unichrome = sizeof(via_unichrome_irqs) / sizeof(maskarray_t);
+static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
static unsigned time_diff(struct timeval *now, struct timeval *then)
{
atomic_inc(&cur_irq->irq_received);
DRM_WAKEUP(&cur_irq->irq_queue);
handled = 1;
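+ /* Hand transfer-done interrupts from the DMA engines to the blit code. */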
+ if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
+ via_dmablit_handler(dev, 0, 1);
+ } else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) {
+ via_dmablit_handler(dev, 1, 1);
+ }
}
cur_irq++;
}
return ret;
}
-static int
+int
via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
unsigned int *sequence)
{
drm_via_irq_t *cur_irq = dev_priv->via_irqs;
int ret = 0;
maskarray_t *masks = dev_priv->irq_masks;
+ int real_irq;
DRM_DEBUG("%s\n", __FUNCTION__);
return DRM_ERR(EINVAL);
}
- if (irq >= dev_priv->num_irqs) {
+ if (irq >= drm_via_irq_num) {
DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
irq);
return DRM_ERR(EINVAL);
}
- cur_irq += irq;
+ real_irq = dev_priv->irq_map[irq];
+
+ if (real_irq < 0) {
+ DRM_ERROR("%s Video IRQ %d not available on this hardware.\n",
+ __FUNCTION__, irq);
+ return DRM_ERR(EINVAL);
+ }
+
+ cur_irq += real_irq;
- if (masks[irq][2] && !force_sequence) {
+ if (masks[real_irq][2] && !force_sequence) {
DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
masks[irq][4]));
via_pro_group_a_irqs : via_unichrome_irqs;
dev_priv->num_irqs = (dev_priv->pro_group_a) ?
via_num_pro_group_a : via_num_unichrome;
+ dev_priv->irq_map = (dev_priv->pro_group_a) ?
+ via_irqmap_pro_group_a : via_irqmap_unichrome;
for (i = 0; i < dev_priv->num_irqs; ++i) {
atomic_set(&cur_irq->irq_received, 0);
dev_priv->last_vblank_valid = 0;
- // Clear VSync interrupt regs
+ /* Clear VSync interrupt regs */
status = VIA_READ(VIA_REG_INTERRUPT);
VIA_WRITE(VIA_REG_INTERRUPT, status &
~(dev_priv->irq_enable_mask));
int via_wait_irq(DRM_IOCTL_ARGS)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ DRM_DEVICE;
drm_via_irqwait_t __user *argp = (void __user *)data;
drm_via_irqwait_t irqwait;
struct timeval now;
static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
{
- drm_via_private_t *dev_priv;
+ drm_via_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
- dev_priv = drm_alloc(sizeof(drm_via_private_t), DRM_MEM_DRIVER);
- if (dev_priv == NULL)
- return -ENOMEM;
-
- memset(dev_priv, 0, sizeof(drm_via_private_t));
-
DRM_GETSAREA();
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
dev_priv->agpAddr = init->agpAddr;
via_init_futex(dev_priv);
- dev_priv->pro_group_a = (dev->pdev->device == 0x3118);
+
+ via_init_dmablit(dev);
dev->dev_private = (void *)dev_priv;
return 0;
int via_do_cleanup_map(drm_device_t * dev)
{
- if (dev->dev_private) {
-
- drm_via_private_t *dev_priv = dev->dev_private;
-
- via_dma_cleanup(dev);
-
- drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
- dev->dev_private = NULL;
- }
+ via_dma_cleanup(dev);
return 0;
}
return -EINVAL;
}
+
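+/*
+ * The device-private structure is now allocated at driver load time and
+ * freed at unload, rather than being managed by the MAP_INIT ioctl.
+ */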
+int via_driver_load(drm_device_t *dev, unsigned long chipset)
+{
+ drm_via_private_t *dev_priv;
+
+ dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
+ if (dev_priv == NULL)
+ return DRM_ERR(ENOMEM);
+
+ dev->dev_private = (void *)dev_priv;
+
+ if (chipset == VIA_PRO_GROUP_A)
+ dev_priv->pro_group_a = 1;
+
+ return 0;
+}
+
+int via_driver_unload(drm_device_t *dev)
+{
+ drm_via_private_t *dev_priv = dev->dev_private;
+
+ drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
+
+ return 0;
+}
+
sizeof(mem));
switch (mem.type) {
- case VIDEO:
+ case VIA_MEM_VIDEO:
if (via_fb_alloc(&mem) < 0)
return -EFAULT;
DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem,
sizeof(mem));
return 0;
- case AGP:
+ case VIA_MEM_AGP:
if (via_agp_alloc(&mem) < 0)
return -EFAULT;
DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem,
if (block) {
fb.offset = block->ofs;
fb.free = (unsigned long)block;
- if (!add_alloc_set(fb.context, VIDEO, fb.free)) {
+ if (!add_alloc_set(fb.context, VIA_MEM_VIDEO, fb.free)) {
DRM_DEBUG("adding to allocation set fails\n");
via_mmFreeMem((PMemBlock) fb.free);
retval = -1;
if (block) {
agp.offset = block->ofs;
agp.free = (unsigned long)block;
- if (!add_alloc_set(agp.context, AGP, agp.free)) {
+ if (!add_alloc_set(agp.context, VIA_MEM_AGP, agp.free)) {
DRM_DEBUG("adding to allocation set fails\n");
via_mmFreeMem((PMemBlock) agp.free);
retval = -1;
switch (mem.type) {
- case VIDEO:
+ case VIA_MEM_VIDEO:
if (via_fb_free(&mem) == 0)
return 0;
break;
- case AGP:
+ case VIA_MEM_AGP:
if (via_agp_free(&mem) == 0)
return 0;
break;
via_mmFreeMem((PMemBlock) fb.free);
- if (!del_alloc_set(fb.context, VIDEO, fb.free)) {
+ if (!del_alloc_set(fb.context, VIA_MEM_VIDEO, fb.free)) {
retval = -1;
}
via_mmFreeMem((PMemBlock) agp.free);
- if (!del_alloc_set(agp.context, AGP, agp.free)) {
+ if (!del_alloc_set(agp.context, VIA_MEM_AGP, agp.free)) {
retval = -1;
}
static __inline__ int
eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
{
- if ((*buf - buf_end) >= num_words) {
+ if ((buf_end - *buf) >= num_words) {
*buf += num_words;
return 0;
}
* Partially stolen from drm_memory.h
*/
-static __inline__ drm_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq,
+static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
unsigned long offset,
unsigned long size,
drm_device_t * dev)
{
struct list_head *list;
drm_map_list_t *r_list;
- drm_map_t *map = seq->map_cache;
+ drm_local_map_t *map = seq->map_cache;
if (map && map->offset <= offset
&& (offset + size) <= (map->offset + map->size)) {
int agp_texture;
int multitex;
drm_device_t *dev;
- drm_map_t *map_cache;
+ drm_local_map_t *map_cache;
uint32_t vertex_count;
int agp;
const uint32_t *buf_start;
extern int via_verify_command_stream(const uint32_t * buf, unsigned int size,
drm_device_t * dev, int agp);
+extern int via_parse_command_stream(drm_device_t *dev, const uint32_t *buf,
+ unsigned int size);
#endif
unsigned int i;
volatile int *lock;
+ if (!dev_priv->sarea_priv)
+ return;
+
for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
- lock = (int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
+ lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
if (_DRM_LOCK_IS_HELD(*lock)
&& (*lock & _DRM_LOCK_CONT)) {
if (fx.lock > VIA_NR_XVMC_LOCKS)
return -EFAULT;
- lock = (int *)XVMCLOCKPTR(sAPriv, fx.lock);
+ lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx.lock);
switch (fx.func) {
case VIA_FUTEX_WAIT: