1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
/* Interrupt-source bits shared by the 16-bit interrupt registers
 * (I915REG_INT_ENABLE_R / I915REG_INT_IDENTITY_R — see
 * i915_enable_interrupt() and i915_driver_irq_handler() below). */
/* Ring-generated user interrupt (paired with GFX_OP_USER_INTERRUPT). */
34 #define USER_INT_FLAG (1<<1)
/* Vertical-blank interrupt, display pipe B. */
35 #define VSYNC_PIPEB_FLAG (1<<5)
/* Vertical-blank interrupt, display pipe A. */
36 #define VSYNC_PIPEA_FLAG (1<<7)
/* All-ones 32-bit value used as an "invalid id" sentinel. */
38 #define MAX_NOPID ((u32)~0)
/*
 * Emit blits for scheduled buffer swaps.
 *
 * This function will be called with the HW lock held: it walks the
 * queue of pending vblank swaps and, for each swap whose vblank
 * sequence has been reached, emits XY_SRC_COPY blits (back -> front)
 * clipped to the drawable's rectangles, then frees the queue entry.
 *
 * NOTE(review): the original line numbering embedded in this chunk
 * jumps (46, 50-52, 59-60, ...), so braces, some declarations and the
 * ring-emit boilerplate are elided from this view; only the retained
 * lines are shown below.
 */
45 static void i915_vblank_tasklet(drm_device_t *dev)
47 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* BUGFIX: spin_lock_irqsave() stores the saved flags in an
 * 'unsigned long'; declaring it 'unsigned int' truncates the saved
 * flags word on 64-bit kernels.  (Fixed the same way upstream.) */
48 unsigned long irqflags;
49 struct list_head *list, *tmp;
53 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
/* Safe iteration: entries may be unlinked/freed while walking. */
55 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
56 drm_i915_vbl_swap_t *vbl_swap =
57 list_entry(list, drm_i915_vbl_swap_t, head);
/* Pick the vblank counter for the pipe this swap was queued on. */
58 atomic_t *counter = vbl_swap->pipe ? &dev->vbl_received2 :
/* Wraparound-safe "sequence reached" test (window of 2^23 counts),
 * same idiom as i915_driver_vblank_do_wait(). */
61 if ((atomic_read(counter) - vbl_swap->sequence) <= (1<<23)) {
62 drm_drawable_info_t *drw;
/* Drop swaps_lock before taking drw_lock; irqs stay disabled. */
64 spin_unlock(&dev_priv->swaps_lock);
66 spin_lock(&dev->drw_lock);
68 drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
71 int i, num_rects = drw->num_rects;
72 drm_clip_rect_t *rect = drw->rects;
73 drm_i915_sarea_t *sarea_priv =
75 u32 cpp = dev_priv->cpp;
/* 32bpp copies also write alpha; otherwise RGB-only blit. */
76 u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
77 XY_SRC_COPY_BLT_WRITE_ALPHA |
78 XY_SRC_COPY_BLT_WRITE_RGB)
79 : XY_SRC_COPY_BLT_CMD;
/* Pitch in bytes | ROP 0xcc (SRCCOPY) | cpp-dependent depth bit. */
80 u32 pitchropcpp = (sarea_priv->pitch * cpp) |
81 (0xcc << 16) | (cpp << 23) |
85 i915_kernel_lost_context(dev);
/* Reset the drawing rectangle to the full screen before blitting. */
89 OUT_RING(GFX_OP_DRAWRECT_INFO);
92 OUT_RING(sarea_priv->width |
93 sarea_priv->height << 16);
94 OUT_RING(sarea_priv->width |
95 sarea_priv->height << 16);
/* The kernel now owns the ring context for these blits. */
100 sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
/* One blit per clip rect: back buffer -> front buffer. */
102 for (i = 0; i < num_rects; i++, rect++) {
106 OUT_RING(pitchropcpp);
107 OUT_RING((rect->y1 << 16) | rect->x1);
108 OUT_RING((rect->y2 << 16) | rect->x2);
109 OUT_RING(sarea_priv->front_offset);
110 OUT_RING((rect->y1 << 16) | rect->x1);
/* Source pitch only (low 16 bits of the pitch/rop word). */
111 OUT_RING(pitchropcpp & 0xffff);
112 OUT_RING(sarea_priv->back_offset);
118 spin_unlock(&dev->drw_lock);
/* Re-acquire swaps_lock before unlinking the completed entry. */
120 spin_lock(&dev_priv->swaps_lock);
124 drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
126 dev_priv->swaps_pending--;
130 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
/*
 * Top-half interrupt handler: read and acknowledge the pending interrupt
 * identity bits, then wake waiters for user interrupts and vblanks.
 * NOTE(review): the embedded original line numbers jump (134, 137-138,
 * 142, ...), so the declaration of 'temp', the "no interrupt -> return
 * IRQ_NONE" path, braces and the final return are elided from this view.
 */
133 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
135 drm_device_t *dev = (drm_device_t *) arg;
136 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
139 temp = I915_READ16(I915REG_INT_IDENTITY_R);
/* Only the three sources this driver handles are of interest. */
141 temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);
143 DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
/* Writing the bits back to the identity register acks them. */
148 I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
/* Publish the latest breadcrumb to userspace via the sarea. */
150 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
152 if (temp & USER_INT_FLAG)
153 DRM_WAKEUP(&dev_priv->irq_queue);
155 if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
/* With both pipes enabled, count each pipe separately ... */
156 if ((dev_priv->vblank_pipe &
157 (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
158 == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
159 if (temp & VSYNC_PIPEA_FLAG)
160 atomic_inc(&dev->vbl_received);
161 if (temp & VSYNC_PIPEB_FLAG)
162 atomic_inc(&dev->vbl_received2);
/* ... otherwise (else-branch elided above) the single enabled pipe
 * feeds the primary counter. */
164 atomic_inc(&dev->vbl_received);
166 DRM_WAKEUP(&dev->vbl_queue);
167 drm_vbl_send_signals(dev);
/* Run the swap blits once the HW lock can be taken. */
169 drm_locked_tasklet(dev, i915_vblank_tasklet);
/*
 * Emit a user-interrupt request into the ring and return the breadcrumb
 * value that will identify it (callers pass it to i915_wait_irq()).
 * Must be called with the HW lock held — it writes to the ring.
 * NOTE(review): ring-emit boilerplate (BEGIN/ADVANCE_LP_RING and some
 * OUT_RING words) is elided from this view per the numbering gaps.
 */
175 static int i915_emit_irq(drm_device_t * dev)
177 drm_i915_private_t *dev_priv = dev->dev_private;
180 i915_kernel_lost_context(dev);
182 DRM_DEBUG("%s\n", __FUNCTION__);
/* Advance the software counter and mirror it into the sarea. */
184 dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
/* Wrap before the counter goes negative as a signed 32-bit value;
 * 0 is skipped so a breadcrumb of 0 never means "emitted". */
186 if (dev_priv->counter > 0x7FFFFFFFUL)
187 dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
/* Store the counter at the breadcrumb index, then raise the IRQ. */
190 OUT_RING(CMD_STORE_DWORD_IDX);
192 OUT_RING(dev_priv->counter);
195 OUT_RING(GFX_OP_USER_INTERRUPT);
198 return dev_priv->counter;
/*
 * Block until the hardware breadcrumb reaches irq_nr (emitted by
 * i915_emit_irq()), with a 3-second timeout.
 * NOTE(review): the early "return 0" after the breadcrumb check, the
 * declaration of 'ret' and the final return are elided from this view
 * (original numbering jumps 208-211, 216, 221-226).
 */
201 static int i915_wait_irq(drm_device_t * dev, int irq_nr)
203 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
206 DRM_DEBUG("%s irq_nr=%d breadcrumb=%d\n", __FUNCTION__, irq_nr,
207 READ_BREADCRUMB(dev_priv));
/* Fast path: the request already completed. */
209 if (READ_BREADCRUMB(dev_priv) >= irq_nr)
/* Mark the wait in the performance boxes shown to userspace. */
212 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
214 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
215 READ_BREADCRUMB(dev_priv) >= irq_nr);
/* Timeout: log how far the hardware got vs. what was emitted. */
217 if (ret == DRM_ERR(EBUSY)) {
218 DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n",
220 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
223 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
/*
 * Common helper for the pipe A/B vblank waits: sleep (3 s timeout)
 * until 'counter' has passed *sequence, then report the counter value
 * that satisfied the wait back through *sequence.
 * NOTE(review): the second parameter line ('atomic_t *counter'), the
 * dev_priv NULL check guarding the DRM_ERROR below, 'ret' declaration
 * and the final return are elided from this view.
 */
227 static int i915_driver_vblank_do_wait(drm_device_t *dev, unsigned int *sequence,
230 drm_i915_private_t *dev_priv = dev->dev_private;
231 unsigned int cur_vblank;
235 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
236 return DRM_ERR(EINVAL);
/* Wraparound-safe comparison: "counter has reached *sequence" iff the
 * unsigned difference is within a 2^23 window. */
239 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
240 (((cur_vblank = atomic_read(counter))
241 - *sequence) <= (1<<23)));
243 *sequence = cur_vblank;
249 int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
251 return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
254 int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence)
256 return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
259 /* Needs the lock as it touches the ring.
/*
 * IOCTL: emit a user interrupt and copy the resulting breadcrumb
 * sequence number back to userspace.  Needs the HW lock (touches the
 * ring via i915_emit_irq()).
 * NOTE(review): the dev_priv NULL check guarding the DRM_ERROR below,
 * the second DRM_COPY_FROM_USER_IOCTL argument line, 'result'
 * declaration and the final return are elided from this view.
 */
261 int i915_irq_emit(DRM_IOCTL_ARGS)
264 drm_i915_private_t *dev_priv = dev->dev_private;
265 drm_i915_irq_emit_t emit;
268 LOCK_TEST_WITH_RETURN(dev, filp);
271 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
272 return DRM_ERR(EINVAL);
275 DRM_COPY_FROM_USER_IOCTL(emit, (drm_i915_irq_emit_t __user *) data,
278 result = i915_emit_irq(dev);
/* Hand the breadcrumb back through the user-supplied pointer. */
280 if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) {
281 DRM_ERROR("copy_to_user\n");
282 return DRM_ERR(EFAULT);
288 /* Doesn't need the hardware lock.
/*
 * IOCTL: wait for a previously emitted user interrupt (by breadcrumb
 * sequence).  Doesn't need the hardware lock.
 * NOTE(review): the dev_priv NULL check guarding the DRM_ERROR below
 * and the second DRM_COPY_FROM_USER_IOCTL argument line are elided
 * from this view.
 */
290 int i915_irq_wait(DRM_IOCTL_ARGS)
293 drm_i915_private_t *dev_priv = dev->dev_private;
294 drm_i915_irq_wait_t irqwait;
297 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
298 return DRM_ERR(EINVAL);
301 DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_i915_irq_wait_t __user *) data,
304 return i915_wait_irq(dev, irqwait.irq_seq);
/*
 * Program the interrupt-enable register from dev_priv->vblank_pipe:
 * the user interrupt is always enabled, plus the vsync bit for each
 * requested pipe.  Rejects unknown pipe bits with EINVAL.
 * NOTE(review): the declaration/zeroing of 'flag' and the final
 * "return 0" are elided from this view (numbering jumps 310-312, 323).
 */
307 static int i915_enable_interrupt (drm_device_t *dev)
309 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
313 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
314 flag |= VSYNC_PIPEA_FLAG;
315 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
316 flag |= VSYNC_PIPEB_FLAG;
/* Any bit other than pipe A/B is an invalid configuration. */
317 if (dev_priv->vblank_pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
318 DRM_ERROR("%s called with invalid pipe 0x%x\n",
319 __FUNCTION__, dev_priv->vblank_pipe);
320 return DRM_ERR(EINVAL);
322 I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
326 /* Set the vblank monitor pipe
/*
 * IOCTL: set which pipe(s) generate vblank interrupts, then reprogram
 * the enable register via i915_enable_interrupt() (which validates the
 * pipe bits).
 * NOTE(review): the dev_priv NULL check guarding the DRM_ERROR below
 * and the second DRM_COPY_FROM_USER_IOCTL argument line are elided
 * from this view.
 */
328 int i915_vblank_pipe_set(DRM_IOCTL_ARGS)
331 drm_i915_private_t *dev_priv = dev->dev_private;
332 drm_i915_vblank_pipe_t pipe;
335 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
336 return DRM_ERR(EINVAL);
339 DRM_COPY_FROM_USER_IOCTL(pipe, (drm_i915_vblank_pipe_t __user *) data,
342 dev_priv->vblank_pipe = pipe.pipe;
343 return i915_enable_interrupt (dev);
/*
 * IOCTL: report which pipes currently have vblank interrupts enabled,
 * derived from the hardware enable register rather than the cached
 * dev_priv->vblank_pipe.
 * NOTE(review): the dev_priv NULL check, the declaration of 'flag',
 * the zeroing of pipe.pipe before the |= below, and the final return
 * are elided from this view (numbering jumps 351-353, 356-357, 359).
 */
346 int i915_vblank_pipe_get(DRM_IOCTL_ARGS)
349 drm_i915_private_t *dev_priv = dev->dev_private;
350 drm_i915_vblank_pipe_t pipe;
354 DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
355 return DRM_ERR(EINVAL);
358 flag = I915_READ(I915REG_INT_ENABLE_R);
360 if (flag & VSYNC_PIPEA_FLAG)
361 pipe.pipe |= DRM_I915_VBLANK_PIPE_A;
362 if (flag & VSYNC_PIPEB_FLAG)
363 pipe.pipe |= DRM_I915_VBLANK_PIPE_B;
364 DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_pipe_t __user *) data, pipe,
/*
 * Schedule buffer swap at given vertical blank: validate the request,
 * reject duplicates already queued, then allocate a queue entry and
 * append it for i915_vblank_tasklet() to execute.
 *
 * NOTE(review): the original line numbering embedded in this chunk
 * jumps (373-374, 380-381, ...), so braces, the dev_priv NULL check,
 * the "return 0" paths and similar lines are elided from this view.
 */
372 int i915_vblank_swap(DRM_IOCTL_ARGS)
375 drm_i915_private_t *dev_priv = dev->dev_private;
376 drm_i915_vblank_swap_t swap;
377 drm_i915_vbl_swap_t *vbl_swap;
/* BUGFIX: spin_lock_irqsave() needs an 'unsigned long' flags word;
 * 'unsigned int' truncates the saved flags on 64-bit kernels. */
378 unsigned long irqflags;
379 struct list_head *list;
382 DRM_ERROR("%s called with no initialization\n", __func__);
383 return DRM_ERR(EINVAL);
386 if (dev_priv->sarea_priv->rotation) {
387 DRM_DEBUG("Rotation not supported\n");
388 return DRM_ERR(EINVAL);
/* Cap the queue so userspace can't exhaust kernel memory. */
391 if (dev_priv->swaps_pending >= 100) {
392 DRM_DEBUG("Too many swaps queued\n");
393 return DRM_ERR(EBUSY);
396 DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data,
/* The requested pipe must be one we deliver vblank IRQs for. */
399 if (swap.pipe > 1 || !(dev_priv->vblank_pipe & (1 << swap.pipe))) {
400 DRM_ERROR("Invalid pipe %d\n", swap.pipe);
401 return DRM_ERR(EINVAL);
404 spin_lock_irqsave(&dev->drw_lock, irqflags);
406 if (!drm_get_drawable_info(dev, swap.drawable)) {
407 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
408 DRM_ERROR("Invalid drawable ID %d\n", swap.drawable);
409 return DRM_ERR(EINVAL);
412 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
/* An identical (drawable, pipe, sequence) entry is a no-op. */
414 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
416 list_for_each(list, &dev_priv->vbl_swaps.head) {
417 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
419 if (vbl_swap->drw_id == swap.drawable &&
420 vbl_swap->pipe == swap.pipe &&
421 vbl_swap->sequence == swap.sequence) {
422 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
423 DRM_DEBUG("Already scheduled\n");
428 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
/* BUGFIX: sizeof(vbl_swap) is the size of a *pointer*, so only 4/8
 * bytes were allocated for the queue entry — must be the size of the
 * pointed-to struct. */
430 vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
433 DRM_ERROR("Failed to allocate memory to queue swap\n");
434 return DRM_ERR(ENOMEM);
439 vbl_swap->drw_id = swap.drawable;
440 vbl_swap->pipe = swap.pipe;
441 vbl_swap->sequence = swap.sequence;
443 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
/* FIX: link via the embedded list_head member instead of casting the
 * struct pointer, which silently depends on 'head' being the first
 * member of drm_i915_vbl_swap_t. */
445 list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
446 dev_priv->swaps_pending++;
448 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
/*
 * Called before the IRQ line is requested: quiesce the interrupt
 * hardware so no interrupts fire until postinstall enables them.
 * HWSTAM 0xfffe leaves only bit 0 unmasked from the hardware-status
 * page; mask and enable registers are cleared.
 */
455 void i915_driver_irq_preinstall(drm_device_t * dev)
457 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
459 I915_WRITE16(I915REG_HWSTAM, 0xfffe);
460 I915_WRITE16(I915REG_INT_MASK_R, 0x0);
461 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
464 void i915_driver_irq_postinstall(drm_device_t * dev)
466 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
468 dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED;
469 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
470 dev_priv->swaps_pending = 0;
472 i915_enable_interrupt(dev);
473 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
/*
 * Called when the IRQ line is released: mask and disable every
 * interrupt source, then acknowledge anything still pending by writing
 * the identity bits back to the register.
 * NOTE(review): the declaration of 'temp' and a dev_priv NULL guard
 * are elided from this view (original numbering jumps 479-483, 487).
 */
476 void i915_driver_irq_uninstall(drm_device_t * dev)
478 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
484 I915_WRITE16(I915REG_HWSTAM, 0xffff);
485 I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
486 I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
/* Ack any interrupt that raced with the disable above. */
488 temp = I915_READ16(I915REG_INT_IDENTITY_R);
489 I915_WRITE16(I915REG_INT_IDENTITY_R, temp);