/* drivers/media/video/ivtv/ivtv-irq.c  (linux-2.6)
 * From commit: V4L/DVB (6087): ivtv: prevent changing VBI format while
 * capture is in progress.
 */
1 /* interrupt handling
2     Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
3     Copyright (C) 2004  Chris Kennedy <c@groovy.org>
4     Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>
5
6     This program is free software; you can redistribute it and/or modify
7     it under the terms of the GNU General Public License as published by
8     the Free Software Foundation; either version 2 of the License, or
9     (at your option) any later version.
10
11     This program is distributed in the hope that it will be useful,
12     but WITHOUT ANY WARRANTY; without even the implied warranty of
13     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14     GNU General Public License for more details.
15
16     You should have received a copy of the GNU General Public License
17     along with this program; if not, write to the Free Software
18     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19  */
20
21 #include "ivtv-driver.h"
22 #include "ivtv-firmware.h"
23 #include "ivtv-fileops.h"
24 #include "ivtv-queue.h"
25 #include "ivtv-udma.h"
26 #include "ivtv-irq.h"
27 #include "ivtv-ioctl.h"
28 #include "ivtv-mailbox.h"
29 #include "ivtv-vbi.h"
30 #include "ivtv-yuv.h"
31
/* Marker word written at the start of a pending transfer region; dma_post()
   looks for it to locate where the data actually begins, then restores the
   word that it overwrote (saved in s->pending_backup). */
#define DMA_MAGIC_COOKIE 0x000001fe

static void ivtv_dma_dec_start(struct ivtv_stream *s);

/* Maps the stream index reported by the firmware in an ENC START CAP
   mailbox (data[0]) to the driver's encoder stream type. */
static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};
42
43
44 static void ivtv_pio_work_handler(struct ivtv *itv)
45 {
46         struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
47         struct ivtv_buffer *buf;
48         struct list_head *p;
49         int i = 0;
50
51         IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
52         if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
53                         s->v4l2dev == NULL || !ivtv_use_pio(s)) {
54                 itv->cur_pio_stream = -1;
55                 /* trigger PIO complete user interrupt */
56                 write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
57                 return;
58         }
59         IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
60         buf = list_entry(s->q_dma.list.next, struct ivtv_buffer, list);
61         list_for_each(p, &s->q_dma.list) {
62                 struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
63                 u32 size = s->sg_processing[i].size & 0x3ffff;
64
65                 /* Copy the data from the card to the buffer */
66                 if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
67                         memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
68                 }
69                 else {
70                         memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
71                 }
72                 i++;
73                 if (i == s->sg_processing_size)
74                         break;
75         }
76         write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
77 }
78
79 void ivtv_irq_work_handler(struct work_struct *work)
80 {
81         struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);
82
83         DEFINE_WAIT(wait);
84
85         if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
86                 ivtv_pio_work_handler(itv);
87
88         if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
89                 ivtv_vbi_work_handler(itv);
90
91         if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
92                 ivtv_yuv_work_handler(itv);
93 }
94
/* Determine the required DMA size, setup enough buffers in the predma queue and
   actually copy the data from the card to the buffers in case a PIO transfer is
   required for this stream.

   @s:    the stream the transfer request is for
   @data: mailbox arguments from the firmware (offset/size/PTS). Note that
          the two VBI cases do not read @data at all — they take their
          offsets/sizes from card memory (itv->vbi.*) instead.
   Returns 0 on success, -1 if the stream is not ready, no VBI offset is
   available yet, or not enough buffers could be obtained.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	struct list_head *p;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;	/* entries already on q_predma from an earlier append; skipped below */
	int idx = s->sg_pending_size;		/* append after any already-pending SG entries */
	int rc;

	/* sanity checks */
	if (s->v4l2dev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}

	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
		case IVTV_ENC_STREAM_TYPE_MPG:
			offset = data[1];
			size = data[2];
			s->pending_pts = 0;
			break;

		case IVTV_ENC_STREAM_TYPE_YUV:
			offset = data[1];
			size = data[2];
			UVoffset = data[3];
			UVsize = data[4];
			s->pending_pts = ((u64) data[5] << 32) | data[6];
			break;

		case IVTV_ENC_STREAM_TYPE_PCM:
			/* skip the 12-byte header preceding the PCM data;
			   the PTS is read back out of that header */
			offset = data[1] + 12;
			size = data[2] - 12;
			s->pending_pts = read_dec(offset - 8) |
				((u64)(read_dec(offset - 12)) << 32);
			if (itv->has_cx23415)
				offset += IVTV_DECODER_OFFSET;
			break;

		case IVTV_ENC_STREAM_TYPE_VBI:
			size = itv->vbi.enc_size * itv->vbi.fpi;
			offset = read_enc(itv->vbi.enc_start - 4) + 12;
			if (offset == 12) {
				/* the card has not published a VBI offset yet */
				IVTV_DEBUG_INFO("VBI offset == 0\n");
				return -1;
			}
			s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
			break;

		case IVTV_DEC_STREAM_TYPE_VBI:
			size = read_dec(itv->vbi.dec_start + 4) + 8;
			offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
			s->pending_pts = 0;
			offset += IVTV_DECODER_OFFSET;
			break;
		default:
			/* shouldn't happen */
			return -1;
	}

	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
		/* Save the first word of the data and overwrite it with
		   DMA_MAGIC_COOKIE; dma_post() scans for the cookie to find
		   where the data landed and then restores the saved word. */
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->pending_backup = read_enc(offset);
			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
		}
		s->pending_offset = offset;
	}

	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}

	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	/* rc < 0: not enough buffers; rc > 0: buffers had to be stolen from q_full */
	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	/* NOTE(review): s_flags is manipulated with test_bit/set_bit everywhere
	   else in this file; the raw '&' here assumes IVTV_F_S_APPL_IO is a
	   mask rather than a bit number — verify against its definition. */
	if (rc && !s->buffers_stolen && (s->s_flags & IVTV_F_S_APPL_IO)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in sg_pending */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	/* clear the head of the first buffer; NOTE(review): presumably so a
	   stale DMA_MAGIC_COOKIE cannot be found there by dma_post() — confirm */
	memset(buf->buf, 0, 128);
	list_for_each(p, &s->q_predma.list) {
		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);

		if (skip_bufs-- > 0)
			continue;
		s->sg_pending[idx].dst = buf->dma_handle;
		s->sg_pending[idx].src = offset;
		s->sg_pending[idx].size = s->buf_size;
		buf->bytesused = (size < s->buf_size) ? size : s->buf_size;
		buf->dma_xfer_cnt = s->dma_xfer_cnt;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) {	/* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->sg_pending_size = idx;
	return 0;
}
236
237 static void dma_post(struct ivtv_stream *s)
238 {
239         struct ivtv *itv = s->itv;
240         struct ivtv_buffer *buf = NULL;
241         struct list_head *p;
242         u32 offset;
243         u32 *u32buf;
244         int x = 0;
245
246         IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
247                         s->name, s->dma_offset);
248         list_for_each(p, &s->q_dma.list) {
249                 buf = list_entry(p, struct ivtv_buffer, list);
250                 u32buf = (u32 *)buf->buf;
251
252                 /* Sync Buffer */
253                 ivtv_buf_sync_for_cpu(s, buf);
254
255                 if (x == 0 && ivtv_use_dma(s)) {
256                         offset = s->dma_last_offset;
257                         if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
258                         {
259                                 for (offset = 0; offset < 64; offset++) {
260                                         if (u32buf[offset] == DMA_MAGIC_COOKIE) {
261                                                 break;
262                                         }
263                                 }
264                                 offset *= 4;
265                                 if (offset == 256) {
266                                         IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
267                                         offset = s->dma_last_offset;
268                                 }
269                                 if (s->dma_last_offset != offset)
270                                         IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
271                                 s->dma_last_offset = offset;
272                         }
273                         if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
274                                                 s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
275                                 write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
276                         }
277                         else {
278                                 write_enc_sync(0, s->dma_offset);
279                         }
280                         if (offset) {
281                                 buf->bytesused -= offset;
282                                 memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
283                         }
284                         *u32buf = cpu_to_le32(s->dma_backup);
285                 }
286                 x++;
287                 /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
288                 if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
289                     s->type == IVTV_ENC_STREAM_TYPE_VBI)
290                         buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
291         }
292         if (buf)
293                 buf->bytesused += s->dma_last_offset;
294         if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
295                 list_for_each(p, &s->q_dma.list) {
296                         buf = list_entry(p, struct ivtv_buffer, list);
297
298                         /* Parse and Groom VBI Data */
299                         s->q_dma.bytesused -= buf->bytesused;
300                         ivtv_process_vbi_data(itv, buf, 0, s->type);
301                         s->q_dma.bytesused += buf->bytesused;
302                 }
303                 if (s->id == -1) {
304                         ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
305                         return;
306                 }
307         }
308         ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
309         if (s->id != -1)
310                 wake_up(&s->waitq);
311 }
312
313 void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
314 {
315         struct ivtv *itv = s->itv;
316         struct ivtv_buffer *buf;
317         struct list_head *p;
318         u32 y_size = itv->params.height * itv->params.width;
319         u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
320         int y_done = 0;
321         int bytes_written = 0;
322         unsigned long flags = 0;
323         int idx = 0;
324
325         IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
326         buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
327         list_for_each(p, &s->q_predma.list) {
328                 struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
329
330                 /* YUV UV Offset from Y Buffer */
331                 if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && bytes_written >= y_size) {
332                         offset = uv_offset;
333                         y_done = 1;
334                 }
335                 s->sg_pending[idx].src = buf->dma_handle;
336                 s->sg_pending[idx].dst = offset;
337                 s->sg_pending[idx].size = buf->bytesused;
338
339                 offset += buf->bytesused;
340                 bytes_written += buf->bytesused;
341
342                 /* Sync SG buffers */
343                 ivtv_buf_sync_for_device(s, buf);
344                 idx++;
345         }
346         s->sg_pending_size = idx;
347
348         /* Sync Hardware SG List of buffers */
349         ivtv_stream_sync_for_device(s);
350         if (lock)
351                 spin_lock_irqsave(&itv->dma_reg_lock, flags);
352         if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
353                 ivtv_dma_dec_start(s);
354         }
355         else {
356                 set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
357         }
358         if (lock)
359                 spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
360 }
361
362 static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
363 {
364         struct ivtv *itv = s->itv;
365
366         s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
367         s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
368         s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
369         s->sg_processed++;
370         /* Sync Hardware SG List of buffers */
371         ivtv_stream_sync_for_device(s);
372         write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
373         write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
374 }
375
376 static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
377 {
378         struct ivtv *itv = s->itv;
379
380         s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
381         s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
382         s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
383         s->sg_processed++;
384         /* Sync Hardware SG List of buffers */
385         ivtv_stream_sync_for_device(s);
386         write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
387         write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
388 }
389
/* Start the encoder DMA (or PIO) for stream s: move the prepared buffers
   to q_dma, optionally append a pending VBI transfer to an MPEG one, latch
   the pending state into the in-flight state and kick the first segment.
   NOTE(review): callers appear to serialize DMA starts via dma_reg_lock
   (see ivtv_dma_stream_dec_prepare()) — confirm for the encoder path. */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	int i;

	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

	/* transfer 256 extra bytes so dma_post() has room to scan for the
	   magic cookie that marks the real start of the data */
	if (ivtv_use_dma(s))
		s->sg_pending[s->sg_pending_size - 1].size += 256;

	/* If this is an MPEG stream, and VBI data is also pending, then append the
	   VBI DMA to the MPEG DMA and transfer both sets of data at once.

	   VBI DMA is a second class citizen compared to MPEG and mixing them together
	   will confuse the firmware (the end of a VBI DMA is seen as the end of a
	   MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
	   sure we only use the MPEG DMA to transfer the VBI DMA if both are in
	   use. This way no conflicts occur. */
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
			s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		if (ivtv_use_dma(s_vbi))
			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
		for (i = 0; i < s_vbi->sg_pending_size; i++) {
			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
		}
		s_vbi->dma_offset = s_vbi->pending_offset;
		s_vbi->sg_pending_size = 0;
		s_vbi->dma_xfer_cnt++;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s->name);
	}

	/* latch the pending_* state into the in-flight dma_* state */
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;
	s->dma_offset = s->pending_offset;
	s->dma_backup = s->pending_backup;
	s->dma_pts = s->pending_pts;

	if (ivtv_use_pio(s)) {
		/* defer the actual copy to ivtv_pio_work_handler() */
		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		set_bit(IVTV_F_I_PIO, &itv->i_flags);
		itv->cur_pio_stream = s->type;
	}
	else {
		itv->dma_retries = 0;
		ivtv_dma_enc_start_xfer(s);
		set_bit(IVTV_F_I_DMA, &itv->i_flags);
		itv->cur_dma_stream = s->type;
		/* arm the 100 ms DMA watchdog timer */
		itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
		add_timer(&itv->dma_timer);
	}
}
453
/* Start a decoder (host -> card) DMA: latch the pending SG list into the
   in-flight state, kick the first segment and arm the 100 ms DMA watchdog.
   NOTE(review): appears to expect dma_reg_lock to be held by the caller —
   see ivtv_dma_stream_dec_prepare(). */
static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	s->dma_xfer_cnt++;
	/* latch the pending SG list into the in-flight state */
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;

	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	itv->dma_retries = 0;
	ivtv_dma_dec_start_xfer(s);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
	/* arm the 100 ms DMA watchdog timer */
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
	add_timer(&itv->dma_timer);
}
474
/* Handle completion of a decoder (host -> card) DMA: check for errors and
   retry, chain the next SG segment if one remains, then notify the firmware
   of the completed transfer and recycle the buffers. Also finishes user
   (UDMA) transfers. */
static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type = 0;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0) {
		/* neither a stream DMA nor a user DMA is in flight */
		del_timer(&itv->dma_timer);
		return;
	}

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		s = &itv->streams[itv->cur_dma_stream];
		ivtv_stream_sync_for_cpu(s);

		/* 0x14: DMA error status bits */
		if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
					read_reg(IVTV_REG_DMASTATUS),
					s->sg_processed, s->sg_processing_size, itv->dma_retries);
			write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
			if (itv->dma_retries == 3) {
				/* Too many retries, give up on this frame */
				itv->dma_retries = 0;
				s->sg_processed = s->sg_processing_size;
			}
			else {
				/* Retry, starting with the first xfer segment.
				   Just retrying the current segment is not sufficient. */
				s->sg_processed = 0;
				itv->dma_retries++;
			}
		}
		if (s->sg_processed < s->sg_processing_size) {
			/* DMA next buffer */
			ivtv_dma_dec_start_xfer(s);
			return;
		}
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
			hw_stream_type = 2;
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		/* For some reason must kick the firmware, like PIO mode,
		   I think this tells the firmware we are done and the size
		   of the xfer so it can calculate what we need next.
		   I think we can do this part ourselves but would have to
		   fully calculate xfer info ourselves and not use interrupts
		 */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	del_timer(&itv->dma_timer);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
539
/* Handle completion of an encoder (card -> host) DMA: check for errors and
   retry, chain the next SG segment if one remains, otherwise post the
   completed buffers — including any VBI transfer that was piggy-backed
   onto an MPEG DMA by ivtv_dma_enc_start(). */
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);
	if (itv->cur_dma_stream < 0) {
		/* no stream DMA in flight: stale completion */
		del_timer(&itv->dma_timer);
		return;
	}
	s = &itv->streams[itv->cur_dma_stream];
	ivtv_stream_sync_for_cpu(s);

	/* 0x18: mailbox status error bits */
	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
			s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (itv->dma_retries == 3) {
			/* Too many retries, give up on this frame */
			itv->dma_retries = 0;
			s->sg_processed = s->sg_processing_size;
		}
		else {
			/* Retry, starting with the first xfer segment.
			   Just retrying the current segment is not sufficient. */
			s->sg_processed = 0;
			itv->dma_retries++;
		}
	}
	if (s->sg_processed < s->sg_processing_size) {
		/* DMA next buffer */
		ivtv_dma_enc_start_xfer(s);
		return;
	}
	del_timer(&itv->dma_timer);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	/* a VBI transfer may have ridden along with this MPEG DMA */
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	s->sg_processing_size = 0;
	s->sg_processed = 0;
	wake_up(&itv->dma_waitq);
}
587
588 static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
589 {
590         struct ivtv_stream *s;
591
592         if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
593                 itv->cur_pio_stream = -1;
594                 return;
595         }
596         s = &itv->streams[itv->cur_pio_stream];
597         IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
598         clear_bit(IVTV_F_I_PIO, &itv->i_flags);
599         itv->cur_pio_stream = -1;
600         dma_post(s);
601         if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
602                 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
603         else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
604                 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
605         else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
606                 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
607         clear_bit(IVTV_F_I_PIO, &itv->i_flags);
608         if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
609                 s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
610                 dma_post(s);
611         }
612         wake_up(&itv->dma_waitq);
613 }
614
/* DMA error/watchdog handler: clear the DMA status and retry whichever
   transfer was in flight (stream DMA or user DMA); if neither can be
   retried, drop the in-flight state and wake waiters. */
static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];

	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		/* retry */
		/* stream types at or above IVTV_DEC_STREAM_TYPE_MPG are
		   decoder streams */
		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
			ivtv_dma_dec_start(s);
		else
			ivtv_dma_enc_start(s);
		return;
	}
	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		/* retry the user DMA */
		ivtv_udma_start(itv);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
644
645 static void ivtv_irq_enc_start_cap(struct ivtv *itv)
646 {
647         u32 data[CX2341X_MBOX_MAX_DATA];
648         struct ivtv_stream *s;
649
650         /* Get DMA destination and size arguments from card */
651         ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
652         IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);
653
654         if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
655                 IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
656                                 data[0], data[1], data[2]);
657                 return;
658         }
659         s = &itv->streams[ivtv_stream_map[data[0]]];
660         if (!stream_enc_dma_append(s, data)) {
661                 set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
662         }
663 }
664
/* Handle an encoder VBI capture interrupt: queue a transfer for the new
   VBI frame, first dropping stale requests if too many have piled up. */
static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
	/* deliberately uninitialized: stream_enc_dma_append() does not read
	   data[] for the VBI stream type (it reads card memory instead) */
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

	/* If more than two VBI buffers are pending, then
	   clear the old ones and start with this new one.
	   This can happen during transition stages when MPEG capturing is
	   started, but the first interrupts haven't arrived yet. During
	   that period VBI requests can accumulate without being able to
	   DMA the data. Since at most four VBI DMA buffers are available,
	   we just drop the old requests when there are already three
	   requests queued. */
	if (s->sg_pending_size > 2) {
		struct list_head *p;
		list_for_each(p, &s->q_predma.list) {
			struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
			ivtv_buf_sync_for_cpu(s, buf);
		}
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
		s->sg_pending_size = 0;
	}
	/* if we can append the data, and the MPEG stream isn't capturing,
	   then start a DMA request for just the VBI data. */
	if (!stream_enc_dma_append(s, data) &&
			!test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}
698
699 static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
700 {
701         u32 data[CX2341X_MBOX_MAX_DATA];
702         struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];
703
704         IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
705         if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
706                         !stream_enc_dma_append(s, data)) {
707                 set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
708         }
709 }
710
711 static void ivtv_irq_dec_data_req(struct ivtv *itv)
712 {
713         u32 data[CX2341X_MBOX_MAX_DATA];
714         struct ivtv_stream *s;
715
716         /* YUV or MPG */
717         ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);
718
719         if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
720                 itv->dma_data_req_size = itv->params.width * itv->params.height * 3 / 2;
721                 itv->dma_data_req_offset = data[1] ? data[1] : yuv_offset[0];
722                 s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
723         }
724         else {
725                 itv->dma_data_req_size = data[2] >= 0x10000 ? 0x10000 : data[2];
726                 itv->dma_data_req_offset = data[1];
727                 s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
728         }
729         IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
730                        itv->dma_data_req_offset, itv->dma_data_req_size);
731         if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
732                 set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
733         }
734         else {
735                 clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
736                 ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
737                 ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
738         }
739 }
740
/* Handle the decoder vertical sync interrupt: flip the display hardware to
 * the next YUV frame buffer when one is ready, wake up vsync waiters, and
 * schedule deferred VBI/YUV work for the work queue handler. */
static void ivtv_irq_vsync(struct ivtv *itv)
{
        /* The vsync interrupt is unusual in that it won't clear until
         * the end of the first line for the current field, at which
         * point it clears itself. This can result in repeated vsync
         * interrupts, or a missed vsync. Read some of the registers
         * to determine the line being displayed and ensure we handle
         * one vsync per frame.
         */
        unsigned int frame = read_reg(0x28c0) & 1;   /* field currently being displayed */
        int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame);

        if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

        /* NOTE(review): last_dma_frame is used as an index into sync_field[]
         * here (and new_frame_info[] below) before any check against -1;
         * only the update test at the bottom guards for -1. If
         * next_dma_frame can legitimately be -1 at this point this reads out
         * of bounds -- confirm how next_dma_frame is initialized. */
        if (((frame ^ itv->yuv_info.sync_field[last_dma_frame]) == 0 &&
                ((itv->lastVsyncFrame & 1) ^ itv->yuv_info.sync_field[last_dma_frame])) ||
                        (frame != (itv->lastVsyncFrame & 1) && !itv->yuv_info.frame_interlaced)) {
                int next_dma_frame = last_dma_frame;

                /* For interlaced frames with field delay, wait until at
                   least one field has lapsed before flipping. */
                if (!(itv->yuv_info.frame_interlaced && itv->yuv_info.field_delay[next_dma_frame] && itv->yuv_info.fields_lapsed < 1)) {
                        if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&itv->yuv_info.next_fill_frame)) {
                                /* Point the display hardware at the next YUV
                                   buffer: luma and chroma base addresses for
                                   both planes (registers take addr >> 4). */
                                write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
                                write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
                                write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
                                write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
                                next_dma_frame = (next_dma_frame + 1) & 0x3;  /* wrap over the 4 frame slots */
                                atomic_set(&itv->yuv_info.next_dma_frame, next_dma_frame);
                                itv->yuv_info.fields_lapsed = -1;
                        }
                }
        }
        /* A new field started: update event flags and wake up waiters. */
        if (frame != (itv->lastVsyncFrame & 1)) {
                struct ivtv_stream *s = ivtv_get_output_stream(itv);

                itv->lastVsyncFrame += 1;
                if (frame == 0) {
                        clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
                        clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
                }
                else {
                        set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
                }
                if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
                        set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
                        wake_up(&itv->event_waitq);
                }
                wake_up(&itv->vsync_waitq);
                if (s)
                        wake_up(&s->waitq);

                /* Send VBI to saa7127 */
                if (frame) {
                        /* Deferred to the work handler, dispatched at the
                           end of ivtv_irq_handler via IVTV_F_I_HAVE_WORK. */
                        set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
                        set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                }

                /* Check if we need to update the yuv registers */
                if ((itv->yuv_info.yuv_forced_update || itv->yuv_info.new_frame_info[last_dma_frame].update) && last_dma_frame != -1) {
                        /* Forced update without a pending frame update:
                           use the previous frame slot instead. */
                        if (!itv->yuv_info.new_frame_info[last_dma_frame].update)
                                last_dma_frame = (last_dma_frame - 1) & 3;

                        if (itv->yuv_info.new_frame_info[last_dma_frame].src_w) {
                                itv->yuv_info.update_frame = last_dma_frame;
                                itv->yuv_info.new_frame_info[last_dma_frame].update = 0;
                                itv->yuv_info.yuv_forced_update = 0;
                                set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
                                set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                        }
                }

                itv->yuv_info.fields_lapsed ++;
        }
}
814
/* IRQ bits after which pending DMA/PIO stream transfers may need to be
   (re)scheduled; tested against 'combo' in ivtv_irq_handler below. */
#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ)
816
/* Main interrupt handler. Reads and acknowledges the IRQ status register,
 * dispatches each asserted interrupt source to the handlers above, then
 * round-robins any pending DMA and PIO requests across the streams. The
 * whole handler runs under itv->dma_reg_lock. Returns IRQ_HANDLED when the
 * interrupt was ours, IRQ_NONE otherwise (including the forced-vsync case,
 * see the comment at the end). */
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
        struct ivtv *itv = (struct ivtv *)dev_id;
        u32 combo;
        u32 stat;
        int i;
        u8 vsync_force = 0;

        spin_lock(&itv->dma_reg_lock);
        /* get contents of irq status register */
        stat = read_reg(IVTV_REG_IRQSTATUS);

        /* Only the sources we haven't masked off are ours to handle. */
        combo = ~itv->irqmask & stat;

        /* Clear out IRQ */
        if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

        if (0 == combo) {
                /* The vsync interrupt is unusual and clears itself. If we
                 * took too long, we may have missed it. Do some checks
                 */
                if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
                        /* vsync is enabled, see if we're in a new field */
                        if ((itv->lastVsyncFrame & 1) != (read_reg(0x28c0) & 1)) {
                                /* New field, looks like we missed it */
                                IVTV_DEBUG_YUV("VSync interrupt missed %d\n",read_reg(0x28c0)>>16);
                                vsync_force = 1;
                        }
                }

                if (!vsync_force) {
                        /* No Vsync expected, wasn't for us */
                        spin_unlock(&itv->dma_reg_lock);
                        return IRQ_NONE;
                }
        }

        /* Exclude interrupts noted below from the output, otherwise the log is flooded with
           these messages */
        if (combo & ~0xff6d0400)
                IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

        if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
                IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
        }

        if (combo & IVTV_IRQ_DMA_READ) {
                ivtv_irq_dma_read(itv);
        }

        if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
                ivtv_irq_enc_dma_complete(itv);
        }

        if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
                ivtv_irq_enc_pio_complete(itv);
        }

        if (combo & IVTV_IRQ_DMA_ERR) {
                ivtv_irq_dma_err(itv);
        }

        if (combo & IVTV_IRQ_ENC_START_CAP) {
                ivtv_irq_enc_start_cap(itv);
        }

        if (combo & IVTV_IRQ_ENC_VBI_CAP) {
                ivtv_irq_enc_vbi_cap(itv);
        }

        if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
                ivtv_irq_dec_vbi_reinsert(itv);
        }

        if (combo & IVTV_IRQ_ENC_EOS) {
                /* Encoder reached end-of-stream: wake up capture waiters. */
                IVTV_DEBUG_IRQ("ENC EOS\n");
                set_bit(IVTV_F_I_EOS, &itv->i_flags);
                wake_up(&itv->cap_w);
        }

        if (combo & IVTV_IRQ_DEC_DATA_REQ) {
                ivtv_irq_dec_data_req(itv);
        }

        /* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
        if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
                ivtv_irq_vsync(itv);
        }

        if (combo & IVTV_IRQ_ENC_VIM_RST) {
                IVTV_DEBUG_IRQ("VIM RST\n");
                /*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
        }

        if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
                IVTV_DEBUG_INFO("Stereo mode changed\n");
        }

        /* If no DMA is in progress, start the next pending stream DMA.
           The round-robin index makes sure each stream gets a turn. */
        if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
                itv->irq_rr_idx++;
                for (i = 0; i < IVTV_MAX_STREAMS; i++) {
                        int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
                        struct ivtv_stream *s = &itv->streams[idx];

                        if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
                                continue;
                        if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
                                ivtv_dma_dec_start(s);
                        else
                                ivtv_dma_enc_start(s);
                        break;
                }
                /* No stream DMA pending: a user-space DMA may be waiting. */
                if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
                        ivtv_udma_start(itv);
                }
        }

        /* Same round-robin for streams that use PIO instead of DMA. */
        if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
                itv->irq_rr_idx++;
                for (i = 0; i < IVTV_MAX_STREAMS; i++) {
                        int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
                        struct ivtv_stream *s = &itv->streams[idx];

                        if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
                                continue;
                        if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
                                ivtv_dma_enc_start(s);
                        break;
                }
        }

        /* Run deferred work (VBI/YUV handling) outside interrupt context. */
        if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags))
                queue_work(itv->irq_work_queues, &itv->irq_work_queue);

        spin_unlock(&itv->dma_reg_lock);

        /* If we've just handled a 'forced' vsync, it's safest to say it
         * wasn't ours. Another device may have triggered it at just
         * the right time.
         */
        return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}
959
960 void ivtv_unfinished_dma(unsigned long arg)
961 {
962         struct ivtv *itv = (struct ivtv *)arg;
963
964         if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
965                 return;
966         IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
967
968         write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
969         clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
970         clear_bit(IVTV_F_I_DMA, &itv->i_flags);
971         itv->cur_dma_stream = -1;
972         wake_up(&itv->dma_waitq);
973 }