2 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
3 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #include "ivtv-driver.h"
22 #include "ivtv-firmware.h"
23 #include "ivtv-fileops.h"
24 #include "ivtv-queue.h"
25 #include "ivtv-udma.h"
27 #include "ivtv-ioctl.h"
28 #include "ivtv-mailbox.h"
32 #define DMA_MAGIC_COOKIE 0x000001fe
34 static void ivtv_dma_dec_start(struct ivtv_stream *s);
/* Maps the encoder stream index reported by the firmware in the DMA
   mailbox data to an ivtv stream type (see ivtv_irq_enc_start_cap and
   ivtv_irq_enc_dma_complete, which index this table with data[0]/data[1]). */
static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
/* Work-queue half of a PIO transfer: copy the data for the stream selected
 * in itv->cur_pio_stream from card memory into the stream's DMA-queue
 * buffers, then raise the PIO-complete interrupt by hand.
 * NOTE(review): extraction gaps — the list-iterator declaration (p), the
 * per-entry index i, and some braces/returns are not visible in this chunk.
 */
static void ivtv_pio_work_handler(struct ivtv *itv)
	struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
	struct ivtv_buffer *buf;
	IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
	/* Stream went away or no longer uses PIO: reset state but still
	   trigger completion so the completion path/waiters run. */
	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
	    s->v4l2dev == NULL || !ivtv_use_pio(s)) {
		itv->cur_pio_stream = -1;
		/* trigger PIO complete user interrupt */
		write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
	IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
	buf = list_entry(s->q_dma.list.next, struct ivtv_buffer, list);
	list_for_each(p, &s->q_dma.list) {
		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
		/* low 18 bits of the PIO entry hold the transfer size */
		u32 size = s->PIOarray[i].size & 0x3ffff;
		/* Copy the data from the card to the buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
			/* decoder VBI lives in decoder memory; adjust the offset */
			memcpy_fromio(buf->buf, itv->dec_mem + s->PIOarray[i].src - IVTV_DECODER_OFFSET, size);
			memcpy_fromio(buf->buf, itv->enc_mem + s->PIOarray[i].src, size);
		/* bit 31 marks the last PIO list entry */
		if (s->PIOarray[i].size & 0x80000000)
	/* manually raise the PIO-complete interrupt (register 0x44) */
	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
/* Deferred-work entry point: runs (outside IRQ context) whichever
 * handlers the interrupt handler flagged in itv->i_flags — PIO copy,
 * VBI processing and YUV register updates. */
void ivtv_irq_work_handler(struct work_struct *work)
	struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);
	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
		ivtv_pio_work_handler(itv);
	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
		ivtv_vbi_work_handler(itv);
	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
		ivtv_yuv_work_handler(itv);
95 /* Determine the required DMA size, setup enough buffers in the predma queue and
96 actually copy the data from the card to the buffers in case a PIO transfer is
97 required for this stream.
/* Determine the required DMA size for one encoder transfer, move enough
 * buffers into the predma queue and fill in the scatter-gather array.
 * data[] is the raw DMA mailbox payload from the firmware (offsets/sizes
 * differ per stream type, hence the switch).
 * Returns 0 on success, negative when the stream is not started/claimed
 * or buffers are unavailable (exact returns hidden by extraction gaps).
 * NOTE(review): extraction gaps — several case bodies, returns, the
 * list-iterator p and idx increments are not visible in this chunk.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	u32 bytes_needed = 0;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;
	int idx = s->SG_length;
	if (s->v4l2dev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
	/* determine offset, size and PTS for the various streams */
	case IVTV_ENC_STREAM_TYPE_MPG:
	case IVTV_ENC_STREAM_TYPE_YUV:
		/* PTS is split over two mailbox words */
		s->dma_pts = ((u64) data[5] << 32) | data[6];
	case IVTV_ENC_STREAM_TYPE_PCM:
		offset = data[1] + 12;
		/* PCM PTS is stored in card memory just before the data */
		s->dma_pts = read_dec(offset - 8) |
			((u64)(read_dec(offset - 12)) << 32);
		if (itv->has_cx23415)
			offset += IVTV_DECODER_OFFSET;
	case IVTV_ENC_STREAM_TYPE_VBI:
		size = itv->vbi.enc_size * itv->vbi.fpi;
		offset = read_enc(itv->vbi.enc_start - 4) + 12;
			IVTV_DEBUG_INFO("VBI offset == 0\n");
		s->dma_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
	case IVTV_DEC_STREAM_TYPE_VBI:
		size = read_dec(itv->vbi.dec_start + 4) + 8;
		offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
		offset += IVTV_DECODER_OFFSET;
		/* shouldn't happen */
	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->SG_length == 0) {
		/* PCM and decoder VBI on the cx23415 live in decoder memory;
		   back up the first word and overwrite it with the cookie so
		   dma_post() can locate the real start of the data later. */
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->dma_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
			s->dma_backup = read_enc(offset);
			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
		s->dma_offset = offset;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);
	/* steal buffers from q_full if q_free can't supply enough */
	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
	if (rc && !s->buffers_stolen && (s->s_flags & IVTV_F_S_APPL_IO)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	s->buffers_stolen = rc;
	/* got the buffers, now fill in SGarray (DMA) */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	memset(buf->buf, 0, 128);
	list_for_each(p, &s->q_predma.list) {
		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
		/* card -> host: dst is the buffer, src is card memory */
		s->SGarray[idx].dst = cpu_to_le32(buf->dma_handle);
		s->SGarray[idx].src = cpu_to_le32(offset);
		s->SGarray[idx].size = cpu_to_le32(s->buf_size);
		buf->bytesused = (size < s->buf_size) ? size : s->buf_size;
		buf->dma_xfer_cnt = s->dma_xfer_cnt;
		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;
		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		if (size == 0) { /* YUV */
			/* process the UV section */
237 static void dma_post(struct ivtv_stream *s)
239 struct ivtv *itv = s->itv;
240 struct ivtv_buffer *buf = NULL;
246 IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
247 s->name, s->dma_offset);
248 list_for_each(p, &s->q_dma.list) {
249 buf = list_entry(p, struct ivtv_buffer, list);
250 u32buf = (u32 *)buf->buf;
253 ivtv_buf_sync_for_cpu(s, buf);
256 offset = s->dma_last_offset;
257 if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
259 for (offset = 0; offset < 64; offset++) {
260 if (u32buf[offset] == DMA_MAGIC_COOKIE) {
266 IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
267 offset = s->dma_last_offset;
269 if (s->dma_last_offset != offset)
270 IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
271 s->dma_last_offset = offset;
273 if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
274 s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
275 write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
278 write_enc_sync(0, s->dma_offset);
281 buf->bytesused -= offset;
282 memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
284 *u32buf = cpu_to_le32(s->dma_backup);
287 /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
288 if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
289 s->type == IVTV_ENC_STREAM_TYPE_VBI)
290 buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
293 buf->bytesused += s->dma_last_offset;
294 if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
295 list_for_each(p, &s->q_dma.list) {
296 buf = list_entry(p, struct ivtv_buffer, list);
298 /* Parse and Groom VBI Data */
299 s->q_dma.bytesused -= buf->bytesused;
300 ivtv_process_vbi_data(itv, buf, 0, s->type);
301 s->q_dma.bytesused += buf->bytesused;
304 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
308 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
/* Build the scatter-gather list for a host -> decoder transfer starting at
 * 'offset' in card memory, sync the buffers and SG list for the device, and
 * either start the DMA immediately or mark it pending if one is running.
 * 'lock' presumably selects whether dma_reg_lock is taken here — TODO
 * confirm against the missing lines (extraction gaps hide idx/y_done
 * handling and several braces).
 */
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	u32 y_size = itv->params.height * itv->params.width;
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int bytes_written = 0;
	unsigned long flags = 0;
	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	list_for_each(p, &s->q_predma.list) {
		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
		/* YUV UV Offset from Y Buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && bytes_written >= y_size) {
		/* host -> card: src is the buffer, dst is card memory */
		s->SGarray[idx].src = cpu_to_le32(buf->dma_handle);
		s->SGarray[idx].dst = cpu_to_le32(offset);
		s->SGarray[idx].size = cpu_to_le32(buf->bytesused);
		offset += buf->bytesused;
		bytes_written += buf->bytesused;
		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
	/* Mark last buffer size for Interrupt flag */
	s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		ivtv_dma_dec_start(s);
	/* a DMA is already in flight: defer until it completes */
	set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
365 /* start the encoder DMA */
366 static void ivtv_dma_enc_start(struct ivtv_stream *s)
368 struct ivtv *itv = s->itv;
369 struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
372 IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);
374 if (s->q_predma.bytesused)
375 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
378 s->SGarray[s->SG_length - 1].size =
379 cpu_to_le32(le32_to_cpu(s->SGarray[s->SG_length - 1].size) + 256);
381 /* If this is an MPEG stream, and VBI data is also pending, then append the
382 VBI DMA to the MPEG DMA and transfer both sets of data at once.
384 VBI DMA is a second class citizen compared to MPEG and mixing them together
385 will confuse the firmware (the end of a VBI DMA is seen as the end of a
386 MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
387 sure we only use the MPEG DMA to transfer the VBI DMA if both are in
388 use. This way no conflicts occur. */
389 clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
390 if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->SG_length &&
391 s->SG_length + s_vbi->SG_length <= s->buffers) {
392 ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
393 if (ivtv_use_dma(s_vbi))
394 s_vbi->SGarray[s_vbi->SG_length - 1].size = cpu_to_le32(le32_to_cpu(s_vbi->SGarray[s->SG_length - 1].size) + 256);
395 for (i = 0; i < s_vbi->SG_length; i++) {
396 s->SGarray[s->SG_length++] = s_vbi->SGarray[i];
398 itv->vbi.dma_offset = s_vbi->dma_offset;
399 s_vbi->SG_length = 0;
400 s_vbi->dma_xfer_cnt++;
401 set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
402 IVTV_DEBUG_HI_DMA("include DMA for %s\n", s->name);
405 /* Mark last buffer size for Interrupt flag */
406 s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);
409 if (s->type == IVTV_ENC_STREAM_TYPE_VBI)
410 set_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
412 clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
414 if (ivtv_use_pio(s)) {
415 for (i = 0; i < s->SG_length; i++) {
416 s->PIOarray[i].src = le32_to_cpu(s->SGarray[i].src);
417 s->PIOarray[i].size = le32_to_cpu(s->SGarray[i].size);
419 set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
420 set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
421 set_bit(IVTV_F_I_PIO, &itv->i_flags);
422 itv->cur_pio_stream = s->type;
425 /* Sync Hardware SG List of buffers */
426 ivtv_stream_sync_for_device(s);
427 write_reg(s->SG_handle, IVTV_REG_ENCDMAADDR);
428 write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
429 set_bit(IVTV_F_I_DMA, &itv->i_flags);
430 itv->cur_dma_stream = s->type;
431 itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
432 add_timer(&itv->dma_timer);
/* Start a host -> decoder DMA for stream s: hand the SG list to the card,
 * kick the transfer, and arm the 100 ms DMA watchdog timer. */
static void ivtv_dma_dec_start(struct ivtv_stream *s)
	struct ivtv *itv = s->itv;
	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	/* put SG Handle into register 0x0c */
	write_reg(s->SG_handle, IVTV_REG_DECDMAADDR);
	/* bit 0 of DMAXFER starts the decoder DMA */
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
	/* watchdog: ivtv_unfinished_dma() fires if no completion in 100 ms */
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
	add_timer(&itv->dma_timer);
/* IRQ: a host -> decoder DMA finished. Check for errors, tell the firmware
 * how much was transferred, recycle the buffers and clear the DMA state.
 * NOTE(review): extraction gaps — some braces and ivtv_vapi() arguments are
 * not visible in this chunk. */
static void ivtv_irq_dma_read(struct ivtv *itv)
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
	/* DMA completed, so the watchdog is no longer needed */
	del_timer(&itv->dma_timer);
	if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
		IVTV_DEBUG_WARN("DEC DMA ERROR %x\n", read_reg(IVTV_REG_DMASTATUS));
		/* ack the error by writing back the low status bits */
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
			s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);
		ivtv_stream_sync_for_cpu(s);
		/* For some reason must kick the firmware, like PIO mode,
		   I think this tells the firmware we are done and the size
		   of the xfer so it can calculate what we need next.
		   I think we can do this part ourselves but would have to
		   fully calculate xfer info ourselves and not use interrupts */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
/* IRQ: a card -> host encoder DMA finished. Identify the stream from the
 * mailbox data, retry on DMA error, post-process the buffers (dma_post via
 * the missing lines, presumably — TODO confirm), and also post the VBI
 * stream if its data rode along on this DMA. */
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;
	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d\n", data[0], data[1]);
	if (test_and_clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags))
	else if (data[1] > 2)
	/* data[1] selects the encoder stream (see ivtv_stream_map) */
	s = &itv->streams[ivtv_stream_map[data[1]]];
	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x\n", data[0]);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		/* ask the firmware to reschedule the failed transfer */
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, data[1]);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	ivtv_stream_sync_for_cpu(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		/* VBI data was appended to this DMA; post it as well */
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		s->dma_offset = itv->vbi.dma_offset;
	wake_up(&itv->dma_waitq);
/* IRQ: the manually-triggered PIO-complete interrupt fired. Clear the PIO
 * state, ask the firmware for the next transfer of the same stream type,
 * and post any appended VBI data.
 * NOTE(review): IVTV_F_I_PIO is cleared twice in the visible code (here and
 * again below); intervening lines are missing, so one may belong to a
 * hidden early-exit path — confirm before removing. */
static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
	struct ivtv_stream *s;
	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
		itv->cur_pio_stream = -1;
	s = &itv->streams[itv->cur_pio_stream];
	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
	clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	itv->cur_pio_stream = -1;
	/* kick the firmware to schedule the next transfer (last arg selects
	   the stream: 0 = MPG, 1 = YUV, 2 = PCM) */
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
	else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
	else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		/* VBI data rode along; post it as well */
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		s->dma_offset = itv->vbi.dma_offset;
	wake_up(&itv->dma_waitq);
/* IRQ: the card reported a DMA error. Log it, ack the status register and
 * restart the DMA of the stream that was in flight (decoder or encoder,
 * chosen by stream type). */
static void ivtv_irq_dma_err(struct ivtv *itv)
	u32 data[CX2341X_MBOX_MAX_DATA];
	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];
		/* ack the error and retry the transfer */
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
			ivtv_dma_dec_start(s);
		ivtv_dma_enc_start(s);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
/* IRQ: the firmware has encoder data ready. Read destination/size from the
 * DMA mailbox, validate it, and append a transfer for the matching stream;
 * if the append fails, mark the transfer pending for later. */
static void ivtv_irq_enc_start_cap(struct ivtv *itv)
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;
	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);
	/* sanity-check: data[0] must index ivtv_stream_map, offsets non-zero */
	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
			data[0], data[1], data[2]);
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
/* IRQ: the firmware has VBI data ready. Drop stale pending VBI requests if
 * too many have accumulated, then append a transfer — started on its own
 * only when the MPEG stream is not capturing (otherwise it is carried by
 * the MPEG DMA, see ivtv_dma_enc_start). */
static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
	struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;
	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	/* If more than two VBI buffers are pending, then
	   clear the old ones and start with this new one.
	   This can happen during transition stages when MPEG capturing is
	   started, but the first interrupts haven't arrived yet. During
	   that period VBI requests can accumulate without being able to
	   DMA the data. Since at most four VBI DMA buffers are available,
	   we just drop the old requests when there are already three */
	if (s->SG_length > 2) {
		list_for_each(p, &s->q_predma.list) {
			struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
			ivtv_buf_sync_for_cpu(s, buf);
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
	/* if we can append the data, and the MPEG stream isn't capturing,
	   then start a DMA request for just the VBI data. */
	if (!stream_enc_dma_append(s, data) &&
			!test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
/* IRQ: the decoder has sliced VBI data to re-insert into the output.
 * Append a transfer for the decoder VBI stream if it is claimed.
 * NOTE(review): in the visible lines 'data' is passed to
 * stream_enc_dma_append() without being filled; the mailbox read is
 * presumably in a line hidden by extraction gaps — confirm. */
static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];
	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
			!stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
/* IRQ: the decoder requests more data. Compute the request size/offset
 * (full frame for YUV, up to 64 KiB for MPEG), and either mark the stream
 * as needing data or move buffers to predma and start the transfer. */
static void ivtv_irq_dec_data_req(struct ivtv *itv)
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;
	ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);
	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		/* YUV: one full 4:2:0 frame (w * h * 3/2 bytes) */
		itv->dma_data_req_size = itv->params.width * itv->params.height * 3 / 2;
		itv->dma_data_req_offset = data[1] ? data[1] : yuv_offset[0];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	/* MPEG: cap the request at 64 KiB */
	itv->dma_data_req_size = data[2] >= 0x10000 ? 0x10000 : data[2];
	itv->dma_data_req_offset = data[1];
	s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
		       itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		/* not enough buffered data yet; writer will restart the DMA */
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
	ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
/* IRQ: decoder vertical sync. Advances the YUV output frame pointers,
 * wakes vsync waiters, schedules VBI output and deferred YUV register
 * updates. NOTE(review): extraction gaps hide several braces and one
 * conditional body in this chunk. */
static void ivtv_irq_vsync(struct ivtv *itv)
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame. */
	unsigned int frame = read_reg(0x28c0) & 1;	/* bit 0 = current field */
	int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame);
	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");
	/* decide whether this vsync starts a new displayable frame */
	if (((frame ^ itv->yuv_info.sync_field[last_dma_frame]) == 0 &&
		((itv->lastVsyncFrame & 1) ^ itv->yuv_info.sync_field[last_dma_frame])) ||
			(frame != (itv->lastVsyncFrame & 1) && !itv->yuv_info.frame_interlaced)) {
		int next_dma_frame = last_dma_frame;
		/* for interlaced content with field delay, wait one field */
		if (!(itv->yuv_info.frame_interlaced && itv->yuv_info.field_delay[next_dma_frame] && itv->yuv_info.fields_lapsed < 1)) {
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&itv->yuv_info.next_fill_frame)) {
				/* program the next frame's Y/UV addresses into the
				   display registers (values are in 16-byte units) */
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
				/* four-slot ring buffer of YUV frames */
				next_dma_frame = (next_dma_frame + 1) & 0x3;
				atomic_set(&itv->yuv_info.next_dma_frame, next_dma_frame);
				itv->yuv_info.fields_lapsed = -1;
	if (frame != (itv->lastVsyncFrame & 1)) {
		struct ivtv_stream *s = ivtv_get_output_stream(itv);
		itv->lastVsyncFrame += 1;
		clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
		clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
		wake_up(&itv->vsync_waitq);
		/* Send VBI to saa7127 */
		set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		/* Check if we need to update the yuv registers */
		if ((itv->yuv_info.yuv_forced_update || itv->yuv_info.new_frame_info[last_dma_frame].update) && last_dma_frame != -1) {
			if (!itv->yuv_info.new_frame_info[last_dma_frame].update)
				last_dma_frame = (last_dma_frame - 1) & 3;
			if (itv->yuv_info.new_frame_info[last_dma_frame].src_w) {
				itv->yuv_info.update_frame = last_dma_frame;
				itv->yuv_info.new_frame_info[last_dma_frame].update = 0;
				itv->yuv_info.yuv_forced_update = 0;
				/* defer the register writes to the work handler */
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		itv->yuv_info.fields_lapsed ++;
764 #define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ)
/* Main interrupt handler: reads the IRQ status register, acks and
 * dispatches each asserted source, then starts any pending DMA/PIO
 * transfers round-robin and queues deferred work.
 * NOTE(review): extraction gaps — the declarations of stat, combo, i and
 * vsync_force, plus several braces/continues, are not visible here. */
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
	struct ivtv *itv = (struct ivtv *)dev_id;
	/* serialize against DMA start/stop paths */
	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);
	/* only handle sources that are not masked */
	combo = ~itv->irqmask & stat;
	/* ack the interrupts we will handle */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);
	/* The vsync interrupt is unusual and clears itself. If we
	 * took too long, we may have missed it. Do some checks */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		/* vsync is enabled, see if we're in a new field */
		if ((itv->lastVsyncFrame & 1) != (read_reg(0x28c0) & 1)) {
			/* New field, looks like we missed it */
			IVTV_DEBUG_YUV("VSync interrupt missed %d\n",read_reg(0x28c0)>>16);
		/* No Vsync expected, wasn't for us */
		spin_unlock(&itv->dma_reg_lock);
	/* Exclude interrupts noted below from the output, otherwise the log is flooded with */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);
	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->cap_w);
	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	/* no DMA in flight: start the next pending DMA, round-robin
	   across streams starting from irq_rr_idx */
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];
			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			ivtv_dma_enc_start(s);
		/* no stream DMA pending: user-DMA gets its turn */
		if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
			ivtv_udma_start(itv);
	/* likewise for pending PIO transfers */
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];
			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags))
		queue_work(itv->irq_work_queues, &itv->irq_work_queue);
	spin_unlock(&itv->dma_reg_lock);
	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
/* DMA watchdog (timer callback, armed in ivtv_dma_enc_start /
 * ivtv_dma_dec_start): if a DMA is still marked in flight after the
 * timeout, log it, ack the status register and reset the DMA state so
 * the driver can recover. */
void ivtv_unfinished_dma(unsigned long arg)
	struct ivtv *itv = (struct ivtv *)arg;
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
	/* ack the status register and clear all DMA bookkeeping */
	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);