/*
    Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004 Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
21 #include "ivtv-driver.h"
22 #include "ivtv-firmware.h"
23 #include "ivtv-fileops.h"
24 #include "ivtv-queue.h"
25 #include "ivtv-udma.h"
27 #include "ivtv-ioctl.h"
28 #include "ivtv-mailbox.h"
32 #define DMA_MAGIC_COOKIE 0x000001fe
34 static void ivtv_dma_dec_start(struct ivtv_stream *s);
36 static const int ivtv_stream_map[] = {
37 IVTV_ENC_STREAM_TYPE_MPG,
38 IVTV_ENC_STREAM_TYPE_YUV,
39 IVTV_ENC_STREAM_TYPE_PCM,
40 IVTV_ENC_STREAM_TYPE_VBI,
44 static void ivtv_pio_work_handler(struct ivtv *itv)
46 struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
47 struct ivtv_buffer *buf;
51 IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
52 if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
53 s->v4l2dev == NULL || !ivtv_use_pio(s)) {
54 itv->cur_pio_stream = -1;
55 /* trigger PIO complete user interrupt */
56 write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
59 IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
60 buf = list_entry(s->q_dma.list.next, struct ivtv_buffer, list);
61 list_for_each(p, &s->q_dma.list) {
62 struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
63 u32 size = s->sg_processing[i].size & 0x3ffff;
65 /* Copy the data from the card to the buffer */
66 if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
67 memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
70 memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
73 if (i == s->sg_processing_size)
76 write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
79 void ivtv_irq_work_handler(struct work_struct *work)
81 struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);
85 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
86 ivtv_pio_work_handler(itv);
88 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
89 ivtv_vbi_work_handler(itv);
91 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
92 ivtv_yuv_work_handler(itv);
95 /* Determine the required DMA size, setup enough buffers in the predma queue and
96 actually copy the data from the card to the buffers in case a PIO transfer is
97 required for this stream.
99 static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
101 struct ivtv *itv = s->itv;
102 struct ivtv_buffer *buf;
104 u32 bytes_needed = 0;
106 u32 UVoffset = 0, UVsize = 0;
107 int skip_bufs = s->q_predma.buffers;
108 int idx = s->sg_pending_size;
112 if (s->v4l2dev == NULL) {
113 IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
116 if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
117 IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
121 /* determine offset, size and PTS for the various streams */
123 case IVTV_ENC_STREAM_TYPE_MPG:
129 case IVTV_ENC_STREAM_TYPE_YUV:
134 s->pending_pts = ((u64) data[5] << 32) | data[6];
137 case IVTV_ENC_STREAM_TYPE_PCM:
138 offset = data[1] + 12;
140 s->pending_pts = read_dec(offset - 8) |
141 ((u64)(read_dec(offset - 12)) << 32);
142 if (itv->has_cx23415)
143 offset += IVTV_DECODER_OFFSET;
146 case IVTV_ENC_STREAM_TYPE_VBI:
147 size = itv->vbi.enc_size * itv->vbi.fpi;
148 offset = read_enc(itv->vbi.enc_start - 4) + 12;
150 IVTV_DEBUG_INFO("VBI offset == 0\n");
153 s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
156 case IVTV_DEC_STREAM_TYPE_VBI:
157 size = read_dec(itv->vbi.dec_start + 4) + 8;
158 offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
160 offset += IVTV_DECODER_OFFSET;
163 /* shouldn't happen */
167 /* if this is the start of the DMA then fill in the magic cookie */
168 if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
169 if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
170 s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
171 s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
172 write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
175 s->pending_backup = read_enc(offset);
176 write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
178 s->pending_offset = offset;
182 if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
183 /* The size for the Y samples needs to be rounded upwards to a
184 multiple of the buf_size. The UV samples then start in the
186 bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
187 bytes_needed += UVsize;
190 IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
191 ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);
193 rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
194 if (rc < 0) { /* Insufficient buffers */
195 IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
196 bytes_needed, s->name);
199 if (rc && !s->buffers_stolen && (s->s_flags & IVTV_F_S_APPL_IO)) {
200 IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
201 IVTV_WARN("Cause: the application is not reading fast enough.\n");
203 s->buffers_stolen = rc;
205 /* got the buffers, now fill in sg_pending */
206 buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
207 memset(buf->buf, 0, 128);
208 list_for_each(p, &s->q_predma.list) {
209 struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
213 s->sg_pending[idx].dst = buf->dma_handle;
214 s->sg_pending[idx].src = offset;
215 s->sg_pending[idx].size = s->buf_size;
216 buf->bytesused = (size < s->buf_size) ? size : s->buf_size;
217 buf->dma_xfer_cnt = s->dma_xfer_cnt;
219 s->q_predma.bytesused += buf->bytesused;
220 size -= buf->bytesused;
221 offset += s->buf_size;
223 /* Sync SG buffers */
224 ivtv_buf_sync_for_device(s, buf);
226 if (size == 0) { /* YUV */
227 /* process the UV section */
233 s->sg_pending_size = idx;
237 static void dma_post(struct ivtv_stream *s)
239 struct ivtv *itv = s->itv;
240 struct ivtv_buffer *buf = NULL;
246 IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
247 s->name, s->dma_offset);
248 list_for_each(p, &s->q_dma.list) {
249 buf = list_entry(p, struct ivtv_buffer, list);
250 u32buf = (u32 *)buf->buf;
253 ivtv_buf_sync_for_cpu(s, buf);
255 if (x == 0 && ivtv_use_dma(s)) {
256 offset = s->dma_last_offset;
257 if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
259 for (offset = 0; offset < 64; offset++) {
260 if (u32buf[offset] == DMA_MAGIC_COOKIE) {
266 IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
267 offset = s->dma_last_offset;
269 if (s->dma_last_offset != offset)
270 IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
271 s->dma_last_offset = offset;
273 if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
274 s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
275 write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
278 write_enc_sync(0, s->dma_offset);
281 buf->bytesused -= offset;
282 memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
284 *u32buf = cpu_to_le32(s->dma_backup);
287 /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
288 if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
289 s->type == IVTV_ENC_STREAM_TYPE_VBI)
290 buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
293 buf->bytesused += s->dma_last_offset;
294 if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
295 list_for_each(p, &s->q_dma.list) {
296 buf = list_entry(p, struct ivtv_buffer, list);
298 /* Parse and Groom VBI Data */
299 s->q_dma.bytesused -= buf->bytesused;
300 ivtv_process_vbi_data(itv, buf, 0, s->type);
301 s->q_dma.bytesused += buf->bytesused;
304 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
308 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
313 void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
315 struct ivtv *itv = s->itv;
316 struct ivtv_buffer *buf;
318 u32 y_size = itv->params.height * itv->params.width;
319 u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
321 int bytes_written = 0;
322 unsigned long flags = 0;
325 IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
326 buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
327 list_for_each(p, &s->q_predma.list) {
328 struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
330 /* YUV UV Offset from Y Buffer */
331 if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && bytes_written >= y_size) {
335 s->sg_pending[idx].src = buf->dma_handle;
336 s->sg_pending[idx].dst = offset;
337 s->sg_pending[idx].size = buf->bytesused;
339 offset += buf->bytesused;
340 bytes_written += buf->bytesused;
342 /* Sync SG buffers */
343 ivtv_buf_sync_for_device(s, buf);
346 s->sg_pending_size = idx;
348 /* Sync Hardware SG List of buffers */
349 ivtv_stream_sync_for_device(s);
351 spin_lock_irqsave(&itv->dma_reg_lock, flags);
352 if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
353 ivtv_dma_dec_start(s);
356 set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
359 spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
362 static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
364 struct ivtv *itv = s->itv;
366 s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
367 s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
368 s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
370 /* Sync Hardware SG List of buffers */
371 ivtv_stream_sync_for_device(s);
372 write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
373 write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
376 static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
378 struct ivtv *itv = s->itv;
380 s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
381 s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
382 s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
384 /* Sync Hardware SG List of buffers */
385 ivtv_stream_sync_for_device(s);
386 write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
387 write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
390 /* start the encoder DMA */
391 static void ivtv_dma_enc_start(struct ivtv_stream *s)
393 struct ivtv *itv = s->itv;
394 struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
397 IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);
399 if (s->q_predma.bytesused)
400 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
403 s->sg_pending[s->sg_pending_size - 1].size += 256;
405 /* If this is an MPEG stream, and VBI data is also pending, then append the
406 VBI DMA to the MPEG DMA and transfer both sets of data at once.
408 VBI DMA is a second class citizen compared to MPEG and mixing them together
409 will confuse the firmware (the end of a VBI DMA is seen as the end of a
410 MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
411 sure we only use the MPEG DMA to transfer the VBI DMA if both are in
412 use. This way no conflicts occur. */
413 clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
414 if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
415 s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
416 ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
417 if (ivtv_use_dma(s_vbi))
418 s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
419 for (i = 0; i < s_vbi->sg_pending_size; i++) {
420 s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
422 s_vbi->dma_offset = s_vbi->pending_offset;
423 s_vbi->sg_pending_size = 0;
424 s_vbi->dma_xfer_cnt++;
425 set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
426 IVTV_DEBUG_HI_DMA("include DMA for %s\n", s->name);
430 memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
431 s->sg_processing_size = s->sg_pending_size;
432 s->sg_pending_size = 0;
434 s->dma_offset = s->pending_offset;
435 s->dma_backup = s->pending_backup;
436 s->dma_pts = s->pending_pts;
438 if (ivtv_use_pio(s)) {
439 set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
440 set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
441 set_bit(IVTV_F_I_PIO, &itv->i_flags);
442 itv->cur_pio_stream = s->type;
445 itv->dma_retries = 0;
446 ivtv_dma_enc_start_xfer(s);
447 set_bit(IVTV_F_I_DMA, &itv->i_flags);
448 itv->cur_dma_stream = s->type;
449 itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
450 add_timer(&itv->dma_timer);
454 static void ivtv_dma_dec_start(struct ivtv_stream *s)
456 struct ivtv *itv = s->itv;
458 if (s->q_predma.bytesused)
459 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
461 memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
462 s->sg_processing_size = s->sg_pending_size;
463 s->sg_pending_size = 0;
466 IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
467 itv->dma_retries = 0;
468 ivtv_dma_dec_start_xfer(s);
469 set_bit(IVTV_F_I_DMA, &itv->i_flags);
470 itv->cur_dma_stream = s->type;
471 itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
472 add_timer(&itv->dma_timer);
475 static void ivtv_irq_dma_read(struct ivtv *itv)
477 struct ivtv_stream *s = NULL;
478 struct ivtv_buffer *buf;
479 int hw_stream_type = 0;
481 IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
482 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0) {
483 del_timer(&itv->dma_timer);
487 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
488 s = &itv->streams[itv->cur_dma_stream];
489 ivtv_stream_sync_for_cpu(s);
491 if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
492 IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
493 read_reg(IVTV_REG_DMASTATUS),
494 s->sg_processed, s->sg_processing_size, itv->dma_retries);
495 write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
496 if (itv->dma_retries == 3) {
497 /* Too many retries, give up on this frame */
498 itv->dma_retries = 0;
499 s->sg_processed = s->sg_processing_size;
502 /* Retry, starting with the first xfer segment.
503 Just retrying the current segment is not sufficient. */
508 if (s->sg_processed < s->sg_processing_size) {
509 /* DMA next buffer */
510 ivtv_dma_dec_start_xfer(s);
513 if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
515 IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);
517 /* For some reason must kick the firmware, like PIO mode,
518 I think this tells the firmware we are done and the size
519 of the xfer so it can calculate what we need next.
520 I think we can do this part ourselves but would have to
521 fully calculate xfer info ourselves and not use interrupts
523 ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
526 /* Free last DMA call */
527 while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
528 ivtv_buf_sync_for_cpu(s, buf);
529 ivtv_enqueue(s, buf, &s->q_free);
533 del_timer(&itv->dma_timer);
534 clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
535 clear_bit(IVTV_F_I_DMA, &itv->i_flags);
536 itv->cur_dma_stream = -1;
537 wake_up(&itv->dma_waitq);
540 static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
542 u32 data[CX2341X_MBOX_MAX_DATA];
543 struct ivtv_stream *s;
545 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
546 IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);
547 if (itv->cur_dma_stream < 0) {
548 del_timer(&itv->dma_timer);
551 s = &itv->streams[itv->cur_dma_stream];
552 ivtv_stream_sync_for_cpu(s);
554 if (data[0] & 0x18) {
555 IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
556 s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
557 write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
558 if (itv->dma_retries == 3) {
559 /* Too many retries, give up on this frame */
560 itv->dma_retries = 0;
561 s->sg_processed = s->sg_processing_size;
564 /* Retry, starting with the first xfer segment.
565 Just retrying the current segment is not sufficient. */
570 if (s->sg_processed < s->sg_processing_size) {
571 /* DMA next buffer */
572 ivtv_dma_enc_start_xfer(s);
575 del_timer(&itv->dma_timer);
576 clear_bit(IVTV_F_I_DMA, &itv->i_flags);
577 itv->cur_dma_stream = -1;
579 if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
580 s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
583 s->sg_processing_size = 0;
585 wake_up(&itv->dma_waitq);
588 static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
590 struct ivtv_stream *s;
592 if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
593 itv->cur_pio_stream = -1;
596 s = &itv->streams[itv->cur_pio_stream];
597 IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
598 clear_bit(IVTV_F_I_PIO, &itv->i_flags);
599 itv->cur_pio_stream = -1;
601 if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
602 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
603 else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
604 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
605 else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
606 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
607 clear_bit(IVTV_F_I_PIO, &itv->i_flags);
608 if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
609 s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
612 wake_up(&itv->dma_waitq);
615 static void ivtv_irq_dma_err(struct ivtv *itv)
617 u32 data[CX2341X_MBOX_MAX_DATA];
619 del_timer(&itv->dma_timer);
620 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
621 IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
622 read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
623 write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
624 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
625 itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
626 struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];
629 if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
630 ivtv_dma_dec_start(s);
632 ivtv_dma_enc_start(s);
635 if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
636 ivtv_udma_start(itv);
639 clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
640 clear_bit(IVTV_F_I_DMA, &itv->i_flags);
641 itv->cur_dma_stream = -1;
642 wake_up(&itv->dma_waitq);
645 static void ivtv_irq_enc_start_cap(struct ivtv *itv)
647 u32 data[CX2341X_MBOX_MAX_DATA];
648 struct ivtv_stream *s;
650 /* Get DMA destination and size arguments from card */
651 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
652 IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);
654 if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
655 IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
656 data[0], data[1], data[2]);
659 s = &itv->streams[ivtv_stream_map[data[0]]];
660 if (!stream_enc_dma_append(s, data)) {
661 set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
665 static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
667 struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
668 u32 data[CX2341X_MBOX_MAX_DATA];
669 struct ivtv_stream *s;
671 IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
672 s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
674 /* If more than two VBI buffers are pending, then
675 clear the old ones and start with this new one.
676 This can happen during transition stages when MPEG capturing is
677 started, but the first interrupts haven't arrived yet. During
678 that period VBI requests can accumulate without being able to
679 DMA the data. Since at most four VBI DMA buffers are available,
680 we just drop the old requests when there are already three
682 if (s->sg_pending_size > 2) {
684 list_for_each(p, &s->q_predma.list) {
685 struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
686 ivtv_buf_sync_for_cpu(s, buf);
688 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
689 s->sg_pending_size = 0;
691 /* if we can append the data, and the MPEG stream isn't capturing,
692 then start a DMA request for just the VBI data. */
693 if (!stream_enc_dma_append(s, data) &&
694 !test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
695 set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
699 static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
701 u32 data[CX2341X_MBOX_MAX_DATA];
702 struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];
704 IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
705 if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
706 !stream_enc_dma_append(s, data)) {
707 set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
711 static void ivtv_irq_dec_data_req(struct ivtv *itv)
713 u32 data[CX2341X_MBOX_MAX_DATA];
714 struct ivtv_stream *s;
717 ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);
719 if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
720 itv->dma_data_req_size = itv->params.width * itv->params.height * 3 / 2;
721 itv->dma_data_req_offset = data[1] ? data[1] : yuv_offset[0];
722 s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
725 itv->dma_data_req_size = data[2] >= 0x10000 ? 0x10000 : data[2];
726 itv->dma_data_req_offset = data[1];
727 s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
729 IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
730 itv->dma_data_req_offset, itv->dma_data_req_size);
731 if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
732 set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
735 clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
736 ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
737 ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
741 static void ivtv_irq_vsync(struct ivtv *itv)
743 /* The vsync interrupt is unusual in that it won't clear until
744 * the end of the first line for the current field, at which
745 * point it clears itself. This can result in repeated vsync
746 * interrupts, or a missed vsync. Read some of the registers
747 * to determine the line being displayed and ensure we handle
748 * one vsync per frame.
750 unsigned int frame = read_reg(0x28c0) & 1;
751 int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame);
753 if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");
755 if (((frame ^ itv->yuv_info.sync_field[last_dma_frame]) == 0 &&
756 ((itv->lastVsyncFrame & 1) ^ itv->yuv_info.sync_field[last_dma_frame])) ||
757 (frame != (itv->lastVsyncFrame & 1) && !itv->yuv_info.frame_interlaced)) {
758 int next_dma_frame = last_dma_frame;
760 if (!(itv->yuv_info.frame_interlaced && itv->yuv_info.field_delay[next_dma_frame] && itv->yuv_info.fields_lapsed < 1)) {
761 if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&itv->yuv_info.next_fill_frame)) {
762 write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
763 write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
764 write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
765 write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
766 next_dma_frame = (next_dma_frame + 1) & 0x3;
767 atomic_set(&itv->yuv_info.next_dma_frame, next_dma_frame);
768 itv->yuv_info.fields_lapsed = -1;
772 if (frame != (itv->lastVsyncFrame & 1)) {
773 struct ivtv_stream *s = ivtv_get_output_stream(itv);
775 itv->lastVsyncFrame += 1;
777 clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
778 clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
781 set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
783 if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
784 set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
785 wake_up(&itv->event_waitq);
787 wake_up(&itv->vsync_waitq);
791 /* Send VBI to saa7127 */
793 set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
794 set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
797 /* Check if we need to update the yuv registers */
798 if ((itv->yuv_info.yuv_forced_update || itv->yuv_info.new_frame_info[last_dma_frame].update) && last_dma_frame != -1) {
799 if (!itv->yuv_info.new_frame_info[last_dma_frame].update)
800 last_dma_frame = (last_dma_frame - 1) & 3;
802 if (itv->yuv_info.new_frame_info[last_dma_frame].src_w) {
803 itv->yuv_info.update_frame = last_dma_frame;
804 itv->yuv_info.new_frame_info[last_dma_frame].update = 0;
805 itv->yuv_info.yuv_forced_update = 0;
806 set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
807 set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
811 itv->yuv_info.fields_lapsed ++;
815 #define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ)
817 irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
819 struct ivtv *itv = (struct ivtv *)dev_id;
825 spin_lock(&itv->dma_reg_lock);
826 /* get contents of irq status register */
827 stat = read_reg(IVTV_REG_IRQSTATUS);
829 combo = ~itv->irqmask & stat;
832 if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);
835 /* The vsync interrupt is unusual and clears itself. If we
836 * took too long, we may have missed it. Do some checks
838 if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
839 /* vsync is enabled, see if we're in a new field */
840 if ((itv->lastVsyncFrame & 1) != (read_reg(0x28c0) & 1)) {
841 /* New field, looks like we missed it */
842 IVTV_DEBUG_YUV("VSync interrupt missed %d\n",read_reg(0x28c0)>>16);
848 /* No Vsync expected, wasn't for us */
849 spin_unlock(&itv->dma_reg_lock);
854 /* Exclude interrupts noted below from the output, otherwise the log is flooded with
856 if (combo & ~0xff6d0400)
857 IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);
859 if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
860 IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
863 if (combo & IVTV_IRQ_DMA_READ) {
864 ivtv_irq_dma_read(itv);
867 if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
868 ivtv_irq_enc_dma_complete(itv);
871 if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
872 ivtv_irq_enc_pio_complete(itv);
875 if (combo & IVTV_IRQ_DMA_ERR) {
876 ivtv_irq_dma_err(itv);
879 if (combo & IVTV_IRQ_ENC_START_CAP) {
880 ivtv_irq_enc_start_cap(itv);
883 if (combo & IVTV_IRQ_ENC_VBI_CAP) {
884 ivtv_irq_enc_vbi_cap(itv);
887 if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
888 ivtv_irq_dec_vbi_reinsert(itv);
891 if (combo & IVTV_IRQ_ENC_EOS) {
892 IVTV_DEBUG_IRQ("ENC EOS\n");
893 set_bit(IVTV_F_I_EOS, &itv->i_flags);
894 wake_up(&itv->cap_w);
897 if (combo & IVTV_IRQ_DEC_DATA_REQ) {
898 ivtv_irq_dec_data_req(itv);
901 /* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
902 if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
906 if (combo & IVTV_IRQ_ENC_VIM_RST) {
907 IVTV_DEBUG_IRQ("VIM RST\n");
908 /*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
911 if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
912 IVTV_DEBUG_INFO("Stereo mode changed\n");
915 if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
917 for (i = 0; i < IVTV_MAX_STREAMS; i++) {
918 int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
919 struct ivtv_stream *s = &itv->streams[idx];
921 if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
923 if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
924 ivtv_dma_dec_start(s);
926 ivtv_dma_enc_start(s);
929 if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
930 ivtv_udma_start(itv);
934 if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
936 for (i = 0; i < IVTV_MAX_STREAMS; i++) {
937 int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
938 struct ivtv_stream *s = &itv->streams[idx];
940 if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
942 if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
943 ivtv_dma_enc_start(s);
948 if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags))
949 queue_work(itv->irq_work_queues, &itv->irq_work_queue);
951 spin_unlock(&itv->dma_reg_lock);
953 /* If we've just handled a 'forced' vsync, it's safest to say it
954 * wasn't ours. Another device may have triggered it at just
957 return vsync_force ? IRQ_NONE : IRQ_HANDLED;
960 void ivtv_unfinished_dma(unsigned long arg)
962 struct ivtv *itv = (struct ivtv *)arg;
964 if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
966 IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
968 write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
969 clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
970 clear_bit(IVTV_F_I_DMA, &itv->i_flags);
971 itv->cur_dma_stream = -1;
972 wake_up(&itv->dma_waitq);