/* interrupt handling
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "ivtv-driver.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"
#include "ivtv-mailbox.h"
#define DMA_MAGIC_COOKIE 0x000001fe

static void ivtv_dma_dec_start(struct ivtv_stream *s);
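
/* Map the capture type index reported by the encoder mailbox (data[0] of a
   DMA request) to the corresponding encoder stream. */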
static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};
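
/* Perform a PIO transfer for the current PIO stream: copy each pending
   scatter-gather segment from card memory into the stream buffers, then
   trigger the "PIO complete" interrupt by hand. Runs from the IRQ work
   queue, not in interrupt context. */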
static void ivtv_pio_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
	struct ivtv_buffer *buf;
	struct list_head *p;
	int i = 0;

	IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
	    s->v4l2dev == NULL || !ivtv_use_pio(s)) {
		itv->cur_pio_stream = -1;
		/* trigger PIO complete user interrupt */
		write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
		return;
	}
	IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
	buf = list_entry(s->q_dma.list.next, struct ivtv_buffer, list);
	list_for_each(p, &s->q_dma.list) {
		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
		u32 size = s->sg_processing[i].size & 0x3ffff;

		/* Copy the data from the card to the buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
			memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
		}
		else {
			memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
		}
		i++;
		if (i == s->sg_processing_size)
			break;
	}
	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}
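
/* Deferred work scheduled from the interrupt handler: handle PIO copies and
   the VBI and YUV work that must not run in interrupt context. */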
void ivtv_irq_work_handler(struct work_struct *work)
{
	struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
		ivtv_pio_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
		ivtv_vbi_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
		ivtv_yuv_work_handler(itv);
}

/* Determine the required DMA size, setup enough buffers in the predma queue and
   actually copy the data from the card to the buffers in case a PIO transfer is
   required for this stream.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	struct list_head *p;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;
	int idx = s->sg_pending_size;
	int rc;

	if (s->v4l2dev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}

	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_MPG:
		offset = data[1];
		size = data[2];
		s->pending_pts = 0;
		break;

	case IVTV_ENC_STREAM_TYPE_YUV:
		offset = data[1];
		size = data[2];
		UVoffset = data[3];
		UVsize = data[4];
		s->pending_pts = ((u64) data[5] << 32) | data[6];
		break;

	case IVTV_ENC_STREAM_TYPE_PCM:
		offset = data[1] + 12;
		size = data[2] - 12;
		s->pending_pts = read_dec(offset - 8) |
			((u64)(read_dec(offset - 12)) << 32);
		if (itv->has_cx23415)
			offset += IVTV_DECODER_OFFSET;
		break;

	case IVTV_ENC_STREAM_TYPE_VBI:
		size = itv->vbi.enc_size * itv->vbi.fpi;
		offset = read_enc(itv->vbi.enc_start - 4) + 12;
		if (offset == 12) {
			IVTV_DEBUG_INFO("VBI offset == 0\n");
			return -1;
		}
		s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
		break;

	case IVTV_DEC_STREAM_TYPE_VBI:
		size = read_dec(itv->vbi.dec_start + 4) + 8;
		offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
		s->pending_pts = 0;
		offset += IVTV_DECODER_OFFSET;
		break;
	default:
		/* shouldn't happen */
		return -1;
	}

	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->pending_backup = read_enc(offset);
			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
		}
		s->pending_offset = offset;
	}

	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}

	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	if (rc && !s->buffers_stolen && (s->s_flags & IVTV_F_S_APPL_IO)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in sg_pending */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	memset(buf->buf, 0, 128);
	list_for_each(p, &s->q_predma.list) {
		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);

		if (skip_bufs-- > 0)
			continue;
		s->sg_pending[idx].dst = buf->dma_handle;
		s->sg_pending[idx].src = offset;
		s->sg_pending[idx].size = s->buf_size;
		buf->bytesused = (size < s->buf_size) ? size : s->buf_size;
		buf->dma_xfer_cnt = s->dma_xfer_cnt;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) { /* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->sg_pending_size = idx;
	return 0;
}
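
/* Post-process a completed encoder DMA or PIO transfer: find the
   DMA_MAGIC_COOKIE that marks the real start of the data, restore the word it
   overwrote, flag MPG and VBI buffers for byte swapping, and move the buffers
   to the full queue (or groom and recycle them for the decoder VBI stream). */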
static void dma_post(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf = NULL;
	struct list_head *p;
	u32 offset;
	u32 *u32buf;
	int x = 0;

	IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
			s->name, s->dma_offset);
	list_for_each(p, &s->q_dma.list) {
		buf = list_entry(p, struct ivtv_buffer, list);
		u32buf = (u32 *)buf->buf;

		/* Sync Buffer */
		ivtv_buf_sync_for_cpu(s, buf);

		if (x == 0 && ivtv_use_dma(s)) {
			offset = s->dma_last_offset;
			if (u32buf[offset / 4] != DMA_MAGIC_COOKIE) {
				for (offset = 0; offset < 64; offset++) {
					if (u32buf[offset] == DMA_MAGIC_COOKIE) {
						break;
					}
				}
				offset *= 4;
				if (offset == 256) {
					IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
					offset = s->dma_last_offset;
				}
				if (s->dma_last_offset != offset)
					IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
				s->dma_last_offset = offset;
			}
			if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
			    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
				write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
			}
			else {
				write_enc_sync(0, s->dma_offset);
			}
			if (offset) {
				buf->bytesused -= offset;
				memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
			}
			*u32buf = cpu_to_le32(s->dma_backup);
		}
		x++;
		/* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
		    s->type == IVTV_ENC_STREAM_TYPE_VBI)
			buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
	}
	if (buf)
		buf->bytesused += s->dma_last_offset;
	if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
		list_for_each(p, &s->q_dma.list) {
			buf = list_entry(p, struct ivtv_buffer, list);

			/* Parse and Groom VBI Data */
			s->q_dma.bytesused -= buf->bytesused;
			ivtv_process_vbi_data(itv, buf, 0, s->type);
			s->q_dma.bytesused += buf->bytesused;
		}
		if (s->id == -1) {
			ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
			return;
		}
	}
	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
	if (s->id != -1)
		wake_up(&s->waitq);
}
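
/* Build the scatter-gather list for a decoder (host-to-card) transfer from
   the buffers queued on q_predma, then start the DMA immediately or mark it
   pending if another transfer is still in flight. */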
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	struct list_head *p;
	u32 y_size = itv->params.height * itv->params.width;
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int y_done = 0;
	int bytes_written = 0;
	unsigned long flags = 0;
	int idx = 0;

	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	list_for_each(p, &s->q_predma.list) {
		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);

		/* YUV UV Offset from Y Buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && bytes_written >= y_size) {
			offset = uv_offset;
			y_done = 1;
		}
		s->sg_pending[idx].src = buf->dma_handle;
		s->sg_pending[idx].dst = offset;
		s->sg_pending[idx].size = buf->bytesused;

		offset += buf->bytesused;
		bytes_written += buf->bytesused;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		idx++;
	}
	s->sg_pending_size = idx;

	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	if (lock)
		spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		ivtv_dma_dec_start(s);
	}
	else {
		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
	if (lock)
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}
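
/* Program the next scatter-gather element into the hardware SG list and kick
   off a single encoder (card-to-host) or decoder (host-to-card) transfer. */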
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
}

static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
}

/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	int i;

	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

	if (ivtv_use_dma(s))
		s->sg_pending[s->sg_pending_size - 1].size += 256;

	/* If this is an MPEG stream, and VBI data is also pending, then append the
	   VBI DMA to the MPEG DMA and transfer both sets of data at once.

	   VBI DMA is a second class citizen compared to MPEG and mixing them together
	   will confuse the firmware (the end of a VBI DMA is seen as the end of a
	   MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
	   sure we only use the MPEG DMA to transfer the VBI DMA if both are in
	   use. This way no conflicts occur. */
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
	    s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		if (ivtv_use_dma(s_vbi))
			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
		for (i = 0; i < s_vbi->sg_pending_size; i++) {
			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
		}
		s_vbi->dma_offset = s_vbi->pending_offset;
		s_vbi->sg_pending_size = 0;
		s_vbi->dma_xfer_cnt++;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s->name);
	}

	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;
	s->dma_offset = s->pending_offset;
	s->dma_backup = s->pending_backup;
	s->dma_pts = s->pending_pts;

	if (ivtv_use_pio(s)) {
		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		set_bit(IVTV_F_I_PIO, &itv->i_flags);
		itv->cur_pio_stream = s->type;
	}
	else {
		itv->dma_retries = 0;
		ivtv_dma_enc_start_xfer(s);
		set_bit(IVTV_F_I_DMA, &itv->i_flags);
		itv->cur_dma_stream = s->type;
		itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
		add_timer(&itv->dma_timer);
	}
}
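
/* start the decoder DMA: same bookkeeping as ivtv_dma_enc_start, but decoder
   transfers have no PIO variant. */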
static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;

	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	itv->dma_retries = 0;
	ivtv_dma_dec_start_xfer(s);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
	add_timer(&itv->dma_timer);
}
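
/* Handle the "DMA read" (decoder DMA complete) interrupt: check for errors
   and retries, start the next SG segment if any remain, tell the firmware how
   much was transferred, and recycle the buffers that were just sent. */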
static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type = 0;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0) {
		del_timer(&itv->dma_timer);
		return;
	}

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		s = &itv->streams[itv->cur_dma_stream];
		ivtv_stream_sync_for_cpu(s);

		if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
					read_reg(IVTV_REG_DMASTATUS),
					s->sg_processed, s->sg_processing_size, itv->dma_retries);
			write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
			if (itv->dma_retries == 3) {
				/* Too many retries, give up on this frame */
				itv->dma_retries = 0;
				s->sg_processed = s->sg_processing_size;
			}
			else {
				/* Retry, starting with the first xfer segment.
				   Just retrying the current segment is not sufficient. */
				s->sg_processed = 0;
				itv->dma_retries++;
			}
		}
		if (s->sg_processed < s->sg_processing_size) {
			/* DMA next buffer */
			ivtv_dma_dec_start_xfer(s);
			return;
		}
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
			hw_stream_type = 2;
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		/* For some reason must kick the firmware, like PIO mode,
		   I think this tells the firmware we are done and the size
		   of the xfer so it can calculate what we need next.
		   I think we can do this part ourselves but would have to
		   fully calculate xfer info ourselves and not use interrupts */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	del_timer(&itv->dma_timer);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
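
/* Handle the encoder DMA completion interrupt: check the mailbox status for
   errors, retry or advance to the next SG segment, and hand finished buffers
   to dma_post() (including any VBI data that rode along with an MPEG DMA). */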
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);
	if (itv->cur_dma_stream < 0) {
		del_timer(&itv->dma_timer);
		return;
	}
	s = &itv->streams[itv->cur_dma_stream];
	ivtv_stream_sync_for_cpu(s);

	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
			s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (itv->dma_retries == 3) {
			/* Too many retries, give up on this frame */
			itv->dma_retries = 0;
			s->sg_processed = s->sg_processing_size;
		}
		else {
			/* Retry, starting with the first xfer segment.
			   Just retrying the current segment is not sufficient. */
			s->sg_processed = 0;
			itv->dma_retries++;
		}
	}
	if (s->sg_processed < s->sg_processing_size) {
		/* DMA next buffer */
		ivtv_dma_enc_start_xfer(s);
		return;
	}
	del_timer(&itv->dma_timer);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	s->sg_processing_size = 0;
	s->sg_processed = 0;
	wake_up(&itv->dma_waitq);
}
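
/* Handle the encoder PIO completion "interrupt" raised by the PIO work
   handler: post the copied buffers and schedule the next transfer with the
   firmware. */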
static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
	struct ivtv_stream *s;

	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
		itv->cur_pio_stream = -1;
		return;
	}
	s = &itv->streams[itv->cur_pio_stream];
	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	itv->cur_pio_stream = -1;
	dma_post(s);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
	else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
	else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	wake_up(&itv->dma_waitq);
}
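
/* Handle the DMA error interrupt: log the status, then either retry the
   stream transfer that was in flight, restart a pending user DMA, or give up
   and wake up anyone waiting on the DMA. */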
static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];

	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		/* retry */
		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
			ivtv_dma_dec_start(s);
		else
			ivtv_dma_enc_start(s);
		return;
	}
	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		ivtv_udma_start(itv);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
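
/* Encoder signalled that captured data is ready: look up the stream from the
   mailbox arguments and queue a DMA (or PIO) transfer for it. */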
static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
				data[0], data[1], data[2]);
		return;
	}
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}
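
/* Encoder signalled that VBI data is ready. */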
static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

	/* If more than two VBI buffers are pending, then
	   clear the old ones and start with this new one.
	   This can happen during transition stages when MPEG capturing is
	   started, but the first interrupts haven't arrived yet. During
	   that period VBI requests can accumulate without being able to
	   DMA the data. Since at most four VBI DMA buffers are available,
	   we just drop the old requests when there are already three
	   requests queued. */
	if (s->sg_pending_size > 2) {
		struct list_head *p;
		list_for_each(p, &s->q_predma.list) {
			struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
			ivtv_buf_sync_for_cpu(s, buf);
		}
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
		s->sg_pending_size = 0;
	}
	/* if we can append the data, and the MPEG stream isn't capturing,
	   then start a DMA request for just the VBI data. */
	if (!stream_enc_dma_append(s, data) &&
	    !test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}
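
/* Decoder VBI reinsertion request: if the decoder VBI stream is claimed,
   append the data as a PIO transfer. */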
static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
	    !stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
	}
}
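
/* Decoder requests more data (MPEG or YUV): compute the size and card offset
   of the request and, if enough data is buffered, prepare a decoder DMA. */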
static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */
	ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);

	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		itv->dma_data_req_size = itv->params.width * itv->params.height * 3 / 2;
		itv->dma_data_req_offset = data[1] ? data[1] : yuv_offset[0];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	}
	else {
		itv->dma_data_req_size = data[2] >= 0x10000 ? 0x10000 : data[2];
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
		       itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	}
	else {
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}

static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	unsigned int frame = read_reg(0x28c0) & 1;
	int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame);

	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	if (((frame ^ itv->yuv_info.sync_field[last_dma_frame]) == 0 &&
	     ((itv->last_vsync_field & 1) ^ itv->yuv_info.sync_field[last_dma_frame])) ||
	    (frame != (itv->last_vsync_field & 1) && !itv->yuv_info.frame_interlaced)) {
		int next_dma_frame = last_dma_frame;

		if (!(itv->yuv_info.frame_interlaced && itv->yuv_info.field_delay[next_dma_frame] && itv->yuv_info.fields_lapsed < 1)) {
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&itv->yuv_info.next_fill_frame)) {
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
				next_dma_frame = (next_dma_frame + 1) & 0x3;
				atomic_set(&itv->yuv_info.next_dma_frame, next_dma_frame);
				itv->yuv_info.fields_lapsed = -1;
			}
		}
	}
	if (frame != (itv->last_vsync_field & 1)) {
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->last_vsync_field += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
		}
		wake_up(&itv->vsync_waitq);
		if (s)
			wake_up(&s->waitq);

		/* Send VBI to saa7127 */
		if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
		    test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
		    test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
		    test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}

		/* Check if we need to update the yuv registers */
		if ((itv->yuv_info.yuv_forced_update || itv->yuv_info.new_frame_info[last_dma_frame].update) && last_dma_frame != -1) {
			if (!itv->yuv_info.new_frame_info[last_dma_frame].update)
				last_dma_frame = (last_dma_frame - 1) & 3;

			if (itv->yuv_info.new_frame_info[last_dma_frame].src_w) {
				itv->yuv_info.update_frame = last_dma_frame;
				itv->yuv_info.new_frame_info[last_dma_frame].update = 0;
				itv->yuv_info.yuv_forced_update = 0;
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}

		itv->yuv_info.fields_lapsed++;
	}
}
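
/* All interrupt sources handled by the round-robin DMA/PIO scheduling below.
   The main interrupt handler acknowledges the pending bits, dispatches to the
   per-interrupt handlers above and finally schedules any deferred work. */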
#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT)

irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;
	u32 stat;
	int i;
	u8 vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->last_vsync_field & 1) != (read_reg(0x28c0) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n", read_reg(0x28c0) >> 16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->eos_waitq);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}
		if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
			ivtv_udma_start(itv);
		}
	}

	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}

	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
		queue_work(itv->irq_work_queues, &itv->irq_work_queue);
	}

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}
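
/* DMA timeout handler: a transfer armed via itv->dma_timer did not complete
   within 100 ms. Log it, clear the DMA state and wake up any waiters so the
   driver can recover. */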
void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}