/*
 * helper functions for physically contiguous capture buffers
 *
 * The functions support hardware lacking scatter gather support
 * (i.e. the buffers must be linear in physical memory)
 *
 * Copyright (c) 2008 Magnus Damm
 *
 * Based on videobuf-vmalloc.c,
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */
17 #include <linux/init.h>
18 #include <linux/module.h>
19 #include <linux/dma-mapping.h>
20 #include <media/videobuf-dma-contig.h>
22 struct videobuf_dma_contig_memory {
25 dma_addr_t dma_handle;
29 #define MAGIC_DC_MEM 0x0733ac61
30 #define MAGIC_CHECK(is, should) \
31 if (unlikely((is) != (should))) { \
32 pr_err("magic mismatch: %x expected %x\n", is, should); \
37 videobuf_vm_open(struct vm_area_struct *vma)
39 struct videobuf_mapping *map = vma->vm_private_data;
41 dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
42 map, map->count, vma->vm_start, vma->vm_end);
47 static void videobuf_vm_close(struct vm_area_struct *vma)
49 struct videobuf_mapping *map = vma->vm_private_data;
50 struct videobuf_queue *q = map->q;
53 dev_dbg(map->q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
54 map, map->count, vma->vm_start, vma->vm_end);
57 if (0 == map->count) {
58 struct videobuf_dma_contig_memory *mem;
60 dev_dbg(map->q->dev, "munmap %p q=%p\n", map, q);
61 mutex_lock(&q->vb_lock);
63 /* We need first to cancel streams, before unmapping */
65 videobuf_queue_cancel(q);
67 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
68 if (NULL == q->bufs[i])
71 if (q->bufs[i]->map != map)
74 mem = q->bufs[i]->priv;
76 /* This callback is called only if kernel has
77 allocated memory and this memory is mmapped.
78 In this case, memory should be freed,
79 in order to do memory unmap.
82 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
84 /* vfree is not atomic - can't be
85 called with IRQ's disabled
87 dev_dbg(map->q->dev, "buf[%d] freeing %p\n",
90 dma_free_coherent(q->dev, mem->size,
91 mem->vaddr, mem->dma_handle);
95 q->bufs[i]->map = NULL;
96 q->bufs[i]->baddr = 0;
101 mutex_unlock(&q->vb_lock);
105 static struct vm_operations_struct videobuf_vm_ops = {
106 .open = videobuf_vm_open,
107 .close = videobuf_vm_close,
110 static void *__videobuf_alloc(size_t size)
112 struct videobuf_dma_contig_memory *mem;
113 struct videobuf_buffer *vb;
115 vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
117 mem = vb->priv = ((char *)vb) + size;
118 mem->magic = MAGIC_DC_MEM;
124 static void *__videobuf_to_vmalloc(struct videobuf_buffer *buf)
126 struct videobuf_dma_contig_memory *mem = buf->priv;
129 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
134 static int __videobuf_iolock(struct videobuf_queue *q,
135 struct videobuf_buffer *vb,
136 struct v4l2_framebuffer *fbuf)
138 struct videobuf_dma_contig_memory *mem = vb->priv;
141 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
143 switch (vb->memory) {
144 case V4L2_MEMORY_MMAP:
145 dev_dbg(q->dev, "%s memory method MMAP\n", __func__);
147 /* All handling should be done by __videobuf_mmap_mapper() */
149 dev_err(q->dev, "memory is not alloced/mmapped.\n");
153 case V4L2_MEMORY_USERPTR:
154 dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);
156 /* The only USERPTR currently supported is the one needed for
162 mem->size = PAGE_ALIGN(vb->size);
163 mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
164 &mem->dma_handle, GFP_KERNEL);
166 dev_err(q->dev, "dma_alloc_coherent %ld failed\n",
171 dev_dbg(q->dev, "dma_alloc_coherent data is at %p (%ld)\n",
172 mem->vaddr, mem->size);
174 case V4L2_MEMORY_OVERLAY:
176 dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n",
184 static int __videobuf_sync(struct videobuf_queue *q,
185 struct videobuf_buffer *buf)
187 struct videobuf_dma_contig_memory *mem = buf->priv;
190 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
192 dma_sync_single_for_cpu(q->dev, mem->dma_handle, mem->size,
197 static int __videobuf_mmap_free(struct videobuf_queue *q)
201 dev_dbg(q->dev, "%s\n", __func__);
202 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
203 if (q->bufs[i] && q->bufs[i]->map)
210 static int __videobuf_mmap_mapper(struct videobuf_queue *q,
211 struct vm_area_struct *vma)
213 struct videobuf_dma_contig_memory *mem;
214 struct videobuf_mapping *map;
217 unsigned long size, offset = vma->vm_pgoff << PAGE_SHIFT;
219 dev_dbg(q->dev, "%s\n", __func__);
220 if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
223 /* look for first buffer to map */
224 for (first = 0; first < VIDEO_MAX_FRAME; first++) {
228 if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
230 if (q->bufs[first]->boff == offset)
233 if (VIDEO_MAX_FRAME == first) {
234 dev_dbg(q->dev, "invalid user space offset [offset=0x%lx]\n",
239 /* create mapping + update buffer list */
240 map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
244 q->bufs[first]->map = map;
245 map->start = vma->vm_start;
246 map->end = vma->vm_end;
249 q->bufs[first]->baddr = vma->vm_start;
251 mem = q->bufs[first]->priv;
253 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
255 mem->size = PAGE_ALIGN(q->bufs[first]->bsize);
256 mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
257 &mem->dma_handle, GFP_KERNEL);
259 dev_err(q->dev, "dma_alloc_coherent size %ld failed\n",
263 dev_dbg(q->dev, "dma_alloc_coherent data is at addr %p (size %ld)\n",
264 mem->vaddr, mem->size);
266 /* Try to remap memory */
268 size = vma->vm_end - vma->vm_start;
269 size = (size < mem->size) ? size : mem->size;
271 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
272 retval = remap_pfn_range(vma, vma->vm_start,
273 mem->dma_handle >> PAGE_SHIFT,
274 size, vma->vm_page_prot);
276 dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
277 dma_free_coherent(q->dev, mem->size,
278 mem->vaddr, mem->dma_handle);
282 vma->vm_ops = &videobuf_vm_ops;
283 vma->vm_flags |= VM_DONTEXPAND;
284 vma->vm_private_data = map;
286 dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
287 map, q, vma->vm_start, vma->vm_end,
288 (long int) q->bufs[first]->bsize,
289 vma->vm_pgoff, first);
291 videobuf_vm_open(vma);
300 static int __videobuf_copy_to_user(struct videobuf_queue *q,
301 char __user *data, size_t count,
304 struct videobuf_dma_contig_memory *mem = q->read_buf->priv;
308 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
311 /* copy to userspace */
312 if (count > q->read_buf->size - q->read_off)
313 count = q->read_buf->size - q->read_off;
317 if (copy_to_user(data, vaddr + q->read_off, count))
323 static int __videobuf_copy_stream(struct videobuf_queue *q,
324 char __user *data, size_t count, size_t pos,
325 int vbihack, int nonblocking)
328 struct videobuf_dma_contig_memory *mem = q->read_buf->priv;
331 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
334 /* dirty, undocumented hack -- pass the frame counter
335 * within the last four bytes of each vbi data block.
336 * We need that one to maintain backward compatibility
337 * to all vbi decoding software out there ... */
338 fc = (unsigned int *)mem->vaddr;
339 fc += (q->read_buf->size >> 2) - 1;
340 *fc = q->read_buf->field_count >> 1;
341 dev_dbg(q->dev, "vbihack: %d\n", *fc);
344 /* copy stuff using the common method */
345 count = __videobuf_copy_to_user(q, data, count, nonblocking);
347 if ((count == -EFAULT) && (pos == 0))
353 static struct videobuf_qtype_ops qops = {
354 .magic = MAGIC_QTYPE_OPS,
356 .alloc = __videobuf_alloc,
357 .iolock = __videobuf_iolock,
358 .sync = __videobuf_sync,
359 .mmap_free = __videobuf_mmap_free,
360 .mmap_mapper = __videobuf_mmap_mapper,
361 .video_copy_to_user = __videobuf_copy_to_user,
362 .copy_stream = __videobuf_copy_stream,
363 .vmalloc = __videobuf_to_vmalloc,
366 void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
367 struct videobuf_queue_ops *ops,
370 enum v4l2_buf_type type,
371 enum v4l2_field field,
375 videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
378 EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
380 dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
382 struct videobuf_dma_contig_memory *mem = buf->priv;
385 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
387 return mem->dma_handle;
389 EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
391 void videobuf_dma_contig_free(struct videobuf_queue *q,
392 struct videobuf_buffer *buf)
394 struct videobuf_dma_contig_memory *mem = buf->priv;
396 /* mmapped memory can't be freed here, otherwise mmapped region
397 would be released, while still needed. In this case, the memory
398 release should happen inside videobuf_vm_close().
399 So, it should free memory only if the memory were allocated for
402 if ((buf->memory != V4L2_MEMORY_USERPTR) || !buf->baddr)
408 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
410 dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
413 EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
415 MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
416 MODULE_AUTHOR("Magnus Damm");
417 MODULE_LICENSE("GPL");