drivers/char/drm/drm_vm.c
/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

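/**
 * Compute the page protection flags for an I/O mapping.
 *
 * \param map_type map type (_DRM_REGISTERS, _DRM_FRAME_BUFFER, ...).
 * \param vma virtual memory area.
 * \return the computed pgprot_t.
 *
 * Marks the pages uncacheable (and guarded, for PowerPC register maps) so
 * that MMIO and framebuffer accesses bypass the CPU cache; on ia64 the EFI
 * memory map decides between write-combining and uncached attributes.
 */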
static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
        if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
                pgprot_val(tmp) |= _PAGE_PCD;
                pgprot_val(tmp) &= ~_PAGE_PWT;
        }
#elif defined(__powerpc__)
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
        if (map_type == _DRM_REGISTERS)
                pgprot_val(tmp) |= _PAGE_GUARDED;
#endif
#if defined(__ia64__)
        if (efi_range_is_wc(vma->vm_start, vma->vm_end -
                                    vma->vm_start))
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_map *map = NULL;
        struct drm_map_list *r_list;
        struct drm_hash_item *hash;

        /*
         * Find the right map
         */
        if (!drm_core_has_AGP(dev))
                goto vm_fault_error;

        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_fault_error;

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
                goto vm_fault_error;

        r_list = drm_hash_entry(hash, struct drm_map_list, hash);
        map = r_list->map;

        if (map && map->type == _DRM_AGP) {
                /*
                 * Using vm_pgoff as a selector forces us to use this unusual
                 * addressing scheme.
                 */
                unsigned long offset = (unsigned long)vmf->virtual_address -
                                                                vma->vm_start;
                unsigned long baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                list_for_each_entry(agpmem, &dev->agp->memory, head) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                /* No match: the iterator ran off the end of the list, so
                 * agpmem points at the list head, not a real entry. */
                if (&agpmem->head == &dev->agp->memory)
                        goto vm_fault_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = virt_to_page(__va(agpmem->memory->memory[offset]));
                get_page(page);
                vmf->page = page;

                DRM_DEBUG
                    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
                     baddr, __va(agpmem->memory->memory[offset]), offset,
                     page_count(page));
                return 0;
        }
vm_fault_error:
        return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else                           /* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
#endif                          /* __OS_HAS_AGP */

/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_map *map = (struct drm_map *) vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (!map)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;

        DRM_DEBUG("shm_fault 0x%lx\n", offset);
        return 0;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_vma_entry *pt, *temp;
        struct drm_map *map;
        struct drm_map_list *r_list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma->vm_private_data == map)
                        found_maps++;
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                }
        }

        /* Ours was the only vma still using this map */
        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist; if we are not,
                 * then we delete this mapping's information.
                 */
                found_maps = 0;
                list_for_each_entry(r_list, &dev->maplist, head) {
                        if (r_list->map == map)
                                found_maps++;
                }

                if (!found_maps) {
                        drm_dma_handle_t dmah;

                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                                iounmap(map->handle);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        case _DRM_CONSISTENT:
                                dmah.vaddr = map->handle;
                                dmah.busaddr = map->offset;
                                dmah.size = map->size;
                                __drm_pci_free(dev, &dmah);
                                break;
                        }
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_device_dma *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)
                return VM_FAULT_SIGBUS; /* Error */
        if (!dma->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;   /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
        page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

        get_page(page);
        vmf->page = page;

        DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
        return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_map *map = (struct drm_map *) vma->vm_private_data;
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_sg_mem *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)
                return VM_FAULT_SIGBUS; /* Error */
        if (!entry->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);
        vmf->page = page;

        return 0;
}

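/*
 * Thin wrappers with the vm_operations_struct fault signature; they simply
 * dispatch to the type-specific handlers above.
 */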
static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
        .fault = drm_vm_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
        .fault = drm_vm_shm_fault,
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
        .fault = drm_vm_dma_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
        .fault = drm_vm_sg_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
static void drm_vm_open_locked(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_vma_entry *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
        if (vma_entry) {
                vma_entry->vma = vma;
                vma_entry->pid = current->pid;
                list_add(&vma_entry->head, &dev->vmalist);
        }
}

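/* As above, but takes drm_device::struct_mutex around the vmalist update. */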
static void drm_vm_open(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_open_locked(vma);
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_vma_entry *pt, *temp;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                        break;
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops, sets
 * the file pointer, and calls drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev;
        struct drm_device_dma *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

        dev = priv->head->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                return -EINVAL;
        }

        if (!capable(CAP_SYS_ADMIN) &&
            (dma->flags & _DRM_DMA_USE_PCI_RO)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        vma->vm_ops = &drm_vm_dma_ops;

        vma->vm_flags |= VM_RESERVED;   /* Don't swap */
        vma->vm_flags |= VM_DONTEXPAND;

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open_locked(vma);
        return 0;
}

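/**
 * Default implementation of the driver's get_map_ofs hook: a map's mmap
 * offset token is simply its offset.
 */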
unsigned long drm_core_get_map_ofs(struct drm_map * map)
{
        return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);

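/**
 * Default implementation of the driver's get_reg_ofs hook: the extra offset
 * added when remapping register space (bus-relative on Alpha, zero elsewhere).
 */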
unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
        return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);

/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls drm_vm_open_locked().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_map *map = NULL;
        unsigned long offset = 0;
        struct drm_hash_item *hash;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        if (!priv->authenticated)
                return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!vma->vm_pgoff
#if __OS_HAS_AGP
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
                DRM_ERROR("Could not find map\n");
                return -EINVAL;
        }

        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
        case _DRM_AGP:
                if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms we can't talk to bus dma address from the CPU, so for
                         * memory of type DRM_AGP, we'll deal with sorting out the real physical
                         * pages and mappings in fault()
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                offset = dev->driver->get_reg_ofs(dev);
                vma->vm_flags |= VM_IO; /* not in core dump */
                vma->vm_page_prot = drm_io_prot(map->type, vma);
#ifdef __sparc__
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%lx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, map->offset + offset);
                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory. But
                 * it's allocated in a different way, so avoid fault */
                if (remap_pfn_range(vma, vma->vm_start,
                    page_to_pfn(virt_to_page(map->handle)),
                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
                        return -EAGAIN;
        /* fall through to _DRM_SHM */
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                /* Don't let this area swap.  Change when
                   DRM_KERNEL advisory is supported. */
                vma->vm_flags |= VM_RESERVED;
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
                vma->vm_flags |= VM_RESERVED;
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */
        vma->vm_flags |= VM_DONTEXPAND;

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open_locked(vma);
        return 0;
}

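/** mmap entry point: takes drm_device::struct_mutex and calls drm_mmap_locked(). */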
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->head->dev;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_mmap_locked(filp, vma);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
EXPORT_SYMBOL(drm_mmap);
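
/*
 * Usage sketch (illustrative assumption, not part of this file): userspace
 * maps a DRM map by passing the map's offset token as the mmap offset, e.g.:
 *
 *      int fd = open("/dev/dri/card0", O_RDWR);
 *      void *ptr = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, map_offset_token);
 *
 * where map_size and map_offset_token are hypothetical names for the size and
 * offset of a map previously registered through the DRM map ioctls. drm_mmap()
 * then resolves the token through drm_device::map_hash as seen above.
 */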