/*
 * linux/arch/m68k/mm/sun3dvma.c
 *
 * Copyright (C) 2000 Sam Creasey
 *
 * Contains common routines for sun3/sun3x DVMA management.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/list.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/dvma.h>
#undef DVMA_DEBUG

#ifdef CONFIG_SUN3X
extern void dvma_unmap_iommu(unsigned long baddr, int len);
#else
static inline void dvma_unmap_iommu(unsigned long a, int b) { }
#endif

#ifdef CONFIG_SUN3
extern void sun3_dvma_init(void);
#endif
unsigned long iommu_use[IOMMU_TOTAL_ENTRIES];

#define dvma_index(baddr)	(((baddr) - DVMA_START) >> DVMA_PAGE_SHIFT)

#define dvma_entry_use(baddr)	(iommu_use[dvma_index(baddr)])
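/*
 * Each DVMA page has a slot in iommu_use[]; get_baddr() records the
 * allocation length in the slot for the first page of a mapping, which
 * is how free_baddr() later recovers the size from the bus address
 * alone.
 */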
struct hole {
	unsigned long start;
	unsigned long end;
	unsigned long size;
	struct list_head list;
};

static struct list_head hole_list;
static struct list_head hole_cache;
static struct hole initholes[64];
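/*
 * Free DVMA space is tracked as a list of holes.  hole_list holds the
 * ranges currently available, hole_cache holds unused hole descriptors,
 * and initholes[] is the static backing store for those descriptors.
 */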
#ifdef DVMA_DEBUG

static unsigned long dvma_allocs;
static unsigned long dvma_frees;
static unsigned long long dvma_alloc_bytes;
static unsigned long long dvma_free_bytes;
static void print_use(void)
{
	int i;
	int j = 0;

	printk("dvma entry usage:\n");

	for (i = 0; i < IOMMU_TOTAL_ENTRIES; i++) {
		if (!iommu_use[i])
			continue;

		j++;

		printk("dvma entry: %08lx len %08lx\n",
		       ((unsigned long)i << DVMA_PAGE_SHIFT) + DVMA_START,
		       iommu_use[i]);
	}

	printk("%d entries in use total\n", j);

	printk("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees);
	printk("allocation/free bytes: %llx/%llx\n", dvma_alloc_bytes,
	       dvma_free_bytes);
}
static void print_holes(struct list_head *holes)
{
	struct list_head *cur;
	struct hole *hole;

	printk("listing dvma holes\n");
	list_for_each(cur, holes) {
		hole = list_entry(cur, struct hole, list);

		/* skip the all-zero (never used) descriptors */
		if ((hole->start == 0) && (hole->end == 0) && (hole->size == 0))
			continue;

		printk("hole: start %08lx end %08lx size %08lx\n",
		       hole->start, hole->end, hole->size);
	}

	printk("end of hole listing...\n");
}
#endif /* DVMA_DEBUG */
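/*
 * Coalesce hole_list: walk the list and merge each hole whose end abuts
 * the start of the hole before it, returning the absorbed descriptors
 * to hole_cache.  Returns the number of holes merged.
 */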
static inline int refill(void)
{
	struct hole *hole;
	struct hole *prev = NULL;
	struct list_head *cur;
	int ret = 0;

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if (!prev) {
			prev = hole;
			continue;
		}

		if (hole->end == prev->start) {
			hole->size += prev->size;
			hole->end = prev->end;
			list_move(&prev->list, &hole_cache);
			ret++;
		}

		prev = hole;
	}

	return ret;
}
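/*
 * Grab an unused hole descriptor from hole_cache, attempting to refill
 * the cache by coalescing hole_list when it runs dry.  BUG()s if no
 * descriptor can be reclaimed.
 */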
static inline struct hole *rmcache(void)
{
	struct hole *ret;

	if (list_empty(&hole_cache)) {
		if (!refill()) {
			printk("out of dvma hole cache!\n");
			BUG();
		}
	}

	ret = list_entry(hole_cache.next, struct hole, list);
	list_del(&ret->list);

	return ret;
}
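/*
 * First-fit allocator: find the first hole that can hold len bytes
 * (padded, if align exceeds the page size, so the carved address is
 * aligned) and take the allocation from the top end of that hole.
 */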
static inline unsigned long get_baddr(int len, unsigned long align)
{
	struct list_head *cur;
	struct hole *hole;

	if (list_empty(&hole_list)) {
#ifdef DVMA_DEBUG
		printk("out of dvma holes! (printing hole cache)\n");
		print_holes(&hole_cache);
		print_use();
#endif
		BUG();
	}

	list_for_each(cur, &hole_list) {
		unsigned long newlen;

		hole = list_entry(cur, struct hole, list);

		if (align > DVMA_PAGE_SIZE)
			/* pad so that hole->end - newlen is align-aligned */
			newlen = len + ((hole->end - len) & (align - 1));
		else
			newlen = len;

		if (hole->size > newlen) {
			hole->end -= newlen;
			hole->size -= newlen;
			dvma_entry_use(hole->end) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->end;
		} else if (hole->size == newlen) {
			/* exact fit: the hole disappears entirely */
			list_move(&hole->list, &hole_cache);
			dvma_entry_use(hole->start) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->start;
		}
	}

	printk("unable to find dvma hole!\n");
	BUG();
	return 0;
}
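/*
 * Give the range that was handed out at baddr back to the hole list:
 * look up its length, unmap it from the IOMMU, and either grow an
 * adjacent hole or insert a fresh one.
 */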
static inline int free_baddr(unsigned long baddr)
{
	unsigned long len;
	struct hole *hole;
	struct list_head *cur;

	len = dvma_entry_use(baddr);
	dvma_entry_use(baddr) = 0;
	baddr &= DVMA_PAGE_MASK;
	dvma_unmap_iommu(baddr, len);

#ifdef DVMA_DEBUG
	dvma_frees++;
	dvma_free_bytes += len;
#endif

	/* try to merge the freed range into an existing hole */
	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if (hole->end == baddr) {
			hole->end += len;
			hole->size += len;
			return 0;
		} else if (hole->start == (baddr + len)) {
			hole->start = baddr;
			hole->size += len;
			return 0;
		}
	}

	/* no neighbor found; file a new hole at the head of the list */
	hole = rmcache();

	hole->start = baddr;
	hole->end = baddr + len;
	hole->size = len;

	list_add(&hole->list, &hole_list);

	return 0;
}
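/*
 * Initialize DVMA management: stock the descriptor cache, install a
 * single hole covering all of DVMA space, and clear any stale IOMMU
 * state.
 */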
void dvma_init(void)
{
	struct hole *hole;
	int i;

	INIT_LIST_HEAD(&hole_list);
	INIT_LIST_HEAD(&hole_cache);

	/* prepare the hole cache */
	for (i = 0; i < ARRAY_SIZE(initholes); i++)
		list_add(&initholes[i].list, &hole_cache);

	/* one hole spanning all of DVMA space to start with */
	hole = rmcache();
	hole->start = DVMA_START;
	hole->end = DVMA_END;
	hole->size = DVMA_SIZE;

	list_add(&hole->list, &hole_list);

	memset(iommu_use, 0, sizeof(iommu_use));

	dvma_unmap_iommu(DVMA_START, DVMA_SIZE);

#ifdef CONFIG_SUN3
	sun3_dvma_init();
#endif
}
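/*
 * Map len bytes starting at kaddr into DVMA space, honoring align
 * (rounded up to whole DVMA pages).  Returns the bus address including
 * the original in-page offset, or 0 on failure.
 */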
inline unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
{
	unsigned long baddr;
	unsigned long off;

	if (!len)
		len = 0x800;

	if (!kaddr)
		return 0;

#ifdef DEBUG
	printk("dvma_map request %08x bytes from %08lx\n", len, kaddr);
#endif

	off = kaddr & ~DVMA_PAGE_MASK;
	kaddr &= PAGE_MASK;
	len += off;
	len = ((len + (DVMA_PAGE_SIZE - 1)) & DVMA_PAGE_MASK);

	if (align == 0)
		align = DVMA_PAGE_SIZE;
	else
		align = ((align + (DVMA_PAGE_SIZE - 1)) & DVMA_PAGE_MASK);

	baddr = get_baddr(len, align);

	if (!dvma_map_iommu(kaddr, baddr, len))
		return baddr + off;

	printk("dvma_map failed kaddr %lx baddr %lx len %x\n",
	       kaddr, baddr, len);
	BUG();
	return 0;
}
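/*
 * Undo a dvma_map_align() by bus address.  VME bus addresses have the
 * 0xf00000 bits clear, so they are restored before the range is freed.
 */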
void dvma_unmap(void *baddr)
{
	unsigned long addr;

	addr = (unsigned long)baddr;
	/* check if this is a vme mapping */
	if (!(addr & 0x00f00000))
		addr |= 0xf00000;

	free_baddr(addr);
}
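/*
 * Allocate kernel pages and map them into DVMA space, returning the
 * CPU-visible DVMA virtual address of the buffer, or NULL on failure.
 */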
void *dvma_malloc_align(unsigned long len, unsigned long align)
{
	unsigned long kaddr;
	unsigned long baddr;
	unsigned long vaddr;

	if (!len)
		return NULL;

#ifdef DEBUG
	printk("dvma_malloc request %lx bytes\n", len);
#endif

	len = ((len + (DVMA_PAGE_SIZE - 1)) & DVMA_PAGE_MASK);

	if ((kaddr = __get_free_pages(GFP_ATOMIC, get_order(len))) == 0)
		return NULL;

	if ((baddr = dvma_map_align(kaddr, len, align)) == 0) {
		free_pages(kaddr, get_order(len));
		return NULL;
	}

	vaddr = dvma_btov(baddr);

	if (dvma_map_cpu(kaddr, vaddr, len) < 0) {
		dvma_unmap((void *)baddr);
		free_pages(kaddr, get_order(len));
		return NULL;
	}

#ifdef DEBUG
	printk("mapped %08lx bytes %08lx kern -> %08lx bus\n",
	       len, kaddr, baddr);
#endif

	return (void *)vaddr;
}
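/*
 * Counterpart to dvma_malloc_align().  Currently a stub: the mapping
 * and underlying pages are left in place.
 */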
void dvma_free(void *vaddr)
{
	return;
}
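/*
 * Usage sketch (illustrative, not part of this file): a sun3 driver
 * needing a device-visible buffer might do
 *
 *	void *buf = dvma_malloc_align(0x1000, 0);
 *	if (buf)
 *		start_dma(dvma_vtob(buf), 0x1000);
 *
 * where dvma_vtob() is assumed to be the virtual-to-bus helper from
 * <asm/dvma.h> and start_dma() is a hypothetical routine that hands the
 * bus address to the hardware.
 */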