/*
 *  linux/fs/hfsplus/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/slab.h>
#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"


/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
        struct hfs_btree *tree;
        struct hfs_btree_header_rec *head;
        struct address_space *mapping;
        struct page *page;
        unsigned int size;

        tree = kzalloc(sizeof(*tree), GFP_KERNEL);
        if (!tree)
                return NULL;

        init_MUTEX(&tree->tree_lock);
        spin_lock_init(&tree->hash_lock);
        tree->sb = sb;
        tree->cnid = id;
        tree->inode = iget(sb, id);
        if (!tree->inode)
                goto free_tree;

        mapping = tree->inode->i_mapping;
        page = read_mapping_page(mapping, 0, NULL);
        if (IS_ERR(page))
                goto free_tree;

        /* Load the header */
        head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
        tree->root = be32_to_cpu(head->root);
        tree->leaf_count = be32_to_cpu(head->leaf_count);
        tree->leaf_head = be32_to_cpu(head->leaf_head);
        tree->leaf_tail = be32_to_cpu(head->leaf_tail);
        tree->node_count = be32_to_cpu(head->node_count);
        tree->free_nodes = be32_to_cpu(head->free_nodes);
        tree->attributes = be32_to_cpu(head->attributes);
        tree->node_size = be16_to_cpu(head->node_size);
        tree->max_key_len = be16_to_cpu(head->max_key_len);
        tree->depth = be16_to_cpu(head->depth);

        /* Set the correct compare function */
        if (id == HFSPLUS_EXT_CNID) {
                tree->keycmp = hfsplus_ext_cmp_key;
        } else if (id == HFSPLUS_CAT_CNID) {
                if ((HFSPLUS_SB(sb).flags & HFSPLUS_SB_HFSX) &&
                    (head->key_type == HFSPLUS_KEY_BINARY))
                        tree->keycmp = hfsplus_cat_bin_cmp_key;
                else
                        tree->keycmp = hfsplus_cat_case_cmp_key;
        } else {
                printk(KERN_ERR "hfs: unknown B*Tree requested\n");
                goto fail_page;
        }

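        /*
         * Sanity checks: node_size must be a non-zero power of two
         * and the tree must contain at least one node.
         */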
        size = tree->node_size;
        if (!size || size & (size - 1))
                goto fail_page;
        if (!tree->node_count)
                goto fail_page;
        tree->node_size_shift = ffs(size) - 1;

        tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

        kunmap(page);
        page_cache_release(page);
        return tree;

 fail_page:
        kunmap(page);
        tree->inode->i_mapping->a_ops = &hfsplus_aops;
        page_cache_release(page);
 free_tree:
        iput(tree->inode);
        kfree(tree);
        return NULL;
}

/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
        struct hfs_bnode *node;
        int i;

        if (!tree)
                return;

        for (i = 0; i < NODE_HASH_SIZE; i++) {
                while ((node = tree->node_hash[i])) {
                        tree->node_hash[i] = node->next_hash;
                        if (atomic_read(&node->refcnt))
                                printk(KERN_CRIT "hfs: node %d:%d still has %d user(s)!\n",
                                        node->tree->cnid, node->this, atomic_read(&node->refcnt));
                        hfs_bnode_free(node);
                        tree->node_hash_cnt--;
                }
        }
        iput(tree->inode);
        kfree(tree);
}

void hfs_btree_write(struct hfs_btree *tree)
{
        struct hfs_btree_header_rec *head;
        struct hfs_bnode *node;
        struct page *page;

        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                /* panic? */
                return;
        /* Load the header */
        page = node->page[0];
        head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));

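        /* Copy the in-core tree state back into the on-disk header (big-endian) */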
        head->root = cpu_to_be32(tree->root);
        head->leaf_count = cpu_to_be32(tree->leaf_count);
        head->leaf_head = cpu_to_be32(tree->leaf_head);
        head->leaf_tail = cpu_to_be32(tree->leaf_tail);
        head->node_count = cpu_to_be32(tree->node_count);
        head->free_nodes = cpu_to_be32(tree->free_nodes);
        head->attributes = cpu_to_be32(tree->attributes);
        head->depth = cpu_to_be16(tree->depth);

        kunmap(page);
        set_page_dirty(page);
        hfs_bnode_put(node);
}

static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
        struct hfs_btree *tree = prev->tree;
        struct hfs_bnode *node;
        struct hfs_bnode_desc desc;
        __be32 cnid;

        node = hfs_bnode_create(tree, idx);
        if (IS_ERR(node))
                return node;

        tree->free_nodes--;
        prev->next = idx;
        cnid = cpu_to_be32(idx);
        hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

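        /*
         * Initialize the new node as an empty map node: write its node
         * descriptor, set up the record offsets at the end of the node and
         * set the first map bit, which accounts for the map node itself.
         */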
        node->type = HFS_NODE_MAP;
        node->num_recs = 1;
        hfs_bnode_clear(node, 0, tree->node_size);
        desc.next = 0;
        desc.prev = 0;
        desc.type = HFS_NODE_MAP;
        desc.height = 0;
        desc.num_recs = cpu_to_be16(1);
        desc.reserved = 0;
        hfs_bnode_write(node, &desc, 0, sizeof(desc));
        hfs_bnode_write_u16(node, 14, 0x8000);
        hfs_bnode_write_u16(node, tree->node_size - 2, 14);
        hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

        return node;
}

struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
        struct hfs_bnode *node, *next_node;
        struct page **pagep;
        u32 nidx, idx;
        u16 off, len;
        u8 *data, byte, m;
        int i;

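        /* No free nodes left: grow the tree file until at least one node is free */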
        while (!tree->free_nodes) {
                struct inode *inode = tree->inode;
                u32 count;
                int res;

                res = hfsplus_file_extend(inode);
                if (res)
                        return ERR_PTR(res);
                HFSPLUS_I(inode).phys_size = inode->i_size =
                                (loff_t)HFSPLUS_I(inode).alloc_blocks <<
                                HFSPLUS_SB(tree->sb).alloc_blksz_shift;
                HFSPLUS_I(inode).fs_blocks = HFSPLUS_I(inode).alloc_blocks <<
                                             HFSPLUS_SB(tree->sb).fs_shift;
                inode_set_bytes(inode, inode->i_size);
                count = inode->i_size >> tree->node_size_shift;
                tree->free_nodes = count - tree->node_count;
                tree->node_count = count;
        }

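        /* Start at the header node; its record 2 holds the first allocation map */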
        nidx = 0;
        node = hfs_bnode_find(tree, nidx);
        if (IS_ERR(node))
                return node;
        len = hfs_brec_lenoff(node, 2, &off);

        off += node->page_offset;
        pagep = node->page + (off >> PAGE_CACHE_SHIFT);
        data = kmap(*pagep);
        off &= ~PAGE_CACHE_MASK;
        idx = 0;

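        /* Scan the map bytes for a clear bit; each bit stands for one node */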
        for (;;) {
                while (len) {
                        byte = data[off];
                        if (byte != 0xff) {
                                for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
                                        if (!(byte & m)) {
                                                idx += i;
                                                data[off] |= m;
                                                set_page_dirty(*pagep);
                                                kunmap(*pagep);
                                                tree->free_nodes--;
                                                mark_inode_dirty(tree->inode);
                                                hfs_bnode_put(node);
                                                return hfs_bnode_create(tree, idx);
                                        }
                                }
                        }
                        if (++off >= PAGE_CACHE_SIZE) {
                                kunmap(*pagep);
                                data = kmap(*++pagep);
                                off = 0;
                        }
                        idx += 8;
                        len--;
                }
                kunmap(*pagep);
                nidx = node->next;
                if (!nidx) {
                        printk(KERN_DEBUG "hfs: create new bmap node...\n");
                        next_node = hfs_bmap_new_bmap(node, idx);
                } else
                        next_node = hfs_bnode_find(tree, nidx);
                hfs_bnode_put(node);
                if (IS_ERR(next_node))
                        return next_node;
                node = next_node;

                len = hfs_brec_lenoff(node, 0, &off);
                off += node->page_offset;
                pagep = node->page + (off >> PAGE_CACHE_SHIFT);
                data = kmap(*pagep);
                off &= ~PAGE_CACHE_MASK;
        }
}

void hfs_bmap_free(struct hfs_bnode *node)
{
        struct hfs_btree *tree;
        struct page *page;
        u16 off, len;
        u32 nidx;
        u8 *data, byte, m;

        dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
        BUG_ON(!node->this);
        tree = node->tree;
        nidx = node->this;
        node = hfs_bnode_find(tree, 0);
        if (IS_ERR(node))
                return;
        len = hfs_brec_lenoff(node, 2, &off);
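        /* Walk the chain of map nodes until we reach the one covering nidx */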
        while (nidx >= len * 8) {
                u32 i;

                nidx -= len * 8;
                i = node->next;
                if (!i) {
                        /* panic */;
                        printk(KERN_CRIT "hfs: unable to free bnode %u. bmap not found!\n", node->this);
                        hfs_bnode_put(node);
                        return;
                }
                hfs_bnode_put(node);
                node = hfs_bnode_find(tree, i);
                if (IS_ERR(node))
                        return;
                if (node->type != HFS_NODE_MAP) {
                        /* panic */;
                        printk(KERN_CRIT "hfs: invalid bmap found! (%u,%d)\n", node->this, node->type);
                        hfs_bnode_put(node);
                        return;
                }
                len = hfs_brec_lenoff(node, 0, &off);
        }
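        /* Clear this node's bit in the map record; bits are used MSB first */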
        off += node->page_offset + nidx / 8;
        page = node->page[off >> PAGE_CACHE_SHIFT];
        data = kmap(page);
        off &= ~PAGE_CACHE_MASK;
        m = 1 << (~nidx & 7);
        byte = data[off];
        if (!(byte & m)) {
                printk(KERN_CRIT "hfs: trying to free free bnode %u(%d)\n", node->this, node->type);
                kunmap(page);
                hfs_bnode_put(node);
                return;
        }
        data[off] = byte & ~m;
        set_page_dirty(page);
        kunmap(page);
        hfs_bnode_put(node);
        tree->free_nodes++;
        mark_inode_dirty(tree->inode);
}