/[pearpc]/src/io/prom/fs/hfsplus/btree.c

Contents of /src/io/prom/fs/hfsplus/btree.c



Revision 1
Wed Sep 5 17:11:21 2007 UTC by dpavlin
File MIME type: text/plain
File size: 19724 byte(s)
import upstream CVS
/*
 * libhfs - library for reading and writing Macintosh HFS volumes
 *
 * The functions are used to handle the various forms of btrees
 * found on HFS+ volumes.
 *
 * Copyright (C) 2000 Klaus Halfmann <klaus.halfmann@feri.de>
 * Original 1996-1998 Robert Leslie <rob@mars.org>
 * Additional work by Brad Boyer (flar@pants.nu)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

# ifdef HAVE_CONFIG_H
# include "config.h"
# endif

# include <stdio.h>
# include <stdlib.h>
# include <string.h>
# include <limits.h>
# include <errno.h>

# include "libhfsp.h"
# include "volume.h"
# include "btree.h"
# include "record.h"
# include "swab.h"

#ifdef HAVE_MEMSET
# define bzero(_a_,_s_) memset (_a_,0,_s_)
#endif

/* Read the node from the given buffer and swap the bytes.
 *
 * return pointer after reading the structure
 */
char* btree_readnode(btree_node_desc* node, char *p)
{
    node->next     = bswabU32_inc(&p);
    node->prev     = bswabU32_inc(&p);
    node->kind     = bswabU8_inc(&p);
    node->height   = bswabU8_inc(&p);
    node->num_rec  = bswabU16_inc(&p);
    node->reserved = bswabU16_inc(&p);
    return p;
}

/* Write the node to the given buffer and swap the bytes.
 *
 * return pointer after writing the structure
 */
char* btree_writenode(btree_node_desc* node, char *p)
{
    bstoreU32_inc(&p, node->next);
    bstoreU32_inc(&p, node->prev);
    bstoreU8_inc (&p, node->kind);
    bstoreU8_inc (&p, node->height);
    bstoreU16_inc(&p, node->num_rec);
    bstoreU16_inc(&p, node->reserved);
    return p;
}
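
/* On disk the node descriptor is big-endian and packed; the swap calls
 * above imply this 14-byte layout (matching Apple's HFS+ volume format):
 *
 *     offset  size  field
 *          0     4  next
 *          4     4  prev
 *          8     1  kind
 *          9     1  height
 *         10     2  num_rec
 *         12     2  reserved
 *
 * btree_readnode and btree_writenode walk the fields in the same order,
 * so reading a descriptor and writing it back reproduces the raw bytes.
 */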


/* Read a btree header from the given buffer and swap the bytes.
 *
 * return pointer after reading the structure
 */
char* btree_readhead(btree_head* head, char *p)
{
    int i;
    head->depth       = bswabU16_inc(&p);
    head->root        = bswabU32_inc(&p);
    head->leaf_count  = bswabU32_inc(&p);
    head->leaf_head   = bswabU32_inc(&p);
    head->leaf_tail   = bswabU32_inc(&p);
    head->node_size   = bswabU16_inc(&p);
    head->max_key_len = bswabU16_inc(&p);
    head->node_count  = bswabU32_inc(&p);
    head->free_nodes  = bswabU32_inc(&p);
    head->reserved1   = bswabU16_inc(&p);
    head->clump_size  = bswabU32_inc(&p);
    head->btree_type  = bswabU8_inc(&p);
    head->reserved2   = bswabU8_inc(&p);
    head->attributes  = bswabU32_inc(&p);
    for (i = 0; i < 16; i++)
        head->reserved3[i] = bswabU32_inc(&p);
    return p;
}

/* Write a btree header to the given buffer and swap the bytes.
 *
 * return pointer after writing the structure
 */
char* btree_writehead(btree_head* head, char *p)
{
    int i;
    bstoreU16_inc(&p, head->depth);
    bstoreU32_inc(&p, head->root);
    bstoreU32_inc(&p, head->leaf_count);
    bstoreU32_inc(&p, head->leaf_head);
    bstoreU32_inc(&p, head->leaf_tail);
    bstoreU16_inc(&p, head->node_size);
    bstoreU16_inc(&p, head->max_key_len);
    bstoreU32_inc(&p, head->node_count);
    bstoreU32_inc(&p, head->free_nodes);
    bstoreU16_inc(&p, head->reserved1);
    bstoreU32_inc(&p, head->clump_size);
    bstoreU8_inc (&p, head->btree_type);
    bstoreU8_inc (&p, head->reserved2);
    bstoreU32_inc(&p, head->attributes);
    for (i = 0; i < 16; i++)
        bstoreU32_inc(&p, head->reserved3[i]);
    return p;
}
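
/* Minimal round-trip sketch (hypothetical caller; `raw` points at the
 * on-disk header bytes and HEADER_SIZE is an assumed constant for the
 * serialized header length, not defined in this file).  Because
 * btree_readhead and btree_writehead walk the fields in the same order,
 * a read followed by a write reproduces the original bytes:
 *
 *     btree_head head;
 *     char out[HEADER_SIZE];
 *     btree_readhead(&head, raw);   // big-endian disk -> host order
 *     btree_writehead(&head, out);  // host order -> big-endian disk
 *     // now memcmp(raw, out, HEADER_SIZE) == 0
 */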

/* Initialize the cache with the given size (number of cached nodes);
 * node_cache_close must be called later to deallocate the memory. */
int node_cache_init(node_cache* cache, btree* tree, int size)
{
    int nodebufsize;
    char * buf;

    cache->size = size;
    cache->currindex = 0;
    nodebufsize = tree->head.node_size + sizeof(node_buf);
    buf = malloc(size * (sizeof(node_entry) + nodebufsize));
    if (!buf)
        return -1;
    cache->nodebufsize = nodebufsize;
    cache->entries = (node_entry*) buf;
    cache->buffers = (char*) &cache->entries[size];
    bzero(cache->entries, size * sizeof(node_entry));
    return 0;
}
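
/* Layout of the single allocation made above (a sketch derived from the
 * pointer arithmetic in node_cache_init):
 *
 *     +------------------------+------------------------------------+
 *     | size * node_entry      | size * nodebufsize bytes           |
 *     | (cache->entries[])     | (cache->buffers; one node_buf      |
 *     |                        |  header + node_size bytes each)    |
 *     +------------------------+------------------------------------+
 *
 * Because each buffer slot has the runtime-dependent size
 * node_size + sizeof(node_buf), the slots cannot be indexed as a plain
 * C array; node_buf_get below does the explicit multiplication instead.
 */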

/* Like cache->buffers[i], since the size of a node_buf is variable */
static inline node_buf* node_buf_get(node_cache* cache, int i)
{
    return (node_buf*) (cache->buffers + (i * cache->nodebufsize));
}

/* Write back the node with the given index from nodebuf to the fork */
static int btree_write_node(btree* bt, int index, char* nodebuf)
{
    UInt16 blkpernode = bt->blkpernode;
    UInt16 nodeperblk = bt->nodeperblk;

    if (blkpernode)
    {
        return volume_writetofork(bt->vol, nodebuf, bt->fork,
            index * blkpernode, blkpernode, HFSP_EXTENT_DATA, bt->cnid);
    }
    else // use nodeperblk, must reread the other nodes in the block, too
    {
        char buf[bt->vol->blksize];

        UInt16 node_size = bt->head.node_size;
        UInt16 offset = (index % nodeperblk) * node_size;
        int block = index / nodeperblk;

        // read the block containing this node
        void* p = volume_readfromfork(bt->vol, buf, bt->fork,
            block, 1, HFSP_EXTENT_DATA, bt->cnid);
        if (p != buf) // usually NULL
            return -1; // evil ...
        p = &buf[offset]; // node is found at that offset in the block
        memcpy(p, nodebuf, node_size); // splice the node into the block
        if (volume_writetofork(bt->vol, buf, bt->fork,
            block, 1, HFSP_EXTENT_DATA, bt->cnid))
            return -1; // evil ...
    }
    return 0; // all is fine
}
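
/* Worked example of the two layouts handled above (numbers chosen for
 * illustration only):
 *
 *   node_size 4096, blksize  512  ->  blkpernode = 8; node 3 occupies
 *                                     blocks 24..31 of the fork.
 *   node_size  512, blksize 4096  ->  nodeperblk = 8; node 10 lives in
 *                                     block 1 at offset 2 * 512 = 1024.
 */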

/* Flush the node at the given cache index */
static int node_cache_flush_node(btree* bt, int index)
{
    node_entry *e = &bt->cache.entries[index];
    int result = 0;
    int node_index = e->index;

    // Only write back valid, dirty nodes
    if (e->index && (e->flags & NODE_DIRTY))
    {
        node_buf* b = node_buf_get(&bt->cache, index);
        if (!b->index)
        {
            hfsp_error = "cache inconsistency in node_cache_flush_node";
            return -1;
        }
        // Write back node header to cached memory
        btree_writenode(&b->desc, b->node);
        result = btree_write_node(bt, node_index, b->node);
        b->index = 0; // invalidate buffer entry
    }
    e->index = 0; // invalidate cache entry
    return result; // 0, or -1 if the write back failed
}

/* flush the cache */
static int node_cache_flush(btree* bt)
{
    int i, size = bt->cache.size;
    int result = 0;

    for (i = 0; i < size; i++)
    {
        node_entry* e = &bt->cache.entries[i];
        if (e->index && (e->flags & NODE_DIRTY))
            if (node_cache_flush_node(bt, i))
                result = -1;
    }
    return result;
}

static int node_cache_close(btree* bt)
{
    int result = 0;
    if (!bt->cache.entries) // not (fully) initialized ?
        return result;
    result = node_cache_flush(bt);
    free(bt->cache.entries);
    return result;
}

/* Load the cache entry identified by index with the node identified
 * by node_index.
 *
 * Set the initial flags as given.
 */
static node_buf* node_cache_load_buf
    (btree* bt, node_cache* cache, int index, UInt16 node_index, int flags)
{
    node_buf *result = node_buf_get(cache, index);
    UInt16 blkpernode = bt->blkpernode;
    UInt16 nodeperblk = bt->nodeperblk;
    node_entry *e = &cache->entries[index];
    UInt32 block;
    void *p;

    if (blkpernode)
    {
        block = node_index * blkpernode;
        p = volume_readfromfork(bt->vol, result->node, bt->fork,
                block, blkpernode, HFSP_EXTENT_DATA, bt->cnid);
        if (!p)
            return NULL; // evil ...

        btree_readnode(&result->desc, p);
    }
    else // use nodeperblk
    {
        char buf[bt->vol->blksize];

        UInt16 node_size = bt->head.node_size;
        UInt16 offset = node_index % nodeperblk * node_size;
        block = node_index / nodeperblk;
        p = volume_readfromfork(bt->vol, buf, bt->fork,
                block, 1, HFSP_EXTENT_DATA, bt->cnid);
        if (p != buf) // usually NULL
            return NULL; // evil ...
        p = &buf[offset]; // node is found at that offset
        memcpy(&result->node, p, node_size);
        btree_readnode(&result->desc, p);
    }

    result->index = node_index;

    e->priority = result->desc.height * DEPTH_FACTOR;
    e->index = node_index;
    e->flags = flags;
    return result;
}

/* Mark node at given index dirty in cache.
 */
inline static void btree_dirty_node(btree* bt, UInt16 index)
{
    node_cache* cache = &bt->cache;
    node_entry *e = &cache->entries[index];
    e->flags |= NODE_DIRTY;
}

/* Read the node at the given index, using the cache.
 *
 * Mark the node with the given flags, usually NODE_CLEAN/NODE_DIRTY
 */
node_buf* btree_node_by_index(btree* bt, UInt16 index, int flags)
{
    node_cache* cache = &bt->cache;
    int oldindex, lruindex;
    int currindex = cache->currindex;
    UInt32 prio;
    node_entry *e;

    // Shortcut access to the current node, will not change priorities
    if (cache->entries[currindex].index == index)
    {
        cache->entries[currindex].flags |= flags;
        return node_buf_get(cache, currindex);
    }
    oldindex = currindex;
    if (currindex == 0)
        currindex = cache->size;
    currindex--;
    lruindex = oldindex; // entry to be flushed when needed
    prio = 0; // current priority
    while (currindex != oldindex) // round robin
    {
        e = &cache->entries[currindex];
        if (e->index == index) // got it
        {
            if (e->priority != 0) // already top, uuh
                e->priority--;
            cache->currindex = currindex;
            e->flags |= flags;
            return node_buf_get(cache, currindex);
        }
        else
        {
            if (!e->index) // free entry, well
            {
                lruindex = currindex;
                prio = UINT_MAX; // remember empty entry
            }
            if (e->priority != UINT_MAX) // already least, uuh
                e->priority++;
        }
        if (prio < e->priority)
        {
            lruindex = currindex;
            prio = e->priority;
        }
        if (currindex == 0)
            currindex = cache->size;
        currindex--;
    }
    e = &cache->entries[lruindex];
    cache->currindex = lruindex;
    if (e->flags & NODE_DIRTY)
        node_cache_flush_node(bt, lruindex);
    return node_cache_load_buf(bt, cache, lruindex, index, flags);
}
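
/* Notes on the replacement policy above (as implemented, not a spec):
 * the victim is the entry with the LARGEST priority value.  A hit
 * decrements an entry's priority, every scanned miss increments it,
 * free entries count as priority UINT_MAX and are therefore reused
 * first, and node_cache_load_buf seeds fresh entries with
 * height * DEPTH_FACTOR (DEPTH_FACTOR is defined in the libhfsp
 * headers).  Frequently hit nodes thus age toward 0 and stay cached.
 */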

/** Initialize the btree from the first node in the fork */
static int btree_init(btree* bt, volume* vol, hfsp_fork_raw* fork)
{
    char *p;
    char buf[vol->blksize];
    UInt16 node_size;
    btree_node_desc* node = &bt->head_node;
    int alloc_size;

    bt->vol = vol;
    bt->fork = fork;
    p = volume_readfromfork(vol, buf, fork, 0, 1,
            HFSP_EXTENT_DATA, bt->cnid);
    if (!p)
        return -1;
    p = btree_readnode(node, p);
    if (node->kind != HFSP_NODE_HEAD)
        return -1; // should not happen ?
    p = btree_readhead(&bt->head, p);
    node_size = bt->head.node_size;

    bt->blkpernode = node_size / vol->blksize;

    if (bt->blkpernode == 0) // maybe the other way round ?
        bt->nodeperblk = vol->blksize / node_size;

    alloc_size = node_size - HEADER_RESERVEDOFFSET; // sizeof(node_desc) + sizeof(header)
    /* Sometimes the node_size is bigger than the volume blocksize,
     * so the node is reread here when needed */
    { // need new block for allocation
        char nodebuf[node_size];
        if (bt->blkpernode > 1)
        {
            p = volume_readfromfork(vol, nodebuf, fork, 0, bt->blkpernode,
                    HFSP_EXTENT_DATA, bt->cnid);
            p += HEADER_RESERVEDOFFSET; // skip header
        }

        bt->alloc_bits = malloc(alloc_size);
        if (!bt->alloc_bits)
            return ENOMEM;
        memcpy(bt->alloc_bits, p, alloc_size);
    }

    /*** for debugging ***/
    bt->attributes = 0;

    if (node_cache_init(&bt->cache, bt, bt->head.depth + EXTRA_CACHESIZE))
        return -1;

    return 0;
}
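
/* Layout of the header node parsed above, following Apple's HFS+
 * volume format; the offsets are a sketch, assuming
 * HEADER_RESERVEDOFFSET equals sizeof(node_desc) + sizeof(header)
 * as the comment above states:
 *
 *     offset 0    node descriptor   (14 bytes)
 *     offset 14   header record     (106 bytes)
 *     offset 120  user data record  (128 bytes)
 *     offset 248  map record        (allocation bitmap, rest of node)
 *
 * bt->alloc_bits then holds everything from the user data record to
 * the end of the node; btree_check_nodealloc below skips the first
 * 128 bytes of it to reach the actual bitmap.
 */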

/** Reset the btree fields so that btree_close can safely be called. */
void btree_reset(btree* bt)
{
    bt->alloc_bits = NULL;
    bt->cache.entries = NULL;
}

/** Initialize the catalog btree */
int btree_init_cat(btree* bt, volume* vol, hfsp_fork_raw* fork)
{
    int result;
    bt->cnid = HFSP_CAT_CNID; // needed by btree_init for its fork reads
    result = btree_init(bt, vol, fork); // super (...)
    bt->kcomp = record_key_compare;
    bt->kread = record_readkey;
    bt->rread = record_readentry;
    bt->max_rec_size = sizeof(hfsp_cat_entry);
    // this is a bit too large due to alignment/padding
    return result;
}

/** Initialize the extent btree */
int btree_init_extent(btree* bt, volume* vol, hfsp_fork_raw* fork)
{
    int result;
    bt->cnid = HFSP_EXT_CNID; // needed by btree_init for its fork reads
    result = btree_init(bt, vol, fork); // super (...)
    bt->kcomp = record_extent_key_compare;
    bt->kread = record_extent_readkey;
    bt->rread = record_extent_readrecord;
    bt->max_rec_size = sizeof(hfsp_extent);
    return result;
}
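
/* Typical lifecycle sketch (hypothetical caller; `vol` and `fork` are
 * assumed to come from volume setup elsewhere in libhfsp):
 *
 *     btree bt;
 *     btree_reset(&bt);                  // safe to close even on failure
 *     if (!btree_init_cat(&bt, vol, fork))
 *     {
 *         ... use btree_node_by_index / btree_key_by_index ...
 *     }
 *     btree_close(&bt);                  // flushes dirty nodes and header
 */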

/** close the btree and free any resources */
int btree_close(btree* bt)
{
    int result = 0;

    node_cache_close(bt);
    // a dirty header without alloc_bits must not happen, mmh
    if ((bt->attributes & BTREE_HEADDIRTY) && bt->alloc_bits)
    {
        btree_head* head = &bt->head;
        btree_node_desc* node = &bt->head_node;
        UInt16 node_size = head->node_size;
        UInt16 alloc_size = node_size - HEADER_RESERVEDOFFSET;
        char buf[node_size];
        void *p;

        p = btree_writenode(node, buf);
        p = btree_writehead(head, p);
        memcpy(p, bt->alloc_bits, alloc_size);
        result = btree_write_node(bt, 0, buf);
    }
    if (bt->alloc_bits)
        free(bt->alloc_bits);
    return result;
}

/* Returns a pointer to the key given by index in the current node.
 *
 * Assumes that the current node is not NODE_HEAD ...
 * index may be == num_rec, in which case a pointer
 * to the first free byte is returned ...
 */
void* btree_key_by_index(btree* bt, node_buf* buf, UInt16 index)
{
    UInt16 node_size, off_pos;
    btree_record_offset offset;

    if (index > buf->desc.num_rec) // oops, out of range
    {
        hfsp_error = "btree_key_by_index: index out of range";
        return NULL;
    }
    node_size = bt->head.node_size;
    // The offsets are found at the end of the node ...
    off_pos = node_size - (index + 1) * sizeof(btree_record_offset);
    // position of offset at end of node
    if (off_pos >= node_size) // oops, out of range
    {
        hfsp_error = "btree_key_by_index: off_pos out of range";
        return NULL;
    }
    offset = bswabU16(*((btree_record_offset*) (buf->node + off_pos)));
    if (offset >= node_size) // oops, out of range
    {
        hfsp_error = "btree_key_by_index: offset out of range";
        return NULL;
    }

    // now we have the offset and can read the key ...
    return buf->node + offset;
}
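
/* Sketch of the node layout this relies on (standard HFS+ btree node):
 * records grow from the front, the offset table grows backwards from
 * the end of the node, one UInt16 per record plus one pointing at the
 * free space:
 *
 *     +------------+----------+----------+-- free --+----+----+----+
 *     | descriptor | record 0 | record 1 |          |off2|off1|off0|
 *     +------------+----------+----------+----------+----+----+----+
 *     0            14                                      node_size
 *
 * E.g. with node_size 8192, the offset for record 0 sits at byte
 * 8190 = 8192 - (0 + 1) * sizeof(btree_record_offset).
 */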

/** Return the allocation status of the node given by index in the btree */

int btree_check_nodealloc(btree* bt, UInt16 node)
{
    btree_head* head = &bt->head;
    btree_node_desc* desc = &bt->head_node;
    UInt16 node_size = head->node_size;
    UInt16 node_num = desc->next;
    UInt32 node_count = head->node_count;
    char* alloc_bits = bt->alloc_bits + 128; // skip reserved node
    int bit = node & 0x07;
    int alloc_size = node_size - 256;
        // sizeof(node_desc) + sizeof(header) + 128 - 8
    node_buf* nbuf = NULL;

    if (node >= node_count)
        HFSP_ERROR(-1, "Node index out of range.");
    node >>= 3;
    // First check the bits saved in the allocation bits from the header node
    if (node < alloc_size)
        return (alloc_bits[node] & (0x80 >> bit)); /* Bit one is 0x80 ! */
    // Now load the matching node by iterating over the MAP node chain
    node -= alloc_size;
    while (node_num)
    {
        nbuf = btree_node_by_index(bt, node_num, NODE_CLEAN);
        if (!nbuf)
            HFSP_ERROR(-1, "Unable to fetch map node");
        desc = &nbuf->desc;
        if (desc->kind != HFSP_NODE_MAP)
            HFSP_ERROR(-1, "Invalid chain of map nodes");
        if (node < node_size) // the byte we want is in this map node
            break;
        node -= node_size;
        node_num = desc->next; // follow the chain of map nodes
    }
    if (!nbuf || node >= node_size)
        HFSP_ERROR(-1, "Node index beyond map node chain"); // should not happen
    return (nbuf->node[node] & (0x80 >> bit)); /* Bit one is 0x80 ! */
fail:
    return -1;
}
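
/* Worked example of the bit lookup above: for node 13 the byte index
 * is 13 >> 3 == 1 and the bit within that byte is 13 & 0x07 == 5, so
 * the test is alloc_bits[1] & (0x80 >> 5), i.e. mask 0x04 -- bit 0 is
 * the most significant bit of each byte, as noted in the comments.
 */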

/* Remove the key and a (possible) record from the btree.
 * Warning: you must WRITELOCK the btree before calling this.
 */
int btree_remove_record(btree* bt, UInt16 node_index, UInt16 keyind)
{
    node_buf* node = btree_node_by_index(bt, node_index, NODE_DIRTY);
    btree_node_desc* desc;
    int num_rec;
    void* curr;

    if (!node)
        return -1; // node could not be loaded
    desc = &node->desc;
    num_rec = desc->num_rec;
    curr = btree_key_by_index(bt, node, keyind);

    if (keyind != num_rec) // Last record needs no special action
    {
        void *next = btree_key_by_index(bt, node, keyind + 1);
        void *last = btree_key_by_index(bt, node, num_rec);
        // difference needed to update the backpointers
        int diff = ((char*) next) - ((char*) curr);
        // size of the block to move "down"
        int size = ((char*) last) - ((char*) next);
        UInt16 node_size = bt->head.node_size;
        int i, n, off_pos;
        btree_record_offset* offsets;
        btree_record_offset old;

        memmove(curr, next, size);

        // Now care about the backpointers,
        off_pos = node_size - (num_rec + 1) * sizeof(btree_record_offset);
        offsets = (btree_record_offset*) (node->node + off_pos);

        // The backpointer at keyind is already correct
        n = num_rec - keyind;
        old = 0xffff;
        // will overwrite the former index to the free area, that's ok
        for (i = 0; i <= n; i++)
        {
            btree_record_offset off = offsets[i];
            offsets[i] = old;
            old = off - diff; // adjust the backpointer
        }
    }
    desc->num_rec = num_rec - 1;

    if (desc->kind == HFSP_NODE_LEAF)
    {
        bt->head.leaf_count--;
        bt->attributes |= BTREE_HEADDIRTY;
    }

    return 0;
}

/* Insert a key and a (possible) record into the btree.
 * Warning: you must WRITELOCK the btree before calling this.
 * keyind may well be == num_rec, indicating an append.
 *
 * node_index  number of the node in the tree.
 * keyind      the index where the key/record should be inserted in the node
 * key         the key (+ record) to append
 * len         the length of the key or key + record
 */
int btree_insert_record(btree* bt, UInt16 node_index, UInt16 keyind,
                        void* key, int len)
{
    node_buf* node = btree_node_by_index(bt, node_index, NODE_DIRTY);
    btree_node_desc* desc;
    int num_rec;
    UInt16 node_size = bt->head.node_size;
    void *curr, *last; // pointers for calculations
    int size, total;
    int i, n, off_pos;
    btree_record_offset* offsets;

    if (!node)
        return -1; // node could not be loaded
    desc = &node->desc;
    num_rec = desc->num_rec;

    curr = btree_key_by_index(bt, node, keyind);
    last = btree_key_by_index(bt, node, num_rec);
    // size of the block to move "up"
    size = ((char*) last) - ((char*) curr);
    total = ((char*) last) - node->node;

    // account for the backpointers, too
    if ((total + len) > (node_size - num_rec * sizeof(btree_record_offset)))
        return -1; // need to split/repartition the node first, NYI

    memmove(curr + len, curr, size);
    memcpy (curr, key, len); // copy the key / record

    num_rec++;
    // Now care about the backpointers,
    off_pos = node_size - (num_rec + 1) * sizeof(btree_record_offset);
    offsets = (btree_record_offset*) (node->node + off_pos);

    // Recalculate backpointers
    n = num_rec - keyind;
    for (i = 0; i < n; i++)
        offsets[i] = offsets[i+1] + len;
    desc->num_rec = num_rec; // adjust node descriptor

    if (desc->kind == HFSP_NODE_LEAF)
    {
        bt->head.leaf_count++;
        bt->attributes |= BTREE_HEADDIRTY;
    }

    return 0;
}
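
/* Worked example of the free-space check above (illustrative numbers):
 * with node_size 512, num_rec 3 and the existing records ending 200
 * bytes into the node (total = 200), a new key/record of len 40 fits,
 * since 200 + 40 <= 512 - 3 * sizeof(btree_record_offset) = 506.
 * The insertion then shifts the offset-table entries at and above
 * keyind (counted from the end of the node) up by len = 40.
 */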
