Lines matching refs:node (fs/hfsplus/bnode.c)

9  * Handle basic btree node operations
21 /* Copy a specified range of bytes from the raw data of a node */
22 void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
27 off += node->page_offset;
28 pagep = node->page + (off >> PAGE_SHIFT);
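The two matches above turn a node-relative offset into a page index plus an in-page offset (a node may span several pages and need not start on a page boundary). A minimal sketch of how such a page-spanning read can be completed; the kmap()/memcpy() body is an assumption based on the kmap calls visible elsewhere in this listing:

	void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
	{
		struct page **pagep;
		int l;

		/* Node-relative offset -> backing page + offset within it. */
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_SHIFT);
		off &= ~PAGE_MASK;

		/* Copy the tail of the first page, then whole pages. */
		l = min_t(int, len, PAGE_SIZE - off);
		memcpy(buf, kmap(*pagep) + off, l);
		kunmap(*pagep);

		while ((len -= l) != 0) {
			buf += l;
			l = min_t(int, len, PAGE_SIZE);
			memcpy(buf, kmap(*++pagep), l);
			kunmap(*pagep);
		}
	}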
43 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
47 hfs_bnode_read(node, &data, off, 2);
51 u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
55 hfs_bnode_read(node, &data, off, 1);
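The fixed-width readers are thin wrappers over hfs_bnode_read(). A plausible completion, assuming the on-disk fields are big-endian (consistent with the be16_to_cpu()/be32_to_cpu() conversions that appear further down in this listing):

	u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
	{
		__be16 data;

		hfs_bnode_read(node, &data, off, 2);
		return be16_to_cpu(data);
	}

	u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
	{
		u8 data;

		/* A single byte needs no byte-order conversion. */
		hfs_bnode_read(node, &data, off, 1);
		return data;
	}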
59 void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
64 tree = node->tree;
65 if (node->type == HFS_NODE_LEAF ||
67 node->tree->cnid == HFSPLUS_ATTR_CNID)
68 key_len = hfs_bnode_read_u16(node, off) + 2;
72 hfs_bnode_read(node, key, off, key_len);
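hfs_bnode_read_key() has to know how long the key is before it can copy it: leaf records, trees with variable-length index keys, and the attributes tree carry a 16-bit length prefix, while other index nodes use fixed-size keys. A sketch of the branch; the HFS_TREE_VARIDXKEYS test and the max_key_len fallback are borrowed from the same expressions used by hfs_bnode_dump() further down:

	void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
	{
		struct hfs_btree *tree = node->tree;
		int key_len;

		if (node->type == HFS_NODE_LEAF ||
		    tree->attributes & HFS_TREE_VARIDXKEYS ||
		    node->tree->cnid == HFSPLUS_ATTR_CNID)
			/* 2-byte length prefix followed by that many key bytes. */
			key_len = hfs_bnode_read_u16(node, off) + 2;
		else
			/* Fixed-size index key: prefix plus max_key_len bytes. */
			key_len = tree->max_key_len + 2;

		hfs_bnode_read(node, key, off, key_len);
	}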
75 void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
80 off += node->page_offset;
81 pagep = node->page + (off >> PAGE_SHIFT);
98 void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
102 hfs_bnode_write(node, &v, off, 2);
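hfs_bnode_write_u16() mirrors the 16-bit reader; a sketch, again assuming big-endian on-disk byte order:

	void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
	{
		__be16 v = cpu_to_be16(data);

		/* Store the value in on-disk (big-endian) order. */
		hfs_bnode_write(node, &v, off, 2);
	}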
105 void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
110 off += node->page_offset;
111 pagep = node->page + (off >> PAGE_SHIFT);
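hfs_bnode_clear() walks the backing pages the same way the read path does; the memset()/set_page_dirty() body below is an assumption following that pattern:

	void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
	{
		struct page **pagep;
		int l;

		off += node->page_offset;
		pagep = node->page + (off >> PAGE_SHIFT);
		off &= ~PAGE_MASK;

		l = min_t(int, len, PAGE_SIZE - off);
		memset(kmap(*pagep) + off, 0, l);
		set_page_dirty(*pagep);
		kunmap(*pagep);

		while ((len -= l) != 0) {
			l = min_t(int, len, PAGE_SIZE);
			memset(kmap(*++pagep), 0, l);
			set_page_dirty(*pagep);
			kunmap(*pagep);
		}
	}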
185 void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
193 src += node->page_offset;
194 dst += node->page_offset;
197 src_page = node->page + (src >> PAGE_SHIFT);
200 dst_page = node->page + (dst >> PAGE_SHIFT);
247 src_page = node->page + (src >> PAGE_SHIFT);
249 dst_page = node->page + (dst >> PAGE_SHIFT);
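hfs_bnode_move() is an in-node memmove(): source and destination are offsets inside the same node, so overlapping ranges must be copied in a safe direction, which is why the page-pointer computation appears twice above (one pass for backward copies, one for forward copies). A hypothetical caller, showing the typical use of shifting a node's record data to open a gap before an insert (bnode_open_gap() is illustrative only, not a function in this file):

	/*
	 * Hypothetical helper: open a size-byte gap at "off" in a node whose
	 * record data currently ends at "end", as a record-insert path would
	 * before writing the new record into the gap.
	 */
	static void bnode_open_gap(struct hfs_bnode *node, int off, int end, int size)
	{
		/* Shift everything in [off, end) forward by "size" bytes. */
		hfs_bnode_move(node, off + size, off, end - off);
	}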
298 void hfs_bnode_dump(struct hfs_bnode *node)
304 hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
305 hfs_bnode_read(node, &desc, 0, sizeof(desc));
310 off = node->tree->node_size - 2;
312 key_off = hfs_bnode_read_u16(node, off);
314 if (i && node->type == HFS_NODE_INDEX) {
317 if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
318 node->tree->cnid == HFSPLUS_ATTR_CNID)
319 tmp = hfs_bnode_read_u16(node, key_off) + 2;
321 tmp = node->tree->max_key_len + 2;
323 hfs_bnode_read(node, &cnid, key_off + tmp, 4);
325 } else if (i && node->type == HFS_NODE_LEAF) {
328 tmp = hfs_bnode_read_u16(node, key_off);
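hfs_bnode_dump() leans on the B-tree node layout: record offsets are stored as 16-bit values packed at the very end of the node, last entry first, so the walk starts at node_size - 2 and steps down two bytes per record; in index nodes each key is followed by a 4-byte child CNID. A hypothetical walk of the offset table built only from helpers shown in this listing:

	/* Hypothetical: print where each record of a node begins. */
	static void bnode_print_rec_offsets(struct hfs_bnode *node)
	{
		int i, off = node->tree->node_size - 2;

		for (i = 0; i < node->num_recs; i++, off -= 2) {
			int rec_off = hfs_bnode_read_u16(node, off);

			pr_debug("record %d starts at offset %d\n", i, rec_off);
		}
	}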
335 void hfs_bnode_unlink(struct hfs_bnode *node)
341 tree = node->tree;
342 if (node->prev) {
343 tmp = hfs_bnode_find(tree, node->prev);
346 tmp->next = node->next;
351 } else if (node->type == HFS_NODE_LEAF)
352 tree->leaf_head = node->next;
354 if (node->next) {
355 tmp = hfs_bnode_find(tree, node->next);
358 tmp->prev = node->prev;
363 } else if (node->type == HFS_NODE_LEAF)
364 tree->leaf_tail = node->prev;
367 if (!node->prev && !node->next)
369 if (!node->parent) {
373 set_bit(HFS_BNODE_DELETED, &node->flags);
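Unlinking a node means patching the CNIDs of its neighbours: prev and next are node numbers, not pointers, so each neighbour has to be looked up with hfs_bnode_find(), updated in memory, and rewritten on disk. A sketch of the previous-sibling half of the function body (the offsetof-based write and the early return on lookup failure are assumptions; the next-sibling half is symmetric):

	struct hfs_btree *tree = node->tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		/* Point the previous sibling past the node being removed. */
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid,
				offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		/* No previous sibling: the node headed the leaf chain. */
		tree->leaf_head = node->next;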
385 struct hfs_bnode *node;
388 pr_err("request for non-existent node %d in B*Tree\n",
393 for (node = tree->node_hash[hfs_bnode_hash(cnid)];
394 node; node = node->next_hash)
395 if (node->this == cnid)
396 return node;
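Cached nodes live in a small hash table keyed on the CNID, with collision chains linked through next_hash. A sketch of the lookup; the hfs_bnode_hash() definition and the node_count bound check are assumptions (the error message itself appears in the listing):

	/* Assumed hash: low bits of the CNID pick one of NODE_HASH_SIZE buckets. */
	#define hfs_bnode_hash(num)	((num) & (NODE_HASH_SIZE - 1))

	struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
	{
		struct hfs_bnode *node;

		if (cnid >= tree->node_count) {
			pr_err("request for non-existent node %d in B*Tree\n",
			       cnid);
			return NULL;
		}

		/* Walk the collision chain for this bucket. */
		for (node = tree->node_hash[hfs_bnode_hash(cnid)];
		     node; node = node->next_hash)
			if (node->this == cnid)
				return node;
		return NULL;
	}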
402 struct hfs_bnode *node, *node2;
409 pr_err("request for non-existent node %d in B*Tree\n",
416 node = kzalloc(size, GFP_KERNEL);
417 if (!node)
419 node->tree = tree;
420 node->this = cnid;
421 set_bit(HFS_BNODE_NEW, &node->flags);
422 atomic_set(&node->refcnt, 1);
424 node->tree->cnid, node->this);
425 init_waitqueue_head(&node->lock_wq);
430 node->next_hash = tree->node_hash[hash];
431 tree->node_hash[hash] = node;
435 kfree(node);
445 node->page_offset = off & ~PAGE_MASK;
454 node->page[i] = page;
457 return node;
459 set_bit(HFS_BNODE_ERROR, &node->flags);
460 return node;
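__hfs_bnode_create() allocates the in-memory bnode (a header plus an array of pages_per_bnode page pointers, hence the kzalloc of a computed size) and then pins the pages that back the node in the B-tree inode's page cache. A sketch of that mapping step; read_mapping_page() on tree->inode->i_mapping is an assumption, and "fail" stands for the error path that sets HFS_BNODE_ERROR:

	loff_t off = (loff_t)cnid << tree->node_size_shift;
	pgoff_t block = off >> PAGE_SHIFT;
	int i;

	/* The node may start part-way into its first backing page. */
	node->page_offset = off & ~PAGE_MASK;
	for (i = 0; i < tree->pages_per_bnode; i++, block++) {
		struct page *page = read_mapping_page(tree->inode->i_mapping,
						      block, NULL);

		if (IS_ERR(page))
			goto fail;	/* sets HFS_BNODE_ERROR */
		node->page[i] = page;
	}
	return node;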
463 void hfs_bnode_unhash(struct hfs_bnode *node)
468 node->tree->cnid, node->this, atomic_read(&node->refcnt));
469 for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
470 *p && *p != node; p = &(*p)->next_hash)
473 *p = node->next_hash;
474 node->tree->node_hash_cnt--;
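hfs_bnode_unhash() removes the node with the classic pointer-to-pointer idiom: walk the bucket holding the address of each next_hash link until it points at the node, then redirect that link. A short completion of what the matches above show (the BUG_ON is an assumption):

	struct hfs_bnode **p;

	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);		/* the node must be on its chain */
	*p = node->next_hash;	/* splice it out */
	node->tree->node_hash_cnt--;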
477 /* Load a particular node out of a tree */
480 struct hfs_bnode *node;
486 node = hfs_bnode_findhash(tree, num);
487 if (node) {
488 hfs_bnode_get(node);
490 wait_event(node->lock_wq,
491 !test_bit(HFS_BNODE_NEW, &node->flags));
492 if (test_bit(HFS_BNODE_ERROR, &node->flags))
494 return node;
497 node = __hfs_bnode_create(tree, num);
498 if (!node)
500 if (test_bit(HFS_BNODE_ERROR, &node->flags))
502 if (!test_bit(HFS_BNODE_NEW, &node->flags))
503 return node;
505 desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
506 node->page_offset);
507 node->prev = be32_to_cpu(desc->prev);
508 node->next = be32_to_cpu(desc->next);
509 node->num_recs = be16_to_cpu(desc->num_recs);
510 node->type = desc->type;
511 node->height = desc->height;
512 kunmap(node->page[0]);
514 switch (node->type) {
517 if (node->height != 0)
521 if (node->height != 1)
525 if (node->height <= 1 || node->height > tree->depth)
533 off = hfs_bnode_read_u16(node, rec_off);
536 for (i = 1; i <= node->num_recs; off = next_off, i++) {
538 next_off = hfs_bnode_read_u16(node, rec_off);
544 if (node->type != HFS_NODE_INDEX &&
545 node->type != HFS_NODE_LEAF)
547 key_size = hfs_bnode_read_u16(node, off) + 2;
551 clear_bit(HFS_BNODE_NEW, &node->flags);
552 wake_up(&node->lock_wq);
553 return node;
556 set_bit(HFS_BNODE_ERROR, &node->flags);
557 clear_bit(HFS_BNODE_NEW, &node->flags);
558 wake_up(&node->lock_wq);
559 hfs_bnode_put(node);
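hfs_bnode_find() hands back a node with its reference count raised, either straight from the hash table or freshly read and validated, and signals failure through the usual ERR_PTR convention; every successful lookup must therefore be paired with hfs_bnode_put(). A hypothetical caller (print_node_info() is illustrative only):

	/* Hypothetical example of the find/put discipline. */
	static int print_node_info(struct hfs_btree *tree, u32 cnid)
	{
		struct hfs_bnode *node;

		node = hfs_bnode_find(tree, cnid);
		if (IS_ERR(node))
			return PTR_ERR(node);

		pr_debug("node %u: type %d, height %d, %d records\n",
			 cnid, node->type, node->height, node->num_recs);

		hfs_bnode_put(node);
		return 0;
	}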
563 void hfs_bnode_free(struct hfs_bnode *node)
567 for (i = 0; i < node->tree->pages_per_bnode; i++)
568 if (node->page[i])
569 put_page(node->page[i]);
570 kfree(node);
575 struct hfs_bnode *node;
580 node = hfs_bnode_findhash(tree, num);
582 if (node) {
583 pr_crit("new node %u already hashed?\n", num);
585 return node;
587 node = __hfs_bnode_create(tree, num);
588 if (!node)
590 if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
591 hfs_bnode_put(node);
595 pagep = node->page;
596 memset(kmap(*pagep) + node->page_offset, 0,
605 clear_bit(HFS_BNODE_NEW, &node->flags);
606 wake_up(&node->lock_wq);
608 return node;
611 void hfs_bnode_get(struct hfs_bnode *node)
613 if (node) {
614 atomic_inc(&node->refcnt);
616 node->tree->cnid, node->this,
617 atomic_read(&node->refcnt));
621 /* Dispose of resources used by a node */
622 void hfs_bnode_put(struct hfs_bnode *node)
624 if (node) {
625 struct hfs_btree *tree = node->tree;
629 node->tree->cnid, node->this,
630 atomic_read(&node->refcnt));
631 BUG_ON(!atomic_read(&node->refcnt));
632 if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
635 if (!node->page[i])
637 mark_page_accessed(node->page[i]);
640 if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
641 hfs_bnode_unhash(node);
644 hfs_bnode_clear(node, 0, tree->node_size);
645 hfs_bmap_free(node);
646 hfs_bnode_free(node);
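hfs_bnode_put() only does real work when the last reference is dropped: atomic_dec_and_lock() takes tree->hash_lock on the final put, the backing pages are marked accessed, and a node flagged HFS_BNODE_DELETED is unhashed, wiped, returned to the allocation bitmap and freed, while an ordinary node simply stays cached in the hash table. A sketch of that zero-refcount tail (the spin_unlock() calls are assumptions implied by atomic_dec_and_lock()):

	if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
		return;
	for (i = 0; i < tree->pages_per_bnode; i++) {
		if (!node->page[i])
			continue;
		mark_page_accessed(node->page[i]);
	}
	if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
		hfs_bnode_unhash(node);
		spin_unlock(&tree->hash_lock);
		hfs_bnode_clear(node, 0, tree->node_size);
		hfs_bmap_free(node);
		hfs_bnode_free(node);
		return;
	}
	spin_unlock(&tree->hash_lock);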