Lines matching references to "node" in fs/hfs/bnode.c (HFS btree node operations)

9  * Handle basic btree node operations
18 void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
25 off += node->page_offset;
30 if (pagenum >= node->tree->pages_per_bnode)
32 page = node->page[pagenum];
42 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
46 hfs_bnode_read(node, &data, off, 2);
50 u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
54 hfs_bnode_read(node, &data, off, 1);
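
The u16/u8 helpers above wrap hfs_bnode_read() with a small stack buffer and convert the on-disk big-endian value to host order before returning it. A minimal user-space sketch of the same pattern (demo_bnode and demo_bnode_read_u16 are hypothetical names, not kernel API):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* ntohs(): big-endian (on-disk) to host order */

/* Hypothetical stand-in for a node's raw bytes; the kernel instead reads
 * them page by page through node->page[]. */
struct demo_bnode {
    unsigned char data[512];
};

/* Same shape as hfs_bnode_read_u16(): copy two bytes at 'off' into a
 * temporary, then byte-swap so callers never see the on-disk order. */
static uint16_t demo_bnode_read_u16(const struct demo_bnode *node, int off)
{
    uint16_t be;

    memcpy(&be, node->data + off, sizeof(be));
    return ntohs(be);
}
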
58 void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
63 tree = node->tree;
64 if (node->type == HFS_NODE_LEAF ||
66 key_len = hfs_bnode_read_u8(node, off) + 1;
70 hfs_bnode_read(node, key, off, key_len);
73 void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
77 off += node->page_offset;
78 page = node->page[0];
84 void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
88 hfs_bnode_write(node, &v, off, 2);
91 void hfs_bnode_write_u8(struct hfs_bnode *node, int off, u8 data)
94 hfs_bnode_write(node, &data, off, 1);
97 void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
101 off += node->page_offset;
102 page = node->page[0];
125 void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
133 src += node->page_offset;
134 dst += node->page_offset;
135 page = node->page[0];
142 void hfs_bnode_dump(struct hfs_bnode *node)
148 hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
149 hfs_bnode_read(node, &desc, 0, sizeof(desc));
154 off = node->tree->node_size - 2;
156 key_off = hfs_bnode_read_u16(node, off);
158 if (i && node->type == HFS_NODE_INDEX) {
161 if (node->tree->attributes & HFS_TREE_VARIDXKEYS)
162 tmp = (hfs_bnode_read_u8(node, key_off) | 1) + 1;
164 tmp = node->tree->max_key_len + 1;
166 tmp, hfs_bnode_read_u8(node, key_off));
167 hfs_bnode_read(node, &cnid, key_off + tmp, 4);
169 } else if (i && node->type == HFS_NODE_LEAF) {
172 tmp = hfs_bnode_read_u8(node, key_off);
179 void hfs_bnode_unlink(struct hfs_bnode *node)
185 tree = node->tree;
186 if (node->prev) {
187 tmp = hfs_bnode_find(tree, node->prev);
190 tmp->next = node->next;
194 } else if (node->type == HFS_NODE_LEAF)
195 tree->leaf_head = node->next;
197 if (node->next) {
198 tmp = hfs_bnode_find(tree, node->next);
201 tmp->prev = node->prev;
205 } else if (node->type == HFS_NODE_LEAF)
206 tree->leaf_tail = node->prev;
209 if (!node->prev && !node->next) {
212 if (!node->parent) {
216 set_bit(HFS_BNODE_DELETED, &node->flags);
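
hfs_bnode_unlink() above patches the sibling links around the node being removed, falling back to the tree's leaf_head/leaf_tail when the node sits at either end of the leaf chain. A simplified in-memory sketch of that unlink step (the real code stores cnids and fetches each sibling with hfs_bnode_find(); all demo_* names are hypothetical):

/* Unlink 'node' from a doubly linked leaf chain, keeping the tree's
 * head/tail pointers consistent when the node is first or last. */
struct demo_leaf {
    struct demo_leaf *prev, *next;
};

struct demo_tree {
    struct demo_leaf *leaf_head, *leaf_tail;
};

static void demo_leaf_unlink(struct demo_tree *tree, struct demo_leaf *node)
{
    if (node->prev)
        node->prev->next = node->next;
    else
        tree->leaf_head = node->next;    /* node was the first leaf */

    if (node->next)
        node->next->prev = node->prev;
    else
        tree->leaf_tail = node->prev;    /* node was the last leaf */
}
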
228 struct hfs_bnode *node;
231 pr_err("request for non-existent node %d in B*Tree\n", cnid);
235 for (node = tree->node_hash[hfs_bnode_hash(cnid)];
236 node; node = node->next_hash) {
237 if (node->this == cnid) {
238 return node;
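
hfs_bnode_findhash() is a plain chained-hash lookup: pick the bucket for the cnid and walk next_hash until a node whose 'this' field matches. A self-contained sketch of the same lookup (the bucket count and all demo_* names are hypothetical; the kernel's hfs_bnode_hash() computes the bucket differently):

#include <stdint.h>
#include <stddef.h>

#define DEMO_HASH_SIZE 256               /* bucket count; arbitrary here */

struct demo_node {
    uint32_t this_cnid;                  /* node id, like node->this */
    struct demo_node *next_hash;         /* chain within one bucket */
};

static struct demo_node *demo_findhash(struct demo_node *hash[], uint32_t cnid)
{
    struct demo_node *node;

    for (node = hash[cnid % DEMO_HASH_SIZE]; node; node = node->next_hash)
        if (node->this_cnid == cnid)
            return node;
    return NULL;                         /* not cached */
}
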
246 struct hfs_bnode *node, *node2;
253 pr_err("request for non-existent node %d in B*Tree\n", cnid);
259 node = kzalloc(size, GFP_KERNEL);
260 if (!node)
262 node->tree = tree;
263 node->this = cnid;
264 set_bit(HFS_BNODE_NEW, &node->flags);
265 atomic_set(&node->refcnt, 1);
267 node->tree->cnid, node->this);
268 init_waitqueue_head(&node->lock_wq);
273 node->next_hash = tree->node_hash[hash];
274 tree->node_hash[hash] = node;
279 kfree(node);
288 node->page_offset = off & ~PAGE_MASK;
293 node->page[i] = page;
296 return node;
298 set_bit(HFS_BNODE_ERROR, &node->flags);
299 return node;
302 void hfs_bnode_unhash(struct hfs_bnode *node)
307 node->tree->cnid, node->this, atomic_read(&node->refcnt));
308 for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
309 *p && *p != node; p = &(*p)->next_hash)
312 *p = node->next_hash;
313 node->tree->node_hash_cnt--;
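
hfs_bnode_unhash() removes the node with the classic pointer-to-pointer walk: advance a pointer along the chain's links until it points at the entry to delete, then splice the node out with a single assignment. A sketch of that removal, continuing the demo_node lookup sketch above (hypothetical names):

/* Remove 'node' from its bucket.  'p' always points at the link that
 * currently holds *p, so when *p == node one assignment unlinks it,
 * whether it is the bucket head or in the middle of the chain. */
static void demo_unhash(struct demo_node *hash[], struct demo_node *node)
{
    struct demo_node **p;

    for (p = &hash[node->this_cnid % DEMO_HASH_SIZE];
         *p && *p != node;
         p = &(*p)->next_hash)
        ;
    if (*p)
        *p = node->next_hash;
}
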
316 /* Load a particular node out of a tree */
319 struct hfs_bnode *node;
325 node = hfs_bnode_findhash(tree, num);
326 if (node) {
327 hfs_bnode_get(node);
329 wait_event(node->lock_wq, !test_bit(HFS_BNODE_NEW, &node->flags));
330 if (test_bit(HFS_BNODE_ERROR, &node->flags))
332 return node;
335 node = __hfs_bnode_create(tree, num);
336 if (!node)
338 if (test_bit(HFS_BNODE_ERROR, &node->flags))
340 if (!test_bit(HFS_BNODE_NEW, &node->flags))
341 return node;
343 desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
344 node->page_offset);
345 node->prev = be32_to_cpu(desc->prev);
346 node->next = be32_to_cpu(desc->next);
347 node->num_recs = be16_to_cpu(desc->num_recs);
348 node->type = desc->type;
349 node->height = desc->height;
352 switch (node->type) {
355 if (node->height != 0)
359 if (node->height != 1)
363 if (node->height <= 1 || node->height > tree->depth)
371 off = hfs_bnode_read_u16(node, rec_off);
374 for (i = 1; i <= node->num_recs; off = next_off, i++) {
376 next_off = hfs_bnode_read_u16(node, rec_off);
382 if (node->type != HFS_NODE_INDEX &&
383 node->type != HFS_NODE_LEAF)
385 key_size = hfs_bnode_read_u8(node, off) + 1;
389 clear_bit(HFS_BNODE_NEW, &node->flags);
390 wake_up(&node->lock_wq);
391 return node;
394 set_bit(HFS_BNODE_ERROR, &node->flags);
395 clear_bit(HFS_BNODE_NEW, &node->flags);
396 wake_up(&node->lock_wq);
397 hfs_bnode_put(node);
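
The hfs_bnode_find() fragment above reads the on-disk node descriptor from the first mapped page and byte-swaps each field before validating the node's type, height, and record offsets. A user-space illustration of that descriptor layout and conversion, following the fields converted above (this is a sketch, not the kernel's struct hfs_bnode_desc):

#include <stdint.h>
#include <arpa/inet.h>   /* ntohl()/ntohs() */

/* Big-endian btree node descriptor at offset 0 of every node. */
struct demo_bnode_desc {
    uint32_t next;        /* cnid of the next node of this kind */
    uint32_t prev;        /* cnid of the previous node */
    uint8_t  type;        /* index / leaf / header / map */
    uint8_t  height;      /* 0 for header/map, 1 for leaves, 2..depth for index */
    uint16_t num_recs;    /* number of records stored in the node */
    uint16_t reserved;
} __attribute__((packed));

/* Mirror of the be32_to_cpu()/be16_to_cpu() conversions above. */
static void demo_parse_desc(const struct demo_bnode_desc *desc,
                            uint32_t *prev, uint32_t *next, uint16_t *num_recs)
{
    *prev = ntohl(desc->prev);
    *next = ntohl(desc->next);
    *num_recs = ntohs(desc->num_recs);
}
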
401 void hfs_bnode_free(struct hfs_bnode *node)
405 for (i = 0; i < node->tree->pages_per_bnode; i++)
406 if (node->page[i])
407 put_page(node->page[i]);
408 kfree(node);
413 struct hfs_bnode *node;
418 node = hfs_bnode_findhash(tree, num);
420 if (node) {
421 pr_crit("new node %u already hashed?\n", num);
423 return node;
425 node = __hfs_bnode_create(tree, num);
426 if (!node)
428 if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
429 hfs_bnode_put(node);
433 pagep = node->page;
434 memzero_page(*pagep, node->page_offset,
441 clear_bit(HFS_BNODE_NEW, &node->flags);
442 wake_up(&node->lock_wq);
444 return node;
447 void hfs_bnode_get(struct hfs_bnode *node)
449 if (node) {
450 atomic_inc(&node->refcnt);
452 node->tree->cnid, node->this,
453 atomic_read(&node->refcnt));
457 /* Dispose of resources used by a node */
458 void hfs_bnode_put(struct hfs_bnode *node)
460 if (node) {
461 struct hfs_btree *tree = node->tree;
465 node->tree->cnid, node->this,
466 atomic_read(&node->refcnt));
467 BUG_ON(!atomic_read(&node->refcnt));
468 if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
471 if (!node->page[i])
473 mark_page_accessed(node->page[i]);
476 if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
477 hfs_bnode_unhash(node);
479 hfs_bmap_free(node);
480 hfs_bnode_free(node);
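
hfs_bnode_get()/hfs_bnode_put() implement the node cache's reference counting: get() bumps refcnt, and put() drops it, acquiring the tree's hash_lock only when the count reaches zero so the node can be unhashed and freed without racing a concurrent lookup. A simplified user-space sketch of that pattern, using a mutex in place of atomic_dec_and_lock() (all demo_* names are hypothetical):

#include <pthread.h>
#include <stdlib.h>

struct demo_cache {
    pthread_mutex_t lock;      /* protects the node hash, like tree->hash_lock */
};

struct demo_refnode {
    int refcnt;                /* protected by cache->lock in this sketch */
    struct demo_cache *cache;
};

static void demo_node_get(struct demo_refnode *node)
{
    pthread_mutex_lock(&node->cache->lock);
    node->refcnt++;
    pthread_mutex_unlock(&node->cache->lock);
}

static void demo_node_put(struct demo_refnode *node)
{
    pthread_mutex_lock(&node->cache->lock);
    if (--node->refcnt) {
        pthread_mutex_unlock(&node->cache->lock);
        return;                /* still referenced elsewhere */
    }
    /* Last reference dropped: the kernel unhashes and frees the node while
     * still holding hash_lock; this sketch just releases the lock and frees. */
    pthread_mutex_unlock(&node->cache->lock);
    free(node);
}
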