Lines matching defs:num_locks (drivers/md/dm-bufio.c)
399 unsigned int num_locks;
406 static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
408 return dm_hash_locks_index(block, num_locks);
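cache_index() at 406-408 is a thin wrapper around dm_hash_locks_index(), which spreads block numbers across the array of lock/tree slots. A minimal sketch of a multiplicative hash of this kind, assuming num_locks is a power of two so the final mask is valid (the constant and shift below are illustrative, not necessarily the kernel's):

	#include <linux/types.h>

	/* Sketch: map a block number to a lock/tree slot. Assumes
	 * num_locks is a power of two; constant and shifts illustrative. */
	static inline unsigned int dm_hash_locks_index(sector_t block,
						       unsigned int num_locks)
	{
		sector_t h1 = (block * 2654435761ULL) >> 6;	/* multiplicative hash */
		sector_t h2 = h1 >> 6;				/* fold in higher bits */

		return (h1 ^ h2) & (num_locks - 1);		/* power-of-two mask */
	}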
414 read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
416 down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
422 read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
424 up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
430 write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
432 down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
438 write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
440 up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
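The lock/unlock helpers at 414-440 alternate between a bottom-half-disabling rwlock and an rw_semaphore on the same slot, which only works because the two share a union: a client created with DM_BUFIO_CLIENT_NO_SLEEP uses the spinlock side, everyone else the semaphore side. A plausible shape for the structures implied by the .u.spinlock/.u.lock accesses above, plus the read-lock helper; the no_sleep flag and the struct definitions are assumptions inferred from these lines:

	#include <linux/rbtree.h>
	#include <linux/rwsem.h>
	#include <linux/spinlock.h>

	/* Sketch of the per-slot layout implied by the accesses above:
	 * one lock (either flavor) guarding one rb-tree of buffers. */
	struct buffer_tree {
		union {
			rwlock_t spinlock;		/* DM_BUFIO_CLIENT_NO_SLEEP clients */
			struct rw_semaphore lock;	/* sleepable clients */
		} u;
		struct rb_root root;
	};

	struct dm_buffer_cache {
		unsigned int num_locks;
		bool no_sleep;			/* assumed flag, fixed at cache_init() */
		struct buffer_tree trees[];	/* one slot per lock */
	};

	static void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
	{
		if (bc->no_sleep)
			read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
		else
			down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
	}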
458 lh->no_previous = cache->num_locks;
509 unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */
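Lines 458 and 509 use num_locks itself as a sentinel: no_previous is set to num_locks, a value cache_index() can never return, so previous == no_previous means "no lock currently held". A sketch of how such a lock history can hand a tree walk from one slot's lock to the next, releasing and reacquiring only when the hash index actually changes (sleepable side only; the helper names beyond those shown above are assumptions):

	struct lock_history {
		struct dm_buffer_cache *cache;
		bool write;
		unsigned int previous;		/* index of the lock currently held */
		unsigned int no_previous;	/* == num_locks: an impossible index */
	};

	static void __lh_lock(struct lock_history *lh, unsigned int index)
	{
		if (lh->write)
			down_write(&lh->cache->trees[index].u.lock);
		else
			down_read(&lh->cache->trees[index].u.lock);
	}

	static void __lh_unlock(struct lock_history *lh, unsigned int index)
	{
		if (lh->write)
			up_write(&lh->cache->trees[index].u.lock);
		else
			up_read(&lh->cache->trees[index].u.lock);
	}

	/* Take the lock covering block b, dropping the previously held
	 * lock only if b hashes to a different slot. */
	static void lh_next(struct lock_history *lh, sector_t b)
	{
		unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */

		if (lh->previous != lh->no_previous) {
			if (lh->previous == index)
				return;		/* same slot: keep the held lock */
			__lh_unlock(lh, lh->previous);
		}
		__lh_lock(lh, index);
		lh->previous = index;
	}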
538 static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
542 bc->num_locks = num_locks;
545 for (i = 0; i < bc->num_locks; i++) {
561 for (i = 0; i < bc->num_locks; i++)
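cache_init() at 538 is where the union branch is decided once for the client's lifetime; the loops at 545 and 561 then visit every slot, first to initialize and later to tear down. A sketch of the initialization side, under the struct layout assumed above:

	static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks,
			       bool no_sleep)
	{
		unsigned int i;

		bc->num_locks = num_locks;
		bc->no_sleep = no_sleep;

		for (i = 0; i < bc->num_locks; i++) {
			if (no_sleep)
				rwlock_init(&bc->trees[i].u.spinlock);
			else
				init_rwsem(&bc->trees[i].u.lock);
			bc->trees[i].root = RB_ROOT;
		}
	}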
620 b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
694 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
860 r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
886 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
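Every per-block operation (620, 694, 860, 886) follows the same pattern: hash the block once, then look up, insert into, or erase from the rb-tree in that one slot while holding only that slot's lock. A sketch of the lookup side; __cache_get() and cache_read_unlock() are assumed helpers mirroring the lines above:

	static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
	{
		struct dm_buffer *b;

		cache_read_lock(bc, block);	/* lock only this block's tree */
		b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root,
				block);		/* plain rb-tree search */
		/* any reference-count bump would also belong under the lock */
		cache_read_unlock(bc, block);

		return b;
	}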
956 for (i = 0; i < bc->num_locks; i++) {
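The loop at 956 is the flip side of per-block hashing: an operation over a range of blocks cannot know which slots those blocks hashed to, so it must visit all num_locks trees in turn. A sketch of that shape, assuming a sleepable client (the rw_semaphore side of the union) and an assumed per-tree helper:

	static void cache_remove_range(struct dm_buffer_cache *bc,
				       sector_t begin, sector_t end)
	{
		unsigned int i;

		for (i = 0; i < bc->num_locks; i++) {
			down_write(&bc->trees[i].u.lock);
			/* __remove_range() is an assumed helper that walks one
			 * rb-tree and drops buffers with begin <= block < end. */
			__remove_range(bc, &bc->trees[i].root, begin, end);
			up_write(&bc->trees[i].u.lock);
		}
	}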
2445 unsigned int num_locks;
2455 num_locks = dm_num_hash_locks();
2456 c = kzalloc(sizeof(*c) + (num_locks * sizeof(struct buffer_tree)), GFP_KERNEL);
2461 cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
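At client creation (2445-2461), num_locks is chosen once via dm_num_hash_locks() and baked into a single allocation: the buffer_tree array sits at the end of the client as a flexible array member, so sizeof(*c) + num_locks * sizeof(struct buffer_tree) covers the whole thing. A sketch of a plausible sizing policy, scaling with CPU count up to a fixed cap and staying a power of two so the hash mask works (the shift and cap below are assumptions):

	#include <linux/cpumask.h>
	#include <linux/log2.h>
	#include <linux/minmax.h>

	static inline unsigned int dm_num_hash_locks(void)
	{
		/* Assumed policy: more CPUs -> more locks, capped, power of two. */
		unsigned int num_locks = roundup_pow_of_two(num_online_cpus()) << 4;

		return min_t(unsigned int, num_locks, 64);
	}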