Lines matching defs:arena (struct arena_info uses in the NVDIMM BTT driver, drivers/nvdimm/btt.c)

27 static struct device *to_dev(struct arena_info *arena)
29 return &arena->nd_btt->dev;
37 static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
40 struct nd_btt *nd_btt = arena->nd_btt;
43 /* arena offsets may be shifted from the base of the device */
48 static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
51 struct nd_btt *nd_btt = arena->nd_btt;
54 /* arena offsets may be shifted from the base of the device */
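Both byte accessors route arena I/O through the backing namespace after shifting the arena-relative offset by the BTT start, per the "arena offsets may be shifted" comment. A minimal sketch of the read side, assuming the nvdimm core accessor nvdimm_read_bytes() and the adjust_initial_offset() helper that appears later in this listing (the write side is symmetric with nvdimm_write_bytes()):

	static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
			void *buf, size_t n, unsigned long flags)
	{
		struct nd_btt *nd_btt = arena->nd_btt;
		struct nd_namespace_common *ndns = nd_btt->ndns;

		/* arena offsets may be shifted from the base of the device */
		offset = adjust_initial_offset(nd_btt, offset);
		return nvdimm_read_bytes(ndns, offset, buf, n, flags);
	}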
59 static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
68 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
69 "arena->infooff: %#llx is unaligned\n", arena->infooff);
70 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
71 "arena->info2off: %#llx is unaligned\n", arena->info2off);
73 ret = arena_write_bytes(arena, arena->info2off, super,
78 return arena_write_bytes(arena, arena->infooff, super,
82 static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
84 return arena_read_bytes(arena, arena->infooff, super,
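btt_info_write() stores the info block twice, the backup copy at info2off before the primary at infooff, so an interrupted update always leaves one readable copy. A hedged sketch, assuming the caller has already filled in the checksum and that the block size is sizeof(struct btt_sb):

	static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
	{
		int ret;

		/* write the backup copy first ... */
		ret = arena_write_bytes(arena, arena->info2off, super,
				sizeof(struct btt_sb), 0);
		if (ret)
			return ret;

		/* ... then the primary, once the backup is in place */
		return arena_write_bytes(arena, arena->infooff, super,
				sizeof(struct btt_sb), 0);
	}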
94 static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
97 u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
99 if (unlikely(lba >= arena->external_nlba))
100 dev_err_ratelimited(to_dev(arena),
102 __func__, lba, arena->external_nlba);
103 return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
106 static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
141 dev_err_ratelimited(to_dev(arena),
147 return __btt_map_write(arena, lba, mapping_le, rwb_flags);
150 static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
156 u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
158 if (unlikely(lba >= arena->external_nlba))
159 dev_err_ratelimited(to_dev(arena),
161 __func__, lba, arena->external_nlba);
163 ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
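Each map entry is a 4-byte word carrying the postmap LBA plus two status bits (zero/trim and error) in its top bits; btt_map_write() folds the flags in and btt_map_read() peels them back out. A sketch of the packing, where MAP_TRIM_SHIFT, MAP_ERR_SHIFT and MAP_LBA_MASK are assumed values standing in for the driver's header definitions:

	#define MAP_TRIM_SHIFT	31
	#define MAP_ERR_SHIFT	30
	#define MAP_LBA_MASK	(~((1u << MAP_TRIM_SHIFT) | (1u << MAP_ERR_SHIFT)))

	static u32 pack_map_entry(u32 postmap, bool trim, bool error)
	{
		return (postmap & MAP_LBA_MASK) |
			((u32)trim << MAP_TRIM_SHIFT) |
			((u32)error << MAP_ERR_SHIFT);
	}

	static void unpack_map_entry(u32 raw, u32 *postmap, bool *trim, bool *error)
	{
		*postmap = raw & MAP_LBA_MASK;
		*trim = !!(raw & (1u << MAP_TRIM_SHIFT));
		*error = !!(raw & (1u << MAP_ERR_SHIFT));
	}

An entry with both flag bits clear is treated as the initial identity mapping (postmap == premap), which lets reads of never-written blocks be served without pre-populating the whole map.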
206 static int btt_log_group_read(struct arena_info *arena, u32 lane,
209 return arena_read_bytes(arena,
210 arena->logoff + (lane * LOG_GRP_SIZE), log,
226 snprintf(dirname, 32, "arena%d", idx);
258 struct arena_info *arena;
265 list_for_each_entry(arena, &btt->arena_list, list) {
266 arena_debugfs_init(arena, btt->debugfs_dir, i);
327 static int btt_log_read(struct arena_info *arena, u32 lane,
334 ret = btt_log_group_read(arena, lane, &log);
338 old_ent = btt_log_get_old(arena, &log);
340 dev_err(to_dev(arena),
342 old_ent, lane, log.ent[arena->log_index[0]].seq,
343 log.ent[arena->log_index[1]].seq);
351 memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);
361 static int __btt_log_write(struct arena_info *arena, u32 lane,
365 u32 group_slot = arena->log_index[sub];
370 ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
373 ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
379 return arena_write_bytes(arena, ns_off, src, log_half, flags);
382 static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
387 ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
392 arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
393 if (++(arena->freelist[lane].seq) == 4)
394 arena->freelist[lane].seq = 1;
396 arena->freelist[lane].has_err = 1;
397 arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));
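btt_flog_write() persists the log entry and then advances the lane's free-list state: it flips sub (which of the two slots in the lane's log group takes the next write) and steps the sequence number, which cycles 1 -> 2 -> 3 -> 1 with 0 reserved for a never-written slot. A sketch of the cycle and of picking the older of a group's two slots; next_seq() and older_of() are illustrative names, and corrupt seq combinations are ignored here:

	static u32 next_seq(u32 seq)
	{
		static const u32 next[] = { 0, 2, 3, 1 };	/* 1->2->3->1, 0 stays 0 */

		return next[seq & 3];
	}

	/* return which slot (0 or 1) holds the older log entry */
	static int older_of(u32 seq0, u32 seq1)
	{
		return (next_seq(seq0) == seq1) ? 0 : 1;
	}

This matches the ++seq == 4 wraparound shown above and the nd_inc_seq() call used by btt_freelist_init() further down.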
406 static int btt_map_init(struct arena_info *arena)
412 size_t mapsize = arena->logoff - arena->mapoff;
423 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
424 "arena->mapoff: %#llx is unaligned\n", arena->mapoff);
429 dev_WARN_ONCE(to_dev(arena), size < 512,
431 ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
450 static int btt_log_init(struct arena_info *arena)
452 size_t logsize = arena->info2off - arena->logoff;
467 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
468 "arena->logoff: %#llx is unaligned\n", arena->logoff);
473 dev_WARN_ONCE(to_dev(arena), size < 512,
475 ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
485 for (i = 0; i < arena->nfree; i++) {
487 ent.old_map = cpu_to_le32(arena->external_nlba + i);
488 ent.new_map = cpu_to_le32(arena->external_nlba + i);
490 ret = __btt_log_write(arena, i, 0, &ent, 0);
500 static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
502 return arena->dataoff + ((u64)lba * arena->internal_lbasize);
505 static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
509 if (arena->freelist[lane].has_err) {
511 u32 lba = arena->freelist[lane].block;
512 u64 nsoff = to_namespace_offset(arena, lba);
513 unsigned long len = arena->sector_size;
515 mutex_lock(&arena->err_lock);
520 ret = arena_write_bytes(arena, nsoff, zero_page,
527 arena->freelist[lane].has_err = 0;
529 mutex_unlock(&arena->err_lock);
534 static int btt_freelist_init(struct arena_info *arena)
540 arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
542 if (!arena->freelist)
545 for (i = 0; i < arena->nfree; i++) {
546 new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
555 arena->freelist[i].sub = 1 - new;
556 arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
557 arena->freelist[i].block = log_oldmap;
565 arena->freelist[i].has_err = 1;
566 ret = arena_clear_freelist_error(arena, i);
568 dev_err_ratelimited(to_dev(arena),
577 ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
594 ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
623 static int log_set_indices(struct arena_info *arena)
631 for (i = 0; i < arena->nfree; i++) {
632 ret = btt_log_group_read(arena, i, &log);
710 dev_err(to_dev(arena), "Found an unknown padding scheme\n");
714 arena->log_index[0] = log_index[0];
715 arena->log_index[1] = log_index[1];
716 dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
717 dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
721 static int btt_rtt_init(struct arena_info *arena)
723 arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
724 if (arena->rtt == NULL)
730 static int btt_maplocks_init(struct arena_info *arena)
734 arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
736 if (!arena->map_locks)
739 for (i = 0; i < arena->nfree; i++)
740 spin_lock_init(&arena->map_locks[i].lock);
748 struct arena_info *arena;
752 arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
753 if (!arena)
755 arena->nd_btt = btt->nd_btt;
756 arena->sector_size = btt->sector_size;
757 mutex_init(&arena->err_lock);
760 return arena;
762 arena->size = size;
763 arena->external_lba_start = start;
764 arena->external_lbasize = btt->lbasize;
765 arena->internal_lbasize = roundup(arena->external_lbasize,
767 arena->nfree = BTT_DEFAULT_NFREE;
768 arena->version_major = btt->nd_btt->version_major;
769 arena->version_minor = btt->nd_btt->version_minor;
778 logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
782 arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
783 arena->internal_lbasize + MAP_ENT_SIZE);
784 arena->external_nlba = arena->internal_nlba - arena->nfree;
786 mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
790 arena->infooff = arena_off;
791 arena->dataoff = arena->infooff + BTT_PG_SIZE;
792 arena->mapoff = arena->dataoff + datasize;
793 arena->logoff = arena->mapoff + mapsize;
794 arena->info2off = arena->logoff + logsize;
797 arena->log_index[0] = 0;
798 arena->log_index[1] = 1;
799 return arena;
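Taken together, the offsets computed above give each arena the following on-media layout; to_namespace_offset() later resolves a data block as dataoff + lba * internal_lbasize. Sizes in this sketch are inferred from the roundup() calls and BTT_PG_SIZE (the 4K alignment page) and are not authoritative:

	/*
	 *  infooff    info block          one BTT_PG_SIZE page
	 *  dataoff    data area           internal_nlba * internal_lbasize
	 *  mapoff     map                 external_nlba * MAP_ENT_SIZE, rounded up
	 *  logoff     free-list log       nfree * LOG_GRP_SIZE, rounded up
	 *  info2off   backup info block   one BTT_PG_SIZE page
	 */

The nfree extra internal blocks (internal_nlba - external_nlba) are the per-lane free blocks that make every write an out-of-place write.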
804 struct arena_info *arena, *next;
806 list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
807 list_del(&arena->list);
808 kfree(arena->rtt);
809 kfree(arena->map_locks);
810 kfree(arena->freelist);
811 debugfs_remove_recursive(arena->debugfs_dir);
812 kfree(arena);
820 static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
823 arena->internal_nlba = le32_to_cpu(super->internal_nlba);
824 arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
825 arena->external_nlba = le32_to_cpu(super->external_nlba);
826 arena->external_lbasize = le32_to_cpu(super->external_lbasize);
827 arena->nfree = le32_to_cpu(super->nfree);
828 arena->version_major = le16_to_cpu(super->version_major);
829 arena->version_minor = le16_to_cpu(super->version_minor);
831 arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
833 arena->infooff = arena_off;
834 arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
835 arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
836 arena->logoff = arena_off + le64_to_cpu(super->logoff);
837 arena->info2off = arena_off + le64_to_cpu(super->info2off);
839 arena->size = (le64_to_cpu(super->nextoff) > 0)
841 : (arena->info2off - arena->infooff + BTT_PG_SIZE);
843 arena->flags = le32_to_cpu(super->flags);
849 struct arena_info *arena;
861 /* Alloc memory for arena */
862 arena = alloc_arena(btt, 0, 0, 0);
863 if (!arena) {
868 arena->infooff = cur_off;
869 ret = btt_info_read(arena, super);
876 dev_info(to_dev(arena), "No existing arenas\n");
879 dev_err(to_dev(arena),
886 arena->external_lba_start = cur_nlba;
887 parse_arena_meta(arena, super, cur_off);
889 ret = log_set_indices(arena);
891 dev_err(to_dev(arena),
896 ret = btt_freelist_init(arena);
900 ret = btt_rtt_init(arena);
904 ret = btt_maplocks_init(arena);
908 list_add_tail(&arena->list, &btt->arena_list);
910 remaining -= arena->size;
911 cur_off += arena->size;
912 cur_nlba += arena->external_nlba;
915 if (arena->nextoff == 0)
926 kfree(arena);
939 struct arena_info *arena;
946 arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
947 if (!arena) {
951 btt->nlba += arena->external_nlba;
953 arena->nextoff = arena->size;
955 arena->nextoff = 0;
957 list_add_tail(&arena->list, &btt->arena_list);
964 * This function completes arena initialization by writing
966 * It is only called for an uninitialized arena when a write
967 * to that arena occurs for the first time.
969 static int btt_arena_write_layout(struct arena_info *arena)
974 struct nd_btt *nd_btt = arena->nd_btt;
977 ret = btt_map_init(arena);
981 ret = btt_log_init(arena);
992 super->flags = cpu_to_le32(arena->flags);
993 super->version_major = cpu_to_le16(arena->version_major);
994 super->version_minor = cpu_to_le16(arena->version_minor);
995 super->external_lbasize = cpu_to_le32(arena->external_lbasize);
996 super->external_nlba = cpu_to_le32(arena->external_nlba);
997 super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
998 super->internal_nlba = cpu_to_le32(arena->internal_nlba);
999 super->nfree = cpu_to_le32(arena->nfree);
1001 super->nextoff = cpu_to_le64(arena->nextoff);
1003 * Subtract arena->infooff (arena start) so numbers are relative
1004 * to 'this' arena
1006 super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
1007 super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
1008 super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
1009 super->info2off = cpu_to_le64(arena->info2off - arena->infooff);
1015 ret = btt_info_write(arena, super);
1028 struct arena_info *arena;
1031 list_for_each_entry(arena, &btt->arena_list, list) {
1032 ret = btt_arena_write_layout(arena);
1036 ret = btt_freelist_init(arena);
1040 ret = btt_rtt_init(arena);
1044 ret = btt_maplocks_init(arena);
1062 * This function calculates the arena in which the given LBA lies
1069 struct arena_info **arena)
1076 *arena = arena_list;
1090 static void lock_map(struct arena_info *arena, u32 premap)
1091 __acquires(&arena->map_locks[idx].lock)
1093 u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;
1095 spin_lock(&arena->map_locks[idx].lock);
1098 static void unlock_map(struct arena_info *arena, u32 premap)
1099 __releases(&arena->map_locks[idx].lock)
1101 u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;
1103 spin_unlock(&arena->map_locks[idx].lock);
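Map updates are serialized not per entry but per cache line of map entries: the premap LBA is hashed to one of nfree spinlocks, so concurrent writers touching the same cache-line run of the map contend on the same lock. A sketch of the hash, assuming MAP_ENT_SIZE is 4 and L1_CACHE_BYTES is 64 (illustrative values only):

	static u32 map_lock_index(u32 premap, u32 nfree)
	{
		return (premap * 4 / 64) % nfree;	/* premap * MAP_ENT_SIZE / L1_CACHE_BYTES */
	}

With those values, premap LBAs 0..15 hash to lock 0, 16..31 to lock 1, and so on, wrapping modulo nfree.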
1106 static int btt_data_read(struct arena_info *arena, struct page *page,
1110 u64 nsoff = to_namespace_offset(arena, lba);
1113 ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
1119 static int btt_data_write(struct arena_info *arena, u32 lba,
1123 u64 nsoff = to_namespace_offset(arena, lba);
1126 ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
1142 struct arena_info *arena, u32 postmap, int rw)
1151 meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;
1168 ret = arena_write_bytes(arena, meta_nsoff,
1172 ret = arena_read_bytes(arena, meta_nsoff,
1191 struct arena_info *arena, u32 postmap, int rw)
1203 struct arena_info *arena = NULL;
1211 ret = lba_to_arena(btt, sector, &premap, &arena);
1217 ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
1241 arena->rtt[lane] = RTT_VALID | postmap;
1248 ret = btt_map_read(arena, premap, &new_map, &new_t,
1262 ret = btt_data_read(arena, page, off, postmap, cur_len);
1265 if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
1266 dev_warn_ratelimited(to_dev(arena),
1273 ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
1278 arena->rtt[lane] = RTT_INVALID;
1289 arena->rtt[lane] = RTT_INVALID;
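The read path above uses the per-lane read-tracking table (RTT) to keep a concurrent writer from recycling the block it is about to read: it publishes the postmap block in rtt[lane], re-reads the map to confirm the translation has not changed underneath it, reads the data, and only then clears the slot. A simplified sketch of that handshake; the retry label and the shortened error handling are illustrative only:

	arena->rtt[lane] = RTT_VALID | postmap;		/* publish in-flight read */
	ret = btt_map_read(arena, premap, &new_map, &new_t, &new_e,
			NVDIMM_IO_ATOMIC);
	if (!ret && new_map != postmap) {
		/* a writer remapped premap between the two map reads */
		arena->rtt[lane] = RTT_INVALID;
		goto retry;
	}
	ret = btt_data_read(arena, page, off, postmap, cur_len);
	arena->rtt[lane] = RTT_INVALID;			/* writers may now reuse postmap */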
1300 static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
1303 u64 nsoff = adjust_initial_offset(arena->nd_btt,
1304 to_namespace_offset(arena, postmap));
1307 return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
1315 struct arena_info *arena = NULL;
1327 ret = lba_to_arena(btt, sector, &premap, &arena);
1332 if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
1337 if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
1338 arena->freelist[lane].has_err = 1;
1340 if (mutex_is_locked(&arena->err_lock)
1341 || arena->freelist[lane].has_err) {
1344 ret = arena_clear_freelist_error(arena, lane);
1352 new_postmap = arena->freelist[lane].block;
1355 for (i = 0; i < arena->nfree; i++)
1356 while (arena->rtt[i] == (RTT_VALID | new_postmap))
1360 if (new_postmap >= arena->internal_nlba) {
1365 ret = btt_data_write(arena, new_postmap, page, off, cur_len);
1370 ret = btt_rw_integrity(btt, bip, arena, new_postmap,
1376 lock_map(arena, premap);
1377 ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
1381 if (old_postmap >= arena->internal_nlba) {
1391 log.seq = cpu_to_le32(arena->freelist[lane].seq);
1392 sub = arena->freelist[lane].sub;
1393 ret = btt_flog_write(arena, lane, sub, &log);
1397 ret = btt_map_write(arena, premap, new_postmap, 0, 0,
1402 unlock_map(arena, premap);
1406 ret = arena_clear_freelist_error(arena, lane);
1419 unlock_map(arena, premap);
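The write-path fragments above follow a fixed ordering so that a crash at any point leaves either the old or the new translation fully intact. A hedged summary of that ordering, with integrity metadata, the zero/error special cases, and error unwinding omitted:

	/*
	 * 1. new_postmap = arena->freelist[lane].block    -- this lane's free block
	 * 2. spin until no rtt[] slot advertises a read of new_postmap
	 * 3. btt_data_write(arena, new_postmap, ...)       -- data lands first
	 * 4. lock_map(arena, premap)
	 * 5. btt_map_read(arena, premap, &old_postmap, ...)
	 * 6. btt_flog_write(arena, lane, sub, &log)        -- log {premap, old, new, seq};
	 *    this also returns old_postmap to the lane's free list
	 * 7. btt_map_write(arena, premap, new_postmap, ...) -- new data becomes visible
	 * 8. unlock_map(arena, premap)
	 */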