Lines matching refs:badrange in drivers/nvdimm/badrange.c (Linux kernel nvdimm subsystem). The leading number on each match is its line number within that file.
19 void badrange_init(struct badrange *badrange)
21 INIT_LIST_HEAD(&badrange->list);
22 spin_lock_init(&badrange->lock);
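
The two matched calls initialize a list head and a spinlock, which implies the shape of the tracked state: a list of (start, length) entries guarded by a lock. A sketch of the structures this suggests (in the kernel tree they live in include/linux/libnvdimm.h, not in this file):

        struct badrange_entry {
                u64 start;              /* base physical address of the bad range */
                u64 length;             /* extent of the range in bytes */
                struct list_head list;  /* linkage into badrange->list */
        };

        struct badrange {
                struct list_head list;  /* list of struct badrange_entry */
                spinlock_t lock;        /* protects the list and entry fields */
        };
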
26 static void append_badrange_entry(struct badrange *badrange,
29 lockdep_assert_held(&badrange->lock);
32 list_add_tail(&bre->list, &badrange->list);
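
Taken together, the three matches outline the whole helper: assert that the caller already holds the lock, fill in the entry, and link it at the tail. A sketch of the elided body, consistent with the matched lines:

        static void append_badrange_entry(struct badrange *badrange,
                        struct badrange_entry *bre, u64 addr, u64 length)
        {
                lockdep_assert_held(&badrange->lock);   /* caller must hold the lock */
                bre->start = addr;
                bre->length = length;
                list_add_tail(&bre->list, &badrange->list);
        }
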
35 static int alloc_and_append_badrange_entry(struct badrange *badrange,
44 append_badrange_entry(badrange, bre, addr, length);
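
The allocating variant at line 35 presumably wraps that append in a kzalloc(). The gfp_t flags parameter shown below is an assumption based on the call site in badrange_forget() (line 154), which runs with the spinlock held and therefore cannot use a sleeping allocation:

        static int alloc_and_append_badrange_entry(struct badrange *badrange,
                        u64 addr, u64 length, gfp_t flags)
        {
                struct badrange_entry *bre;

                bre = kzalloc(sizeof(*bre), flags);
                if (!bre)
                        return -ENOMEM;

                append_badrange_entry(badrange, bre, addr, length);
                return 0;
        }
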
48 static int add_badrange(struct badrange *badrange, u64 addr, u64 length)
52 spin_unlock(&badrange->lock);
54 spin_lock(&badrange->lock);
56 if (list_empty(&badrange->list)) {
59 append_badrange_entry(badrange, bre_new, addr, length);
68 list_for_each_entry(bre, &badrange->list, list)
84 append_badrange_entry(badrange, bre_new, addr, length);
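
The unlock/lock pair at lines 52 and 54 is the notable pattern in add_badrange(): the new entry is allocated with GFP_KERNEL, which may sleep, so the spinlock must be dropped around the allocation and retaken before the list is touched. Because the list may have changed in that window, the function re-checks for an empty list (line 56) and for an existing entry with the same start address (the loop at line 68) before appending. A sketch of the implied control flow; the duplicate-handling details are inferred from the matches:

        static int add_badrange(struct badrange *badrange, u64 addr, u64 length)
        {
                struct badrange_entry *bre, *bre_new;

                /* Drop the lock for the allocation: GFP_KERNEL may sleep. */
                spin_unlock(&badrange->lock);
                bre_new = kzalloc(sizeof(*bre_new), GFP_KERNEL);
                spin_lock(&badrange->lock);

                if (list_empty(&badrange->list)) {
                        if (!bre_new)
                                return -ENOMEM;
                        append_badrange_entry(badrange, bre_new, addr, length);
                        return 0;
                }

                /* Same start address already recorded: just refresh the length. */
                list_for_each_entry(bre, &badrange->list, list)
                        if (bre->start == addr) {
                                if (bre->length != length)
                                        bre->length = length;
                                kfree(bre_new);
                                return 0;
                        }

                if (!bre_new)
                        return -ENOMEM;
                append_badrange_entry(badrange, bre_new, addr, length);
                return 0;
        }
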
89 int badrange_add(struct badrange *badrange, u64 addr, u64 length)
93 spin_lock(&badrange->lock);
94 rc = add_badrange(badrange, addr, length);
95 spin_unlock(&badrange->lock);
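
badrange_add() is simply the locked public wrapper around add_badrange(). A hypothetical call site (spa_addr, spa_len, and the dev pointer are illustrative, not from this file):

        int rc;

        /* Record a newly reported poisoned physical address range. */
        rc = badrange_add(&nvdimm_bus->badrange, spa_addr, spa_len);
        if (rc)
                dev_err(dev, "failed to record bad range: %d\n", rc);
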
101 void badrange_forget(struct badrange *badrange, phys_addr_t start,
104 struct list_head *badrange_list = &badrange->list;
108 spin_lock(&badrange->lock);
111 * [start, clr_end] is the badrange interval being cleared.
113 * the above interval against. The badrange list entry may need
126 /* Delete completely overlapped badrange entries */
154 alloc_and_append_badrange_entry(badrange, new_start,
161 spin_unlock(&badrange->lock);
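
badrange_forget() clears the interval [start, start + len - 1] out of the list. Each entry can relate to that interval in four ways: no intersection (skipped), fully covered (deleted, the branch at line 126), partially covered at its head or tail (trimmed), or strictly containing the cleared interval, in which case the entry is split and the call at line 154 allocates the right-hand half. A sketch of the split case, assuming a bre_end = bre->start + bre->length - 1 local, which the comparisons imply:

        if (bre->start < start && bre_end > clr_end) {
                u64 new_start = clr_end + 1;
                u64 new_len = bre_end - new_start + 1;

                /*
                 * New entry for the surviving right-hand piece; GFP_NOWAIT
                 * because badrange->lock is held here.
                 */
                alloc_and_append_badrange_entry(badrange, new_start, new_len,
                                GFP_NOWAIT);
                /* Truncate the current entry to the surviving left-hand piece. */
                bre->length = start - bre->start;
        }
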
179 * @len: number of bytes of badrange to be added
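
This kernel-doc fragment documents a byte count and belongs to this file's byte-to-sector conversion helper, __add_badblock_range(). The badblocks core tracks 512-byte sectors, so a byte range must be rounded outward: start sector rounded down, end sector rounded up. A sketch of that conversion using the kernel's div_u64 helpers (variable names are illustrative):

        const unsigned int sector_size = 512;
        sector_t start_sector, end_sector;
        u32 rem;

        start_sector = div_u64(ns_offset, sector_size);
        end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
        if (rem)
                end_sector++;   /* a partial trailing sector is still bad */
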
213 static void badblocks_populate(struct badrange *badrange,
218 if (list_empty(&badrange->list))
221 list_for_each_entry(bre, &badrange->list, list) {
243 * Deal with overlap for badrange starting before
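
badblocks_populate() walks the badrange list and clips each entry against the namespace range before handing it to the sector conversion; the match at line 243 is the comment for the case where the bad range begins before the namespace does. A sketch of that clipping, assuming a bre_end local and a struct range with start/end fields plus the kernel's range_len() helper:

        if (bre->start < range->start) {
                u64 len;

                if (bre_end < range->end)
                        len = bre->start + bre->length - range->start;
                else
                        len = range_len(range);
                /* The overlap begins exactly at the namespace start: offset 0. */
                __add_badblock_range(bb, 0, len);
        }
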
264 * The badrange list generated during bus initialization may contain
282 badblocks_populate(&nvdimm_bus->badrange, bb, range);
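
The last match is the consumer path: nvdimm_badblocks_populate() resolves the region to its parent bus and populates the badblocks instance under the bus lock, so the badrange list cannot change mid-walk. A sketch of the surrounding calls, assuming the walk_to_nvdimm_bus() and nvdimm_bus_lock()/nvdimm_bus_unlock() helpers from the nvdimm core:

        nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);

        nvdimm_bus_lock(&nvdimm_bus->dev);
        badblocks_populate(&nvdimm_bus->badrange, bb, range);
        nvdimm_bus_unlock(&nvdimm_bus->dev);
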