Lines matching references to base in lib/test_xarray.c:
208 unsigned long base = round_down(index, 1UL << order);
209 unsigned long next = base + (1UL << order);
219 for (i = base; i < next; i++) {
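
The first hits (lines 208-219) compute the slot range covered by one multi-order entry: round_down() clears the low order bits of the index, so [base, next) spans exactly 1 << order slots and every index in it resolves to the same entry. A minimal userspace sketch of the same arithmetic, with round_down() reduced to the power-of-two case the kernel macro handles here:

#include <stdio.h>

/* Simplified round_down(): clear the low bits so x is aligned to the
 * power-of-two boundary y.  Behaves like the kernel macro whenever y
 * is a power of two, as it is in the lines above. */
#define round_down(x, y) ((x) & ~((y) - 1))

int main(void)
{
	unsigned long index = 1027;
	unsigned int order = 4;		/* entry covers 1 << 4 = 16 slots */
	unsigned long base = round_down(index, 1UL << order);
	unsigned long next = base + (1UL << order);

	/* Every index in [base, next) maps to the same multi-order entry. */
	printf("index %lu falls in [%lu, %lu)\n", index, base, next);
	return 0;
}
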
677 static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
683 /* An empty array should assign %base to the first alloc */
684 xa_alloc_index(xa, base, GFP_KERNEL);
687 xa_erase_index(xa, base);
690 /* And it should assign %base again */
691 xa_alloc_index(xa, base, GFP_KERNEL);
693 /* Allocating and then erasing a lot should not lose base */
694 for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
696 for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
698 xa_alloc_index(xa, base, GFP_KERNEL);
703 /* And it should assign %base again */
704 xa_alloc_index(xa, base, GFP_KERNEL);
706 /* The next assigned ID should be base+1 */
707 xa_alloc_index(xa, base + 1, GFP_KERNEL);
708 xa_erase_index(xa, base + 1);
711 xa_store_index(xa, base + 1, GFP_KERNEL);
712 xa_alloc_index(xa, base + 2, GFP_KERNEL);
714 /* If we then erase base, it should be free */
715 xa_erase_index(xa, base);
716 xa_alloc_index(xa, base, GFP_KERNEL);
718 xa_erase_index(xa, base + 1);
719 xa_erase_index(xa, base + 2);
722 xa_alloc_index(xa, base + i, GFP_KERNEL);
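
These check_xa_alloc_1() hits exercise the basic contract of an allocating XArray: the lowest free ID at or above base (0 for a plain allocating array, 1 for an ALLOC1 array) is handed out next, and erasing an ID makes it immediately reusable. A sketch of the API under test, assuming a hypothetical things array and thing_insert() helper; DEFINE_XARRAY_ALLOC(), xa_alloc() and xa_limit_32b are the real kernel interfaces:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(things);	/* base == 0; ALLOC1 starts at 1 */

/* Hypothetical helper: xa_alloc() stores @entry at the lowest free
 * index within the limit and writes that index back through @id. */
static int thing_insert(void *entry, u32 *id)
{
	return xa_alloc(&things, id, entry, xa_limit_32b, GFP_KERNEL);
}
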
752 static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
761 XA_BUG_ON(xa, id != base);
769 XA_BUG_ON(xa, id != base);
774 for (i = base; i < base + 10; i++) {
791 for (i = base; i < base + 9; i++) {
797 XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
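
check_xa_alloc_2() allocates NULL entries, which is how an ID is reserved before the real pointer is ready: the ID is consumed (the loop above gets base through base + 9 in order), yet looking it up still yields NULL, and xa_erase() on it returns NULL as line 797 asserts. A sketch of that reserve-then-publish pattern, with a hypothetical ids array and helper names:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(ids);

/* Reserve an ID now: allocating a NULL entry consumes the ID even
 * though xa_load() on it returns NULL until something is stored. */
static int reserve_id(u32 *id)
{
	return xa_alloc(&ids, id, NULL, xa_limit_32b, GFP_KERNEL);
}

/* Publish the real entry under the reserved ID later. */
static void *publish(u32 id, void *entry)
{
	return xa_store(&ids, id, entry, GFP_KERNEL);
}
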
803 static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
834 if (base != 0)
835 xa_erase_index(xa, base);
836 xa_erase_index(xa, base + 1);
841 XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
843 XA_BUG_ON(xa, id != base);
844 XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
846 XA_BUG_ON(xa, id != base + 1);
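
The check_xa_alloc_3() hits are the wrap-around check for xa_alloc_cyclic(): IDs keep increasing from a caller-held cursor rather than refilling freed holes, so base and base + 1 come back only after the cursor reaches the top of the limit, and the call returns 1 (not an error) on the allocations that wrap. A sketch of the call, with hypothetical sessions array and session_create() wrapper:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(sessions);
static u32 next_session;	/* cursor advanced by xa_alloc_cyclic() */

/* Hypothetical wrapper: IDs increase monotonically from the cursor;
 * returns 0 normally, 1 on the allocation that wraps back to the
 * bottom of the limit, and a negative errno on failure. */
static int session_create(void *entry, u32 *id)
{
	return xa_alloc_cyclic(&sessions, id, entry, xa_limit_32b,
			       &next_session, GFP_KERNEL);
}
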
1438 unsigned long base = xas.xa_index;
1448 void *old = xas_store(&xas, xa_mk_index(base + i));
1450 XA_BUG_ON(xa, old != xa_mk_index(base + i));
1461 for (i = base; i < base + (1UL << order); i++)
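
The final hits (lines 1438-1461) come from a multi-order range test: base is the index the XA_STATE was rounded down to, each slot in [base, base + (1 << order)) gets its own entry through repeated xas_store()/xas_next() calls, and the whole pass sits inside the standard xas_nomem() retry loop so node allocation never happens under the lock. That retry idiom, sketched as a hypothetical single-entry wrapper (it is essentially how xa_store() itself is built):

#include <linux/xarray.h>

/* Hypothetical wrapper: store @entry at @index, retrying if the tree
 * needs a node.  xas_store() fails with ENOMEM under the lock;
 * xas_nomem() then allocates with GFP_KERNEL outside it and asks us
 * to retry, so the store never sleeps while locked. */
static void *store_entry(struct xarray *xa, unsigned long index, void *entry)
{
	XA_STATE(xas, xa, index);
	void *old;

	do {
		xas_lock(&xas);
		old = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	return old;
}
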