// SPDX-License-Identifier: GPL-2.0-only
/*
 * idr-test.c: Test the IDR API
 * Copyright (c) 2016 Matthew Wilcox <willy@infradead.org>
 */
#include <linux/bitmap.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>

#include "test.h"

#define DUMMY_PTR	((void *)0x10)

int item_idr_free(int id, void *p, void *data)
{
	struct item *item = p;
	assert(item->index == id);
	free(p);

	return 0;
}

void item_idr_remove(struct idr *idr, int id)
{
	struct item *item = idr_find(idr, id);
	assert(item->index == id);
	idr_remove(idr, id);
	free(item);
}

void idr_alloc_test(void)
{
	unsigned long i;
	DEFINE_IDR(idr);

	assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0, 0x4000, GFP_KERNEL) == 0);
	assert(idr_alloc_cyclic(&idr, DUMMY_PTR, 0x3ffd, 0x4000, GFP_KERNEL) == 0x3ffd);
	idr_remove(&idr, 0x3ffd);
	idr_remove(&idr, 0);

	for (i = 0x3ffe; i < 0x4003; i++) {
		int id;
		struct item *item;

		if (i < 0x4000)
			item = item_create(i, 0);
		else
			item = item_create(i - 0x3fff, 0);

		id = idr_alloc_cyclic(&idr, item, 1, 0x4000, GFP_KERNEL);
		assert(id == item->index);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}

void idr_replace_test(void)
{
	DEFINE_IDR(idr);

	idr_alloc(&idr, (void *)-1, 10, 11, GFP_KERNEL);
	idr_replace(&idr, &idr, 10);

	idr_destroy(&idr);
}

/*
 * Unlike the radix tree, you can put a NULL pointer -- with care -- into
 * the IDR.  Some interfaces, like idr_find() do not distinguish between
 * "present, value is NULL" and "not present", but that's exactly what some
 * users want.
 */
void idr_null_test(void)
{
	int i;
	DEFINE_IDR(idr);

	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(!idr_is_empty(&idr));
	idr_remove(&idr, 0);
	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(!idr_is_empty(&idr));
	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 0; i < 10; i++) {
		assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == i);
	}

	assert(idr_replace(&idr, DUMMY_PTR, 3) == NULL);
	assert(idr_replace(&idr, DUMMY_PTR, 4) == NULL);
	assert(idr_replace(&idr, NULL, 4) == DUMMY_PTR);
	assert(idr_replace(&idr, DUMMY_PTR, 11) == ERR_PTR(-ENOENT));
	idr_remove(&idr, 5);
	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 5);
	idr_remove(&idr, 5);

	for (i = 0; i < 9; i++) {
		idr_remove(&idr, i);
		assert(!idr_is_empty(&idr));
	}
	idr_remove(&idr, 8);
	assert(!idr_is_empty(&idr));
	idr_remove(&idr, 9);
	assert(idr_is_empty(&idr));

	assert(idr_alloc(&idr, NULL, 0, 0, GFP_KERNEL) == 0);
	assert(idr_replace(&idr, DUMMY_PTR, 3) == ERR_PTR(-ENOENT));
	assert(idr_replace(&idr, DUMMY_PTR, 0) == NULL);
	assert(idr_replace(&idr, NULL, 0) == DUMMY_PTR);

	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 1; i < 10; i++) {
		assert(idr_alloc(&idr, NULL, 1, 0, GFP_KERNEL) == i);
	}

	idr_destroy(&idr);
	assert(idr_is_empty(&idr));
}

void idr_nowait_test(void)
{
	unsigned int i;
	DEFINE_IDR(idr);

	idr_preload(GFP_KERNEL);

	for (i = 0; i < 3; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, i, i + 1, GFP_NOWAIT) == i);
	}

	idr_preload_end();

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}
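
/*
 * Populate the IDR with a scattered set of IDs and check that
 * idr_get_next() returns them in ascending order; the caller runs this
 * for several different idr_init_base() bases.
 */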
void idr_get_next_test(int base)
{
	unsigned long i;
	int nextid;
	DEFINE_IDR(idr);
	idr_init_base(&idr, base);

	int indices[] = {4, 7, 9, 15, 65, 128, 1000, 99999, 0};

	for (i = 0; indices[i]; i++) {
		struct item *item = item_create(indices[i], 0);
		assert(idr_alloc(&idr, item, indices[i], indices[i+1],
				 GFP_KERNEL) == indices[i]);
	}

	for (i = 0, nextid = 0; indices[i]; i++) {
		idr_get_next(&idr, &nextid);
		assert(nextid == indices[i]);
		nextid++;
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
}

int idr_u32_cb(int id, void *ptr, void *data)
{
	BUG_ON(id < 0);
	BUG_ON(ptr != DUMMY_PTR);
	return 0;
}

void idr_u32_test1(struct idr *idr, u32 handle)
{
	static bool warned = false;
	u32 id = handle;
	int sid = 0;
	void *ptr;

	BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL));
	BUG_ON(id != handle);
	BUG_ON(idr_alloc_u32(idr, DUMMY_PTR, &id, id, GFP_KERNEL) != -ENOSPC);
	BUG_ON(id != handle);
	if (!warned && id > INT_MAX)
		printk("vvv Ignore these warnings\n");
	ptr = idr_get_next(idr, &sid);
	if (id > INT_MAX) {
		BUG_ON(ptr != NULL);
		BUG_ON(sid != 0);
	} else {
		BUG_ON(ptr != DUMMY_PTR);
		BUG_ON(sid != id);
	}
	idr_for_each(idr, idr_u32_cb, NULL);
	if (!warned && id > INT_MAX) {
		printk("^^^ Warnings over\n");
		warned = true;
	}
	BUG_ON(idr_remove(idr, id) != DUMMY_PTR);
	BUG_ON(!idr_is_empty(idr));
}

void idr_u32_test(int base)
{
	DEFINE_IDR(idr);
	idr_init_base(&idr, base);
	idr_u32_test1(&idr, 10);
	idr_u32_test1(&idr, 0x7fffffff);
	idr_u32_test1(&idr, 0x80000000);
	idr_u32_test1(&idr, 0x80000001);
	idr_u32_test1(&idr, 0xffe00000);
	idr_u32_test1(&idr, 0xffffffff);
}

static void idr_align_test(struct idr *idr)
{
	char name[] = "Motorola 68000";
	int i, id;
	void *entry;

	for (i = 0; i < 9; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 1; i < 10; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 1);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 2; i < 11; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 2);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 3; i < 12; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 3);
		idr_for_each_entry(idr, entry, id);
	}
	idr_destroy(idr);

	for (i = 0; i < 8; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
		BUG_ON(idr_alloc(idr, &name[i + 1], 0, 0, GFP_KERNEL) != 1);
		idr_for_each_entry(idr, entry, id);
		idr_remove(idr, 1);
		idr_for_each_entry(idr, entry, id);
		idr_remove(idr, 0);
		BUG_ON(!idr_is_empty(idr));
	}

	for (i = 0; i < 8; i++) {
		BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 0);
		idr_for_each_entry(idr, entry, id);
		idr_replace(idr, &name[i], 0);
		idr_for_each_entry(idr, entry, id);
		BUG_ON(idr_find(idr, 0) != &name[i]);
		idr_remove(idr, 0);
	}

	for (i = 0; i < 8; i++) {
		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
		BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 1);
		idr_remove(idr, 1);
		idr_for_each_entry(idr, entry, id);
		idr_replace(idr, &name[i + 1], 0);
		idr_for_each_entry(idr, entry, id);
		idr_remove(idr, 0);
	}
}
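
/*
 * Lookup-under-update test: a throbber thread repeatedly inserts and
 * removes one entry in find_idr while the main thread walks the IDR with
 * idr_get_next() under rcu_read_lock(), checking that any entry it finds
 * matches the ID it was returned for.
 */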
DEFINE_IDR(find_idr);

static void *idr_throbber(void *arg)
{
	time_t start = time(NULL);
	int id = *(int *)arg;

	rcu_register_thread();
	do {
		idr_alloc(&find_idr, xa_mk_value(id), id, id + 1, GFP_KERNEL);
		idr_remove(&find_idr, id);
	} while (time(NULL) < start + 10);
	rcu_unregister_thread();

	return NULL;
}

void idr_find_test_1(int anchor_id, int throbber_id)
{
	pthread_t throbber;
	time_t start = time(NULL);

	BUG_ON(idr_alloc(&find_idr, xa_mk_value(anchor_id), anchor_id,
				anchor_id + 1, GFP_KERNEL) != anchor_id);

	pthread_create(&throbber, NULL, idr_throbber, &throbber_id);

	rcu_read_lock();
	do {
		int id = 0;
		void *entry = idr_get_next(&find_idr, &id);
		rcu_read_unlock();
		BUG_ON(entry != xa_mk_value(id));
		rcu_read_lock();
	} while (time(NULL) < start + 11);
	rcu_read_unlock();

	pthread_join(throbber, NULL);

	idr_remove(&find_idr, anchor_id);
	BUG_ON(!idr_is_empty(&find_idr));
}

void idr_find_test(void)
{
	idr_find_test_1(100000, 0);
	idr_find_test_1(0, 100000);
}

void idr_checks(void)
{
	unsigned long i;
	DEFINE_IDR(idr);

	for (i = 0; i < 10000; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, 0, 20000, GFP_KERNEL) == i);
	}

	assert(idr_alloc(&idr, DUMMY_PTR, 5, 30, GFP_KERNEL) < 0);

	for (i = 0; i < 5000; i++)
		item_idr_remove(&idr, i);

	idr_remove(&idr, 3);

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);

	assert(idr_is_empty(&idr));

	idr_remove(&idr, 3);
	idr_remove(&idr, 0);

	assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == 0);
	idr_remove(&idr, 1);
	for (i = 1; i < RADIX_TREE_MAP_SIZE; i++)
		assert(idr_alloc(&idr, DUMMY_PTR, 0, 0, GFP_KERNEL) == i);
	idr_remove(&idr, 1 << 30);
	idr_destroy(&idr);

	for (i = INT_MAX - 3UL; i < INT_MAX + 1UL; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, i, i + 10, GFP_KERNEL) == i);
	}
	assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i, GFP_KERNEL) == -ENOSPC);
	assert(idr_alloc(&idr, DUMMY_PTR, i - 2, i + 10, GFP_KERNEL) == -ENOSPC);

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
	idr_destroy(&idr);

	assert(idr_is_empty(&idr));

	idr_set_cursor(&idr, INT_MAX - 3UL);
	for (i = INT_MAX - 3UL; i < INT_MAX + 3UL; i++) {
		struct item *item;
		unsigned int id;
		if (i <= INT_MAX)
			item = item_create(i, 0);
		else
			item = item_create(i - INT_MAX - 1, 0);

		id = idr_alloc_cyclic(&idr, item, 0, 0, GFP_KERNEL);
		assert(id == item->index);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);
	assert(idr_is_empty(&idr));

	for (i = 1; i < 10000; i++) {
		struct item *item = item_create(i, 0);
		assert(idr_alloc(&idr, item, 1, 20000, GFP_KERNEL) == i);
	}

	idr_for_each(&idr, item_idr_free, &idr);
	idr_destroy(&idr);

	idr_replace_test();
	idr_alloc_test();
	idr_null_test();
	idr_nowait_test();
	idr_get_next_test(0);
	idr_get_next_test(1);
	idr_get_next_test(4);
	idr_u32_test(4);
	idr_u32_test(1);
	idr_u32_test(0);
	idr_align_test(&idr);
	idr_find_test();
}
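
/*
 * Stub out the module boilerplate so that the kernel's own IDA test suite
 * (lib/test_ida.c) can be compiled directly into this userspace binary.
 */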
#define module_init(x)
#define module_exit(x)
#define MODULE_AUTHOR(x)
#define MODULE_LICENSE(x)
#define dump_stack()	assert(0)
void ida_dump(struct ida *);

#include "../../../lib/test_ida.c"

/*
 * Check that we get the correct error when we run out of memory doing
 * allocations.  In userspace, GFP_NOWAIT will always fail an allocation.
 * The first test is for not having a bitmap available, and the second test
 * is for not being able to allocate a level of the radix tree.
 */
void ida_check_nomem(void)
{
	DEFINE_IDA(ida);
	int id;

	id = ida_alloc_min(&ida, 256, GFP_NOWAIT);
	IDA_BUG_ON(&ida, id != -ENOMEM);
	id = ida_alloc_min(&ida, 1UL << 30, GFP_NOWAIT);
	IDA_BUG_ON(&ida, id != -ENOMEM);
	IDA_BUG_ON(&ida, !ida_is_empty(&ida));
}

/*
 * Check handling of conversions between exceptional entries and full bitmaps.
 */
void ida_check_conv_user(void)
{
	DEFINE_IDA(ida);
	unsigned long i;

	for (i = 0; i < 1000000; i++) {
		int id = ida_alloc(&ida, GFP_NOWAIT);
		if (id == -ENOMEM) {
			IDA_BUG_ON(&ida, ((i % IDA_BITMAP_BITS) !=
					  BITS_PER_XA_VALUE) &&
					 ((i % IDA_BITMAP_BITS) != 0));
			id = ida_alloc(&ida, GFP_KERNEL);
		} else {
			IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) ==
					BITS_PER_XA_VALUE);
		}
		IDA_BUG_ON(&ida, id != i);
	}
	ida_destroy(&ida);
}

void ida_check_random(void)
{
	DEFINE_IDA(ida);
	DECLARE_BITMAP(bitmap, 2048);
	unsigned int i;
	time_t s = time(NULL);

repeat:
	memset(bitmap, 0, sizeof(bitmap));
	for (i = 0; i < 100000; i++) {
		int i = rand();
		int bit = i & 2047;
		if (test_bit(bit, bitmap)) {
			__clear_bit(bit, bitmap);
			ida_free(&ida, bit);
		} else {
			__set_bit(bit, bitmap);
			IDA_BUG_ON(&ida, ida_alloc_min(&ida, bit, GFP_KERNEL)
					!= bit);
		}
	}
	ida_destroy(&ida);
	if (time(NULL) < s + 10)
		goto repeat;
}

void ida_simple_get_remove_test(void)
{
	DEFINE_IDA(ida);
	unsigned long i;

	for (i = 0; i < 10000; i++) {
		assert(ida_simple_get(&ida, 0, 20000, GFP_KERNEL) == i);
	}
	assert(ida_simple_get(&ida, 5, 30, GFP_KERNEL) < 0);

	for (i = 0; i < 10000; i++) {
		ida_simple_remove(&ida, i);
	}
	assert(ida_is_empty(&ida));

	ida_destroy(&ida);
}

void user_ida_checks(void)
{
	radix_tree_cpu_dead(1);

	ida_check_nomem();
	ida_check_conv_user();
	ida_check_random();
	ida_simple_get_remove_test();

	radix_tree_cpu_dead(1);
}

static void *ida_random_fn(void *arg)
{
	rcu_register_thread();
	ida_check_random();
	rcu_unregister_thread();
	return NULL;
}

static void *ida_leak_fn(void *arg)
{
	struct ida *ida = arg;
	time_t s = time(NULL);
	int i, ret;

	rcu_register_thread();

	do for (i = 0; i < 1000; i++) {
		ret = ida_alloc_range(ida, 128, 128, GFP_KERNEL);
		if (ret >= 0)
			ida_free(ida, 128);
	} while (time(NULL) < s + 2);

	rcu_unregister_thread();
	return NULL;
}

void ida_thread_tests(void)
{
	DEFINE_IDA(ida);
	pthread_t threads[20];
	int i;

	for (i = 0; i < ARRAY_SIZE(threads); i++)
		if (pthread_create(&threads[i], NULL, ida_random_fn, NULL)) {
			perror("creating ida thread");
			exit(1);
		}

	while (i--)
		pthread_join(threads[i], NULL);

	for (i = 0; i < ARRAY_SIZE(threads); i++)
		if (pthread_create(&threads[i], NULL, ida_leak_fn, &ida)) {
			perror("creating ida thread");
			exit(1);
		}

	while (i--)
		pthread_join(threads[i], NULL);
	assert(ida_is_empty(&ida));
}
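
/*
 * Run the userspace-only IDA checks above, then the shared test_ida.c
 * suite (ida_checks()/ida_exit()), and finally the multithreaded tests.
 */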
void ida_tests(void)
{
	user_ida_checks();
	ida_checks();
	ida_exit();
	ida_thread_tests();
}

int __weak main(void)
{
	rcu_register_thread();
	radix_tree_init();
	idr_checks();
	ida_tests();
	radix_tree_cpu_dead(1);
	rcu_barrier();
	if (nr_allocated)
		printf("nr_allocated = %d\n", nr_allocated);
	rcu_unregister_thread();
	return 0;
}