Lines matching refs: si
134 static int __try_to_reclaim_swap(struct swap_info_struct *si,
137 swp_entry_t entry = swp_entry(si->type, offset);
178 static int discard_swap(struct swap_info_struct *si)
186 se = first_se(si);
190 err = blkdev_issue_discard(si->bdev, start_block,
201 err = blkdev_issue_discard(si->bdev, start_block,
248 static void discard_swap_cluster(struct swap_info_struct *si,
251 struct swap_extent *se = offset_to_swap_extent(si, start_page);
265 if (blkdev_issue_discard(si->bdev, start_block,
358 static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
363 ci = si->cluster_info;
382 struct swap_info_struct *si, unsigned long offset)
387 ci = lock_cluster(si, offset);
390 spin_lock(&si->lock);
395 static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
401 spin_unlock(&si->lock);
460 static void swap_cluster_schedule_discard(struct swap_info_struct *si,
465 * si->swap_map directly. To make sure the discarding cluster isn't
469 memset(si->swap_map + idx * SWAPFILE_CLUSTER,
472 cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
474 schedule_work(&si->discard_work);
477 static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
479 struct swap_cluster_info *ci = si->cluster_info;
482 cluster_list_add_tail(&si->free_clusters, ci, idx);
487 * will be added to free cluster list. caller should hold si->lock.
489 static void swap_do_scheduled_discard(struct swap_info_struct *si)
494 info = si->cluster_info;
496 while (!cluster_list_empty(&si->discard_clusters)) {
497 idx = cluster_list_del_first(&si->discard_clusters, info);
498 spin_unlock(&si->lock);
500 discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
503 spin_lock(&si->lock);
504 ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
505 __free_cluster(si, idx);
506 memset(si->swap_map + idx * SWAPFILE_CLUSTER,
514 struct swap_info_struct *si;
516 si = container_of(work, struct swap_info_struct, discard_work);
518 spin_lock(&si->lock);
519 swap_do_scheduled_discard(si);
520 spin_unlock(&si->lock);
525 struct swap_info_struct *si;
527 si = container_of(ref, struct swap_info_struct, users);
528 complete(&si->comp);
531 static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
533 struct swap_cluster_info *ci = si->cluster_info;
535 VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
536 cluster_list_del_first(&si->free_clusters, ci);
540 static void free_cluster(struct swap_info_struct *si, unsigned long idx)
542 struct swap_cluster_info *ci = si->cluster_info + idx;
550 if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
552 swap_cluster_schedule_discard(si, idx);
556 __free_cluster(si, idx);
604 scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
611 conflict = !cluster_list_empty(&si->free_clusters) &&
612 offset != cluster_list_first(&si->free_clusters) &&
613 cluster_is_free(&si->cluster_info[offset]);
618 percpu_cluster = this_cpu_ptr(si->percpu_cluster);
627 static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
635 cluster = this_cpu_ptr(si->percpu_cluster);
637 if (!cluster_list_empty(&si->free_clusters)) {
638 cluster->index = si->free_clusters.head;
641 } else if (!cluster_list_empty(&si->discard_clusters)) {
645 * reread cluster_next_cpu since we dropped si->lock
647 swap_do_scheduled_discard(si);
648 *scan_base = this_cpu_read(*si->cluster_next_cpu);
660 max = min_t(unsigned long, si->max,
663 ci = lock_cluster(si, tmp);
665 if (!si->swap_map[tmp])
697 static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
702 if (offset == si->lowest_bit)
703 si->lowest_bit += nr_entries;
704 if (end == si->highest_bit)
705 WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries);
706 WRITE_ONCE(si->inuse_pages, si->inuse_pages + nr_entries);
707 if (si->inuse_pages == si->pages) {
708 si->lowest_bit = si->max;
709 si->highest_bit = 0;
710 del_from_avail_list(si);
724 static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
731 if (offset < si->lowest_bit)
732 si->lowest_bit = offset;
733 if (end > si->highest_bit) {
734 bool was_full = !si->highest_bit;
736 WRITE_ONCE(si->highest_bit, end);
737 if (was_full && (si->flags & SWP_WRITEOK))
738 add_to_avail_list(si);
741 WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries);
742 if (si->flags & SWP_BLKDEV)
744 si->bdev->bd_disk->fops->swap_slot_free_notify;
748 arch_swap_invalidate_page(si->type, offset);
749 zswap_invalidate(si->type, offset);
751 swap_slot_free_notify(si->bdev, offset);
754 clear_shadow_from_swap_cache(si->type, begin, end);
757 static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
761 if (!(si->flags & SWP_SOLIDSTATE)) {
762 si->cluster_next = next;
766 prev = this_cpu_read(*si->cluster_next_cpu);
775 if (si->highest_bit <= si->lowest_bit)
777 next = get_random_u32_inclusive(si->lowest_bit, si->highest_bit);
779 next = max_t(unsigned int, next, si->lowest_bit);
781 this_cpu_write(*si->cluster_next_cpu, next);
784 static bool swap_offset_available_and_locked(struct swap_info_struct *si,
787 if (data_race(!si->swap_map[offset])) {
788 spin_lock(&si->lock);
792 if (vm_swap_full() && READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
793 spin_lock(&si->lock);
800 static int scan_swap_map_slots(struct swap_info_struct *si,
823 si->flags += SWP_SCANNING;
829 if (si->flags & SWP_SOLIDSTATE)
830 scan_base = this_cpu_read(*si->cluster_next_cpu);
832 scan_base = si->cluster_next;
836 if (si->cluster_info) {
837 if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
839 } else if (unlikely(!si->cluster_nr--)) {
840 if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
841 si->cluster_nr = SWAPFILE_CLUSTER - 1;
845 spin_unlock(&si->lock);
850 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
853 scan_base = offset = si->lowest_bit;
857 for (; last_in_cluster <= si->highest_bit; offset++) {
858 if (si->swap_map[offset])
861 spin_lock(&si->lock);
863 si->cluster_next = offset;
864 si->cluster_nr = SWAPFILE_CLUSTER - 1;
874 spin_lock(&si->lock);
875 si->cluster_nr = SWAPFILE_CLUSTER - 1;
879 if (si->cluster_info) {
880 while (scan_swap_map_ssd_cluster_conflict(si, offset)) {
884 if (!scan_swap_map_try_ssd_cluster(si, &offset,
889 if (!(si->flags & SWP_WRITEOK))
891 if (!si->highest_bit)
893 if (offset > si->highest_bit)
894 scan_base = offset = si->lowest_bit;
896 ci = lock_cluster(si, offset);
898 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
901 spin_unlock(&si->lock);
902 swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
903 spin_lock(&si->lock);
910 if (si->swap_map[offset]) {
917 WRITE_ONCE(si->swap_map[offset], usage);
918 inc_cluster_info_page(si, si->cluster_info, offset);
921 swap_range_alloc(si, offset, 1);
922 slots[n_ret++] = swp_entry(si->type, offset);
925 if ((n_ret == nr) || (offset >= si->highest_bit))
934 spin_unlock(&si->lock);
936 spin_lock(&si->lock);
941 if (si->cluster_info) {
942 if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
944 } else if (si->cluster_nr && !si->swap_map[++offset]) {
946 --si->cluster_nr;
961 scan_limit = si->highest_bit;
964 if (!si->swap_map[offset])
970 set_cluster_next(si, offset + 1);
971 si->flags -= SWP_SCANNING;
975 spin_unlock(&si->lock);
976 while (++offset <= READ_ONCE(si->highest_bit)) {
982 if (swap_offset_available_and_locked(si, offset))
985 offset = si->lowest_bit;
992 if (swap_offset_available_and_locked(si, offset))
996 spin_lock(&si->lock);
999 si->flags -= SWP_SCANNING;
1003 static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
1018 if (cluster_list_empty(&si->free_clusters))
1021 idx = cluster_list_first(&si->free_clusters);
1023 ci = lock_cluster(si, offset);
1024 alloc_cluster(si, idx);
1027 memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER);
1029 swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
1030 *slot = swp_entry(si->type, offset);
1035 static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
1040 ci = lock_cluster(si, offset);
1041 memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
1043 free_cluster(si, idx);
1045 swap_range_free(si, offset, SWAPFILE_CLUSTER);
1051 struct swap_info_struct *si, *next;
1073 plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
1074 /* requeue si to after same-priority siblings */
1075 plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
1077 spin_lock(&si->lock);
1078 if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
1080 if (plist_node_empty(&si->avail_lists[node])) {
1081 spin_unlock(&si->lock);
1084 WARN(!si->highest_bit,
1086 si->type);
1087 WARN(!(si->flags & SWP_WRITEOK),
1089 si->type);
1090 __del_from_avail_list(si);
1091 spin_unlock(&si->lock);
1095 if (si->flags & SWP_BLKDEV)
1096 n_ret = swap_alloc_cluster(si, swp_entries);
1098 n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
1100 spin_unlock(&si->lock);
1108 * if we got here, it's likely that si was almost full before,
1109 * and since scan_swap_map_slots() can drop the si->lock,
1111 * same si and it filled up before we could get one; or, the si
1113 * si->lock. Since we dropped the swap_avail_lock, the
1264 struct swap_info_struct *si;
1269 si = swp_swap_info(entry);
1270 if (!si)
1272 if (!percpu_ref_tryget_live(&si->users))
1275 * Guarantee the si->users are checked before accessing other
1283 if (offset >= si->max)
1286 return si;
1293 percpu_ref_put(&si->users);
1351 struct swap_info_struct *si;
1357 si = _swap_info_get(entry);
1358 if (!si)
1361 ci = lock_cluster_or_swap_info(si, offset);
1364 map = si->swap_map + offset;
1373 unlock_cluster_or_swap_info(si, ci);
1374 spin_lock(&si->lock);
1376 swap_free_cluster(si, idx);
1377 spin_unlock(&si->lock);
1382 if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
1383 unlock_cluster_or_swap_info(si, ci);
1387 lock_cluster_or_swap_info(si, offset);
1390 unlock_cluster_or_swap_info(si, ci);
1396 struct swap_info_struct *si;
1400 si = _swap_info_get(entry);
1401 if (!si)
1403 ci = lock_cluster(si, offset);
1447 struct swap_info_struct *si = swp_swap_info(entry);
1450 return swap_count(si->swap_map[offset]);
1458 int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
1464 ci = lock_cluster_or_swap_info(si, offset);
1465 count = swap_count(si->swap_map[offset]);
1466 unlock_cluster_or_swap_info(si, ci);
1516 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
1520 unsigned char *map = si->swap_map;
1526 ci = lock_cluster_or_swap_info(si, offset);
1539 unlock_cluster_or_swap_info(si, ci);
1546 struct swap_info_struct *si = _swap_info_get(entry);
1548 if (!si)
1552 return swap_swapcount(si, entry) != 0;
1554 return swap_page_trans_huge_swapped(si, entry);
1633 struct swap_info_struct *si = swap_type_to_swap_info(type);
1636 if (!si)
1640 spin_lock(&si->lock);
1641 if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry))
1643 spin_unlock(&si->lock);
1707 struct swap_info_struct *si = swap_type_to_swap_info(type);
1710 if (!si || !(si->flags & SWP_WRITEOK))
1712 se = offset_to_swap_extent(si, offset);
1848 struct swap_info_struct *si;
1850 si = swap_info[type];
1894 swp_count = READ_ONCE(si->swap_map[offset]);
2024 static unsigned int find_next_to_unuse(struct swap_info_struct *si,
2036 for (i = prev + 1; i < si->max; i++) {
2037 count = READ_ONCE(si->swap_map[i]);
2044 if (i == si->max)
2056 struct swap_info_struct *si = swap_info[type];
2061 if (!READ_ONCE(si->inuse_pages))
2074 while (READ_ONCE(si->inuse_pages) &&
2102 while (READ_ONCE(si->inuse_pages) &&
2104 (i = find_next_to_unuse(si, i)) != 0) {
2137 if (READ_ONCE(si->inuse_pages)) {
2451 struct swap_info_struct *si = p;
2454 plist_for_each_entry_continue(si, &swap_active_head, list) {
2455 si->prio++;
2456 si->list.prio--;
2458 if (si->avail_lists[nid].prio != 1)
2459 si->avail_lists[nid].prio--;
2595 struct swap_info_struct *si;
2604 for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
2605 if (!(si->flags & SWP_USED) || !si->swap_map)
2608 return si;
2616 struct swap_info_struct *si = v;
2622 type = si->type + 1;
2625 for (; (si = swap_type_to_swap_info(type)); type++) {
2626 if (!(si->flags & SWP_USED) || !si->swap_map)
2628 return si;
2641 struct swap_info_struct *si = v;
2646 if (si == SEQ_START_TOKEN) {
2651 bytes = K(si->pages);
2652 inuse = K(READ_ONCE(si->inuse_pages));
2654 file = si->swap_file;
2662 si->prio);
3259 struct swap_info_struct *si = swap_info[type];
3261 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3262 nr_to_be_unused += READ_ONCE(si->inuse_pages);
3278 struct swap_info_struct *si = swap_info[type];
3280 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3281 nr_to_be_unused += si->inuse_pages;
3399 void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry)
3405 ci = lock_cluster_or_swap_info(si, offset);
3406 usage = __swap_entry_free_locked(si, offset, SWAP_HAS_CACHE);
3407 unlock_cluster_or_swap_info(si, ci);
3456 struct swap_info_struct *si;
3471 si = get_swap_device(entry);
3472 if (!si) {
3479 spin_lock(&si->lock);
3483 ci = lock_cluster(si, offset);
3485 count = swap_count(si->swap_map[offset]);
3501 head = vmalloc_to_page(si->swap_map + offset);
3504 spin_lock(&si->cont_lock);
3513 si->flags |= SWP_CONTINUED;
3541 spin_unlock(&si->cont_lock);
3544 spin_unlock(&si->lock);
3545 put_swap_device(si);
3561 static bool swap_count_continued(struct swap_info_struct *si,
3569 head = vmalloc_to_page(si->swap_map + offset);
3575 spin_lock(&si->cont_lock);
3637 spin_unlock(&si->cont_lock);
3645 static void free_swap_count_continuations(struct swap_info_struct *si)
3649 for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
3651 head = vmalloc_to_page(si->swap_map + offset);
3666 struct swap_info_struct *si, *next;
3683 plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
3685 if (si->bdev) {
3686 blkcg_schedule_throttle(si->bdev->bd_disk, true);
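
Several of the hits above (percpu_ref_tryget_live(&si->users), percpu_ref_put(&si->users), complete(&si->comp), the offset >= si->max check) are pieces of the reference pattern that keeps a swap_info_struct alive against a concurrent swapoff. As a minimal sketch of how a reader uses that pattern, assuming the in-tree get_swap_device()/put_swap_device() helpers and with a hypothetical caller name, it looks roughly like this:

/*
 * Minimal sketch, not part of the listing: the si->users pattern the
 * get/put hits above rely on. si_entry_in_use() is a hypothetical
 * example function; the helpers and fields are the in-tree ones.
 */
#include <linux/swap.h>
#include <linux/swapops.h>

static bool si_entry_in_use(swp_entry_t entry)
{
        struct swap_info_struct *si;
        unsigned long offset = swp_offset(entry);
        bool in_use;

        si = get_swap_device(entry);    /* percpu_ref_tryget_live(&si->users) */
        if (!si)
                return false;           /* raced with swapoff, or bad entry */

        /* swap_map can still change under us; this is only a snapshot */
        in_use = READ_ONCE(si->swap_map[offset]) != 0;

        put_swap_device(si);            /* percpu_ref_put(&si->users) */
        return in_use;
}

Holding the reference only delays teardown: once the ref is killed, swapoff waits on si->comp (the complete(&si->comp) hit above) until the last user drops its reference with put_swap_device().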