Lines in mm/swapfile.c matching references to si (struct swap_info_struct *):

127 static int __try_to_reclaim_swap(struct swap_info_struct *si,
130 swp_entry_t entry = swp_entry(si->type, offset);
171 static int discard_swap(struct swap_info_struct *si)
179 se = first_se(si);
183 err = blkdev_issue_discard(si->bdev, start_block,
194 err = blkdev_issue_discard(si->bdev, start_block,
241 static void discard_swap_cluster(struct swap_info_struct *si,
244 struct swap_extent *se = offset_to_swap_extent(si, start_page);
258 if (blkdev_issue_discard(si->bdev, start_block,
351 static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
356 ci = si->cluster_info;
375 struct swap_info_struct *si, unsigned long offset)
380 ci = lock_cluster(si, offset);
383 spin_lock(&si->lock);
388 static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
394 spin_unlock(&si->lock);
453 static void swap_cluster_schedule_discard(struct swap_info_struct *si,
458 * si->swap_map directly. To make sure the discarding cluster isn't
462 memset(si->swap_map + idx * SWAPFILE_CLUSTER,
465 cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);
467 schedule_work(&si->discard_work);
470 static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
472 struct swap_cluster_info *ci = si->cluster_info;
475 cluster_list_add_tail(&si->free_clusters, ci, idx);
480 * will be added to free cluster list. caller should hold si->lock.
482 static void swap_do_scheduled_discard(struct swap_info_struct *si)
487 info = si->cluster_info;
489 while (!cluster_list_empty(&si->discard_clusters)) {
490 idx = cluster_list_del_first(&si->discard_clusters, info);
491 spin_unlock(&si->lock);
493 discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
496 spin_lock(&si->lock);
497 ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
498 __free_cluster(si, idx);
499 memset(si->swap_map + idx * SWAPFILE_CLUSTER,
507 struct swap_info_struct *si;
509 si = container_of(work, struct swap_info_struct, discard_work);
511 spin_lock(&si->lock);
512 swap_do_scheduled_discard(si);
513 spin_unlock(&si->lock);
516 static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
518 struct swap_cluster_info *ci = si->cluster_info;
520 VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
521 cluster_list_del_first(&si->free_clusters, ci);
525 static void free_cluster(struct swap_info_struct *si, unsigned long idx)
527 struct swap_cluster_info *ci = si->cluster_info + idx;
535 if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
537 swap_cluster_schedule_discard(si, idx);
541 __free_cluster(si, idx);
589 scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
596 conflict = !cluster_list_empty(&si->free_clusters) &&
597 offset != cluster_list_first(&si->free_clusters) &&
598 cluster_is_free(&si->cluster_info[offset]);
603 percpu_cluster = this_cpu_ptr(si->percpu_cluster);
612 static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
620 cluster = this_cpu_ptr(si->percpu_cluster);
622 if (!cluster_list_empty(&si->free_clusters)) {
623 cluster->index = si->free_clusters.head;
626 } else if (!cluster_list_empty(&si->discard_clusters)) {
630 * reread cluster_next_cpu since we dropped si->lock
632 swap_do_scheduled_discard(si);
633 *scan_base = this_cpu_read(*si->cluster_next_cpu);
645 max = min_t(unsigned long, si->max,
648 ci = lock_cluster(si, tmp);
650 if (!si->swap_map[tmp])
682 static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
687 if (offset == si->lowest_bit)
688 si->lowest_bit += nr_entries;
689 if (end == si->highest_bit)
690 WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries);
691 si->inuse_pages += nr_entries;
692 if (si->inuse_pages == si->pages) {
693 si->lowest_bit = si->max;
694 si->highest_bit = 0;
695 del_from_avail_list(si);
711 static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
718 if (offset < si->lowest_bit)
719 si->lowest_bit = offset;
720 if (end > si->highest_bit) {
721 bool was_full = !si->highest_bit;
723 WRITE_ONCE(si->highest_bit, end);
724 if (was_full && (si->flags & SWP_WRITEOK))
725 add_to_avail_list(si);
728 si->inuse_pages -= nr_entries;
729 if (si->flags & SWP_BLKDEV)
731 si->bdev->bd_disk->fops->swap_slot_free_notify;
735 arch_swap_invalidate_page(si->type, offset);
736 frontswap_invalidate_page(si->type, offset);
738 swap_slot_free_notify(si->bdev, offset);
741 clear_shadow_from_swap_cache(si->type, begin, end);
744 static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
748 if (!(si->flags & SWP_SOLIDSTATE)) {
749 si->cluster_next = next;
753 prev = this_cpu_read(*si->cluster_next_cpu);
762 if (si->highest_bit <= si->lowest_bit)
764 next = si->lowest_bit +
765 prandom_u32_max(si->highest_bit - si->lowest_bit + 1);
767 next = max_t(unsigned int, next, si->lowest_bit);
769 this_cpu_write(*si->cluster_next_cpu, next);
772 static int scan_swap_map_slots(struct swap_info_struct *si,
795 si->flags += SWP_SCANNING;
801 if (si->flags & SWP_SOLIDSTATE)
802 scan_base = this_cpu_read(*si->cluster_next_cpu);
804 scan_base = si->cluster_next;
808 if (si->cluster_info) {
809 if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
811 } else if (unlikely(!si->cluster_nr--)) {
812 if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
813 si->cluster_nr = SWAPFILE_CLUSTER - 1;
817 spin_unlock(&si->lock);
822 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
825 scan_base = offset = si->lowest_bit;
829 for (; last_in_cluster <= si->highest_bit; offset++) {
830 if (si->swap_map[offset])
833 spin_lock(&si->lock);
835 si->cluster_next = offset;
836 si->cluster_nr = SWAPFILE_CLUSTER - 1;
846 spin_lock(&si->lock);
847 si->cluster_nr = SWAPFILE_CLUSTER - 1;
851 if (si->cluster_info) {
852 while (scan_swap_map_ssd_cluster_conflict(si, offset)) {
856 if (!scan_swap_map_try_ssd_cluster(si, &offset,
861 if (!(si->flags & SWP_WRITEOK))
863 if (!si->highest_bit)
865 if (offset > si->highest_bit)
866 scan_base = offset = si->lowest_bit;
868 ci = lock_cluster(si, offset);
870 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
873 spin_unlock(&si->lock);
874 swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
875 spin_lock(&si->lock);
882 if (si->swap_map[offset]) {
889 WRITE_ONCE(si->swap_map[offset], usage);
890 inc_cluster_info_page(si, si->cluster_info, offset);
893 swap_range_alloc(si, offset, 1);
894 slots[n_ret++] = swp_entry(si->type, offset);
897 if ((n_ret == nr) || (offset >= si->highest_bit))
906 spin_unlock(&si->lock);
908 spin_lock(&si->lock);
913 if (si->cluster_info) {
914 if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
916 } else if (si->cluster_nr && !si->swap_map[++offset]) {
918 --si->cluster_nr;
933 scan_limit = si->highest_bit;
936 if (!si->swap_map[offset])
942 set_cluster_next(si, offset + 1);
943 si->flags -= SWP_SCANNING;
947 spin_unlock(&si->lock);
948 while (++offset <= READ_ONCE(si->highest_bit)) {
949 if (data_race(!si->swap_map[offset])) {
950 spin_lock(&si->lock);
954 READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
955 spin_lock(&si->lock);
964 offset = si->lowest_bit;
966 if (data_race(!si->swap_map[offset])) {
967 spin_lock(&si->lock);
971 READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
972 spin_lock(&si->lock);
982 spin_lock(&si->lock);
985 si->flags -= SWP_SCANNING;
989 static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
1005 if (cluster_list_empty(&si->free_clusters))
1008 idx = cluster_list_first(&si->free_clusters);
1010 ci = lock_cluster(si, offset);
1011 alloc_cluster(si, idx);
1014 map = si->swap_map + offset;
1018 swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
1019 *slot = swp_entry(si->type, offset);
1024 static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
1029 ci = lock_cluster(si, offset);
1030 memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
1032 free_cluster(si, idx);
1034 swap_range_free(si, offset, SWAPFILE_CLUSTER);
1037 static unsigned long scan_swap_map(struct swap_info_struct *si,
1043 n_ret = scan_swap_map_slots(si, usage, 1, &entry);
1055 struct swap_info_struct *si, *next;
1077 plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
1078 /* requeue si to after same-priority siblings */
1079 plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
1081 spin_lock(&si->lock);
1082 if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
1084 if (plist_node_empty(&si->avail_lists[node])) {
1085 spin_unlock(&si->lock);
1088 WARN(!si->highest_bit,
1090 si->type);
1091 WARN(!(si->flags & SWP_WRITEOK),
1093 si->type);
1094 __del_from_avail_list(si);
1095 spin_unlock(&si->lock);
1099 if (si->flags & SWP_BLKDEV)
1100 n_ret = swap_alloc_cluster(si, swp_entries);
1102 n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
1104 spin_unlock(&si->lock);
1107 pr_debug("scan_swap_map of si %d failed to find offset\n",
1108 si->type);
1114 * if we got here, it's likely that si was almost full before,
1115 * and since scan_swap_map() can drop the si->lock, multiple
1116 * callers probably all tried to get a page from the same si
1117 * and it filled up before we could get one; or, the si filled
1118 * up between us dropping swap_avail_lock and taking si->lock.
1141 struct swap_info_struct *si = swap_type_to_swap_info(type);
1144 if (!si)
1147 spin_lock(&si->lock);
1148 if (si->flags & SWP_WRITEOK) {
1150 offset = scan_swap_map(si, 1);
1153 spin_unlock(&si->lock);
1157 spin_unlock(&si->lock);
1287 * enable_swap_info() or swapoff(). So if "si->flags & SWP_VALID" is
1288 * true, the si->map, si->cluster_info, etc. must be valid in the
1316 struct swap_info_struct *si;
1321 si = swp_swap_info(entry);
1322 if (!si)
1326 if (data_race(!(si->flags & SWP_VALID)))
1329 if (offset >= si->max)
1332 return si;
1396 struct swap_info_struct *si;
1402 si = _swap_info_get(entry);
1403 if (!si)
1406 ci = lock_cluster_or_swap_info(si, offset);
1409 map = si->swap_map + offset;
1418 unlock_cluster_or_swap_info(si, ci);
1419 spin_lock(&si->lock);
1421 swap_free_cluster(si, idx);
1422 spin_unlock(&si->lock);
1427 if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
1428 unlock_cluster_or_swap_info(si, ci);
1432 lock_cluster_or_swap_info(si, offset);
1435 unlock_cluster_or_swap_info(si, ci);
1441 struct swap_info_struct *si;
1445 si = _swap_info_get(entry);
1446 if (!si)
1448 ci = lock_cluster(si, offset);
1516 struct swap_info_struct *si;
1520 si = get_swap_device(entry);
1521 if (si) {
1522 count = swap_count(si->swap_map[offset]);
1523 put_swap_device(si);
1528 static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
1534 ci = lock_cluster_or_swap_info(si, offset);
1535 count = swap_count(si->swap_map[offset]);
1536 unlock_cluster_or_swap_info(si, ci);
1548 struct swap_info_struct *si;
1550 si = get_swap_device(entry);
1551 if (si) {
1552 count = swap_swapcount(si, entry);
1553 put_swap_device(si);
1604 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
1608 unsigned char *map = si->swap_map;
1614 ci = lock_cluster_or_swap_info(si, offset);
1627 unlock_cluster_or_swap_info(si, ci);
1634 struct swap_info_struct *si;
1641 si = _swap_info_get(entry);
1642 if (si)
1643 return swap_page_trans_huge_swapped(si, entry);
1652 struct swap_info_struct *si;
1676 si = _swap_info_get(entry);
1677 if (si) {
1678 map = si->swap_map;
1683 ci = lock_cluster(si, offset);
1884 struct swap_info_struct *si = swap_type_to_swap_info(type);
1886 if (!si || !(si->flags & SWP_WRITEOK))
1976 struct swap_info_struct *si;
1981 si = swap_info[type];
1994 if (frontswap && !frontswap_test(si, offset))
1998 swap_map = &si->swap_map[offset];
2155 static unsigned int find_next_to_unuse(struct swap_info_struct *si,
2167 for (i = prev + 1; i < si->max; i++) {
2168 count = READ_ONCE(si->swap_map[i]);
2170 if (!frontswap || frontswap_test(si, i))
2176 if (i == si->max)
2193 struct swap_info_struct *si = swap_info[type];
2198 if (!READ_ONCE(si->inuse_pages))
2214 while (READ_ONCE(si->inuse_pages) &&
2243 while (READ_ONCE(si->inuse_pages) &&
2245 (i = find_next_to_unuse(si, i, frontswap)) != 0) {
2285 if (READ_ONCE(si->inuse_pages)) {
2630 struct swap_info_struct *si = p;
2633 plist_for_each_entry_continue(si, &swap_active_head, list) {
2634 si->prio++;
2635 si->list.prio--;
2637 if (si->avail_lists[nid].prio != 1)
2638 si->avail_lists[nid].prio--;
2777 struct swap_info_struct *si;
2786 for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
2787 if (!(si->flags & SWP_USED) || !si->swap_map)
2790 return si;
2798 struct swap_info_struct *si = v;
2804 type = si->type + 1;
2807 for (; (si = swap_type_to_swap_info(type)); type++) {
2808 if (!(si->flags & SWP_USED) || !si->swap_map)
2810 return si;
2823 struct swap_info_struct *si = v;
2828 if (si == SEQ_START_TOKEN) {
2833 bytes = si->pages << (PAGE_SHIFT - 10);
2834 inuse = si->inuse_pages << (PAGE_SHIFT - 10);
2836 file = si->swap_file;
2844 si->prio);
3168 static bool swap_discardable(struct swap_info_struct *si)
3170 struct request_queue *q = bdev_get_queue(si->bdev);
3448 struct swap_info_struct *si = swap_info[type];
3450 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3451 nr_to_be_unused += si->inuse_pages;
3467 struct swap_info_struct *si = swap_info[type];
3469 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3470 nr_to_be_unused += si->inuse_pages;
3637 struct swap_info_struct *si;
3652 si = get_swap_device(entry);
3653 if (!si) {
3660 spin_lock(&si->lock);
3664 ci = lock_cluster(si, offset);
3666 count = si->swap_map[offset] & ~SWAP_HAS_CACHE;
3687 head = vmalloc_to_page(si->swap_map + offset);
3690 spin_lock(&si->cont_lock);
3699 si->flags |= SWP_CONTINUED;
3727 spin_unlock(&si->cont_lock);
3730 spin_unlock(&si->lock);
3731 put_swap_device(si);
3747 static bool swap_count_continued(struct swap_info_struct *si,
3755 head = vmalloc_to_page(si->swap_map + offset);
3761 spin_lock(&si->cont_lock);
3823 spin_unlock(&si->cont_lock);
3831 static void free_swap_count_continuations(struct swap_info_struct *si)
3835 for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
3837 head = vmalloc_to_page(si->swap_map + offset);
3852 struct swap_info_struct *si, *next;
3869 plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
3871 if (si->bdev) {
3872 blkcg_schedule_throttle(bdev_get_queue(si->bdev), true);
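
The matches above repeat one access pattern: any dereference of si->swap_map is bracketed by get_swap_device()/put_swap_device(), which keeps swapoff from tearing the device down underneath the reader (see the SWP_VALID comment matched above), while per-offset accesses are serialized with lock_cluster_or_swap_info()/unlock_cluster_or_swap_info(), which takes the per-cluster lock when si->cluster_info exists and falls back to si->lock otherwise. The sketch below is a minimal, illustrative reconstruction of that pattern from the matched lines, roughly the combination of the two small readers shown above (get_swap_device() wrapped around a locked read of si->swap_map); the function name is hypothetical and it is not a drop-in replacement for the kernel code.

/*
 * Illustrative sketch only (hypothetical helper, not kernel code):
 * read a swap entry's use count the way the matched readers above do.
 */
static int example_swp_count(swp_entry_t entry)
{
	struct swap_info_struct *si;
	struct swap_cluster_info *ci;
	pgoff_t offset = swp_offset(entry);
	int count = 0;

	si = get_swap_device(entry);	/* NULL if the entry is stale or the device is going away */
	if (!si)
		return 0;

	ci = lock_cluster_or_swap_info(si, offset);	/* cluster lock (SSD) or si->lock */
	count = swap_count(si->swap_map[offset]);	/* masks off SWAP_HAS_CACHE */
	unlock_cluster_or_swap_info(si, ci);

	put_swap_device(si);
	return count;
}

Splitting the locking this way is the point of the cluster helpers: on cluster-aware (SSD) swap devices, per-slot operations take only the small per-cluster spinlock instead of all contending on the single si->lock.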