// SPDX-License-Identifier: GPL-2.0
/*
 * fs/hmdfs/server_writeback.c
 *
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd.
 */

#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/backing-dev.h>

#include "hmdfs.h"
#include "hmdfs_trace.h"
#include "server_writeback.h"

/* Default dirty threshold in MB; converted to pages at init time */
#define HMDFS_SRV_WB_DEF_DIRTY_THRESH 50UL

/*
 * Work handler: write back the recorded number of dirty pages on the
 * lower superblock. Bail out if the lower bdi is already under
 * writeback or the superblock is being unmounted.
 */
static void hmdfs_srv_wb_handler(struct work_struct *work)
{
	struct hmdfs_server_writeback *hswb = container_of(work,
					struct hmdfs_server_writeback,
					dirty_sb_writeback_work);
	struct super_block *lower_sb = hswb->sbi->lower_sb;
	int dirty_pages;

	if (writeback_in_progress(&lower_sb->s_bdi->wb) ||
	    !down_read_trylock(&lower_sb->s_umount))
		return;

	dirty_pages = hswb->dirty_nr_pages_to_wb;
	writeback_inodes_sb_nr(lower_sb, dirty_pages, WB_REASON_FS_FREE_SPACE);
	up_read(&lower_sb->s_umount);

	trace_hmdfs_start_srv_wb(hswb->sbi, dirty_pages, hswb->dirty_thresh_pg);
}

/*
 * Called for each page the server dirties on behalf of a client.
 * Roughly once per second it checks the dirtying rate and queues the
 * writeback work if the rate exceeds @hswb->dirty_thresh_pg.
 */
void hmdfs_server_check_writeback(struct hmdfs_server_writeback *hswb)
{
	unsigned long old_time, now;
	int dirty_nr_pages;

	old_time = hswb->last_reset_time;
	now = jiffies;
	dirty_nr_pages = atomic_inc_return(&hswb->dirty_nr_pages);
	if (time_after(now, old_time + HZ) &&
	    cmpxchg(&hswb->last_reset_time, old_time, now) == old_time) {
		/*
		 * We calculate the page-dirtying speed to handle the
		 * following situations:
		 *
		 * 1. Dense writing, where the average dirtying speed
		 *    exceeds @hswb->dirty_thresh_pg:
		 *	0-1s	 100MB
		 * 2. Sporadic writing, where the average dirtying speed
		 *    stays below @hswb->dirty_thresh_pg:
		 *	0-0.1s	  40MB
		 *	3.1-3.2s  20MB
		 */
		unsigned int writepage_speed;

		writepage_speed = dirty_nr_pages / ((now - old_time) / HZ);
		if (writepage_speed >= hswb->dirty_thresh_pg) {
			/*
			 * Write back @hswb->dirty_nr_pages_to_wb pages in
			 * the server-writeback work. If the work is delayed
			 * past 1s, @hswb->dirty_nr_pages_to_wb may be
			 * assigned a new value (e.g. 60MB), overwriting the
			 * old one (e.g. 80MB), so those 80MB are skipped
			 * for writeback. We can tolerate this: if the
			 * previous work has not completed, the writeback
			 * pressure is already too high, so queuing further
			 * work would be pointless.
			 */
			hswb->dirty_nr_pages_to_wb = dirty_nr_pages;
			/*
			 * Three conditions trigger queuing the work:
			 *
			 * A. The server successfully handled a writepage
			 *    request from the client
			 * B. At least 1 second has passed since the last
			 *    reset
			 * C. The page-dirtying speed exceeds
			 *    @dirty_thresh_pg
			 */
			queue_work(hswb->dirty_writeback_wq,
				   &hswb->dirty_sb_writeback_work);
		}

		/*
		 * There is no need to account for the number of dirty
		 * pages from the remote client very accurately; increments
		 * made by other processes in the gap between the increment
		 * above and this zero-out may be lost.
		 */
		atomic_set(&hswb->dirty_nr_pages, 0);
	}
}
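/*
 * Worked example of the threshold arithmetic (an illustration added
 * here, not code the driver runs; the numbers assume 4 KiB pages,
 * i.e. HMDFS_MB_TO_PAGE_SHIFT == 8):
 *
 *	dirty_thresh_pg = 50UL << 8 = 12800 pages (50 MB per second)
 *
 * A client dirtying 100 MB within one second (25600 pages/s) exceeds
 * this and queues the work; two 20-40 MB bursts several seconds apart
 * average well below it and do not.
 */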
void hmdfs_destroy_server_writeback(struct hmdfs_sb_info *sbi)
{
	if (!sbi->h_swb)
		return;

	flush_work(&sbi->h_swb->dirty_sb_writeback_work);
	destroy_workqueue(sbi->h_swb->dirty_writeback_wq);
	kfree(sbi->h_swb);
	sbi->h_swb = NULL;
}

int hmdfs_init_server_writeback(struct hmdfs_sb_info *sbi)
{
	struct hmdfs_server_writeback *hswb;
	char name[HMDFS_WQ_NAME_LEN];

	hswb = kzalloc(sizeof(struct hmdfs_server_writeback), GFP_KERNEL);
	if (!hswb)
		return -ENOMEM;

	hswb->sbi = sbi;
	hswb->dirty_writeback_control = true;
	hswb->dirty_thresh_pg = HMDFS_SRV_WB_DEF_DIRTY_THRESH <<
				HMDFS_MB_TO_PAGE_SHIFT;
	atomic_set(&hswb->dirty_nr_pages, 0);
	hswb->last_reset_time = jiffies;

	snprintf(name, sizeof(name), "dfs_srv_wb%u", sbi->seq);
	hswb->dirty_writeback_wq = create_singlethread_workqueue(name);
	if (!hswb->dirty_writeback_wq) {
		hmdfs_err("Failed to create server writeback workqueue!");
		kfree(hswb);
		return -ENOMEM;
	}
	INIT_WORK(&hswb->dirty_sb_writeback_work, hmdfs_srv_wb_handler);
	sbi->h_swb = hswb;

	return 0;
}
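/*
 * A minimal usage sketch, compiled out on purpose: how a mount path
 * might pair the init/destroy calls. This is an assumption for
 * illustration; hmdfs_example_fill_super()/hmdfs_example_kill_sb()
 * are hypothetical names, and only the two hmdfs_*_server_writeback()
 * calls above are real.
 */
#if 0
static int hmdfs_example_fill_super(struct hmdfs_sb_info *sbi)
{
	int err;

	err = hmdfs_init_server_writeback(sbi);
	if (err)
		return err;

	/* ... remaining superblock setup; on failure, unwind with ... */
	/* hmdfs_destroy_server_writeback(sbi); */

	return 0;
}

static void hmdfs_example_kill_sb(struct hmdfs_sb_info *sbi)
{
	/* Flushes the pending work, then frees and clears sbi->h_swb */
	hmdfs_destroy_server_writeback(sbi);
}
#endif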