/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 */
#include "ucollection_process_cpu.h"

#include <asm/div64.h>
#ifdef CONFIG_CPU_FREQ_TIMES
#include <linux/cpufreq_times.h>
#endif // CONFIG_CPU_FREQ_TIMES
#include <linux/sched/stat.h>
#include <linux/version.h>
#include <linux/uaccess.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
#include <linux/sched.h>
#include <linux/sched/cputime.h>
#include <linux/sched/signal.h>
#endif // LINUX_VERSION_CODE
#ifdef CONFIG_SMT_MODE_GOV
#include <platform_include/cee/linux/time_in_state.h>
#endif // CONFIG_SMT_MODE_GOV

#include "unified_collection_data.h"

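/* divisor for do_div(): nanoseconds per millisecond */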
#define NS_TO_MS 1000000
static char dmips_values[DMIPS_NUM];

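/*
 * Weak default for platforms that do not provide a CPU-load hook;
 * vendor kernels may override this to report the DMIPS-weighted CPU
 * load of @task. Returns 0 when unimplemented.
 */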
unsigned long long __attribute__((weak)) get_proc_cpu_load(struct task_struct *task, char dmips[],
	unsigned int dmips_num)
{
	return 0;
}

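/*
 * Sum minor/major page faults across all live threads of @task, plus
 * the faults already folded into the signal struct by threads that
 * have exited. Also records the thread count.
 */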
static void get_process_flt(struct task_struct *task, struct ucollection_process_cpu_item *proc_cpu_entry)
{
	unsigned long tmp_min_flt = 0;
	unsigned long tmp_maj_flt = 0;
	struct task_struct *t = task;
	struct signal_struct *sig = NULL;
	int thread_count = 0;

	do {
		tmp_min_flt += t->min_flt;
		tmp_maj_flt += t->maj_flt;
		++thread_count;
	} while_each_thread(task, t);

	sig = task->signal;
	if (sig != NULL) {
		tmp_min_flt += sig->min_flt;
		tmp_maj_flt += sig->maj_flt;
	}

	proc_cpu_entry->min_flt = tmp_min_flt;
	proc_cpu_entry->maj_flt = tmp_maj_flt;
	proc_cpu_entry->thread_total = thread_count;
}

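/* DMIPS-weighted load of the whole process, via the (possibly weak) hook. */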
static unsigned long long get_process_load_cputime(struct task_struct *task)
{
	return get_proc_cpu_load(task, dmips_values, DMIPS_NUM);
}

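/*
 * Adjusted user/system CPU time of the whole thread group, converted
 * from nanoseconds to milliseconds.
 */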
static void get_process_usage_cputime(struct task_struct *task, unsigned long long *ut, unsigned long long *st)
{
	unsigned long long utime, stime;

	thread_group_cputime_adjusted(task, &utime, &stime);
	do_div(utime, NS_TO_MS);
	do_div(stime, NS_TO_MS);
	*ut = utime;
	*st = stime;
}

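/*
 * Fill one ucollection_process_cpu_item for @task and copy it to
 * slot @cur_count of the user-space entry array. The copy_to_user()
 * result is intentionally ignored, as in the rest of this file.
 */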
static void get_process_load(struct task_struct *task, int cur_count,
	struct ucollection_process_cpu_entry __user *entry)
{
	struct ucollection_process_cpu_item proc_cpu_entry;

	memset(&proc_cpu_entry, 0, sizeof(struct ucollection_process_cpu_item));
	proc_cpu_entry.pid = task->pid;
	get_process_flt(task, &proc_cpu_entry);
	proc_cpu_entry.cpu_load_time = get_process_load_cputime(task);
	get_process_usage_cputime(task, &proc_cpu_entry.cpu_usage_utime, &proc_cpu_entry.cpu_usage_stime);
	(void)copy_to_user(&entry->datas[cur_count], &proc_cpu_entry, sizeof(struct ucollection_process_cpu_item));
}

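/*
 * Fill one ucollection_thread_cpu_item for thread @task (raw, not
 * adjusted, utime/stime in milliseconds) and copy it to slot
 * @cur_count of the user-space entry array.
 */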
static void get_thread_load(struct task_struct *task, int cur_count,
	struct ucollection_thread_cpu_entry __user *entry)
{
	struct ucollection_thread_cpu_item thread_cpu_item;
	unsigned long long utime = task->utime;
	unsigned long long stime = task->stime;

	memset(&thread_cpu_item, 0, sizeof(struct ucollection_thread_cpu_item));
	do_div(utime, NS_TO_MS);
	do_div(stime, NS_TO_MS);
	thread_cpu_item.tid = task->pid;
	/* bounded copy; the memset above guarantees NUL termination */
	strncpy(thread_cpu_item.name, task->comm, sizeof(thread_cpu_item.name) - 1);
	thread_cpu_item.cpu_usage_utime = utime;
	thread_cpu_item.cpu_usage_stime = stime;
	thread_cpu_item.cpu_load_time = 0;
	(void)copy_to_user(&entry->datas[cur_count], &thread_cpu_item, sizeof(struct ucollection_thread_cpu_item));
}

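/*
 * IOCTRL_COLLECT_ALL_PROC_CPU: walk every thread-group leader and
 * copy per-process CPU statistics into the caller's buffer, bounded
 * by the caller-supplied total_count.
 */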
static long ioctrl_collect_process_cpu(void __user *argp)
{
	struct task_struct *task = NULL;
	struct ucollection_process_cpu_entry kentry;
	struct ucollection_process_cpu_entry __user *entry = argp;

	if (entry == NULL) {
		pr_err("cpu entry is null\n");
		return -EINVAL;
	}

	memset(&kentry, 0, sizeof(struct ucollection_process_cpu_entry));
	(void)copy_from_user(&kentry, entry, sizeof(struct ucollection_process_cpu_entry));

	rcu_read_lock();
	for_each_process(task) {
		if (task->pid != task->tgid)
			continue;

		if (kentry.cur_count >= kentry.total_count) {
			pr_err("process over total count\n");
			break;
		}

		get_process_load(task, kentry.cur_count, entry);
		kentry.cur_count++;
	}
	rcu_read_unlock();
	put_user(kentry.cur_count, &entry->cur_count);
	return 0;
}

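/*
 * Look up @pid in the init PID namespace. Caller must hold
 * rcu_read_lock(). Returns NULL if the task is gone or exiting.
 */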
static struct task_struct *get_alive_task_by_pid(unsigned int pid)
{
	struct task_struct *task = find_task_by_pid_ns(pid, &init_pid_ns);

	if (task == NULL || !pid_alive(task))
		return NULL;
	return task;
}

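/*
 * IOCTRL_COLLECT_PROC_COUNT: count thread-group leaders, i.e. the
 * number of processes currently alive.
 */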
static long ioctrl_collect_process_count(void __user *argp)
{
	struct task_struct *task = NULL;
	unsigned int process_count = 0;
	unsigned int __user *count = argp;

	rcu_read_lock();
	for_each_process(task) {
		if (task->pid != task->tgid)
			continue;
		++process_count;
	}
	rcu_read_unlock();
	put_user(process_count, count);
	return 0;
}

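/*
 * Count the live threads of the process named in @kcount and write
 * the result back to user space. Takes and releases the RCU read
 * lock itself.
 */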
static long read_thread_count_locked(struct ucollection_process_thread_count *kcount,
	struct ucollection_process_thread_count __user *count)
{
	struct task_struct *task = NULL;
	struct task_struct *t = NULL;
	unsigned int thread_count = 0;

	rcu_read_lock();
	task = get_alive_task_by_pid(kcount->pid);
	if (task == NULL) {
		pr_info("pid=%d task is null or not alive\n", kcount->pid);
		rcu_read_unlock();
		return -EINVAL;
	}
	t = task;
	do {
		thread_count++;
	} while_each_thread(task, t);
	rcu_read_unlock();
	put_user(thread_count, &count->thread_count);
	return 0;
}

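/*
 * IOCTRL_COLLECT_THREAD_COUNT: thread count of an arbitrary process,
 * selected by the pid in the user-supplied request.
 */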
static long ioctrl_collect_thread_count(void __user *argp)
{
	struct ucollection_process_thread_count kcount;
	struct ucollection_process_thread_count __user *count = argp;

	if (count == NULL) {
		pr_err("thread count entry is null\n");
		return -EINVAL;
	}
	memset(&kcount, 0, sizeof(struct ucollection_process_thread_count));
	(void)copy_from_user(&kcount, count, sizeof(struct ucollection_process_thread_count));
	return read_thread_count_locked(&kcount, count);
}

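/*
 * IOCTRL_COLLECT_APP_THREAD_COUNT: same as above, but restricted to
 * the calling process; the requested pid must match current->tgid.
 */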
static long ioctrl_collect_app_thread_count(void __user *argp)
{
	struct ucollection_process_thread_count kcount;
	struct ucollection_process_thread_count __user *count = argp;

	if (count == NULL) {
		pr_err("thread count entry is null\n");
		return -EINVAL;
	}
	memset(&kcount, 0, sizeof(struct ucollection_process_thread_count));
	(void)copy_from_user(&kcount, count, sizeof(struct ucollection_process_thread_count));
	if (current->tgid != kcount.pid) {
		pr_err("pid=%d is not the current process, current tgid=%d\n", kcount.pid, current->tgid);
		return -EINVAL;
	}
	return read_thread_count_locked(&kcount, count);
}

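/*
 * Copy per-thread CPU statistics for every live thread of the process
 * named in @kentry into the user buffer, bounded by total_count.
 * Takes and releases the RCU read lock itself.
 */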
static long read_thread_info_locked(struct ucollection_thread_cpu_entry *kentry,
	struct ucollection_thread_cpu_entry __user *entry)
{
	struct task_struct *task = NULL;
	struct task_struct *t = NULL;
	unsigned int thread_count = 0;

	rcu_read_lock();
	task = get_alive_task_by_pid(kentry->filter.pid);
	if (task == NULL) {
		pr_info("pid=%d task is null or not alive\n", kentry->filter.pid);
		rcu_read_unlock();
		return -EINVAL;
	}
	t = task;
	do {
		if (thread_count >= kentry->total_count) {
			pr_err("thread over total count\n");
			break;
		}
		get_thread_load(t, thread_count, entry);
		thread_count++;
	} while_each_thread(task, t);
	rcu_read_unlock();
	put_user(thread_count, &entry->cur_count);
	return 0;
}

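/*
 * IOCTRL_COLLECT_APP_THREAD: per-thread CPU statistics of the calling
 * process only; rejects requests whose pid is not current->tgid or
 * whose buffer is already full.
 */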
static long ioctrl_collect_app_thread_cpu(void __user *argp)
{
	struct ucollection_thread_cpu_entry kentry;
	struct ucollection_thread_cpu_entry __user *entry = argp;

	if (entry == NULL) {
		pr_err("cpu entry is null\n");
		return -EINVAL;
	}
	memset(&kentry, 0, sizeof(struct ucollection_thread_cpu_entry));
	(void)copy_from_user(&kentry, entry, sizeof(struct ucollection_thread_cpu_entry));
	if (current->tgid != kentry.filter.pid || kentry.cur_count >= kentry.total_count) {
		pr_err("pid=%d is not the current process (tgid=%d), or current count is over total count\n",
			kentry.filter.pid, current->tgid);
		return -EINVAL;
	}
	return read_thread_info_locked(&kentry, entry);
}

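/*
 * IOCTRL_COLLECT_THE_THREAD: per-thread CPU statistics of an
 * arbitrary process, selected by the pid in the request.
 */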
static long ioctrl_collect_the_thread_cpu(void __user *argp)
{
	struct ucollection_thread_cpu_entry kentry;
	struct ucollection_thread_cpu_entry __user *entry = argp;

	if (entry == NULL) {
		pr_err("cpu entry is null\n");
		return -EINVAL;
	}
	memset(&kentry, 0, sizeof(struct ucollection_thread_cpu_entry));
	(void)copy_from_user(&kentry, entry, sizeof(struct ucollection_thread_cpu_entry));
	if (kentry.cur_count >= kentry.total_count) {
		pr_err("pid=%d current count is over total count\n", kentry.filter.pid);
		return -EINVAL;
	}
	return read_thread_info_locked(&kentry, entry);
}

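/*
 * IOCTRL_COLLECT_THE_PROC_CPU: CPU statistics for a single process,
 * selected by the pid in the request, appended at the current index
 * of the caller's buffer.
 */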
static long ioctrl_collect_the_process_cpu(void __user *argp)
{
	struct task_struct *task = NULL;
	struct ucollection_process_cpu_entry kentry;
	struct ucollection_process_cpu_entry __user *entry = argp;

	if (entry == NULL) {
		pr_err("cpu entry is null\n");
		return -EINVAL;
	}

	memset(&kentry, 0, sizeof(struct ucollection_process_cpu_entry));
	(void)copy_from_user(&kentry, entry, sizeof(struct ucollection_process_cpu_entry));

	if (kentry.cur_count >= kentry.total_count) {
		pr_err("current count over total count\n");
		return -EINVAL;
	}

	rcu_read_lock();
	task = get_alive_task_by_pid(kentry.filter.pid);
	if (task == NULL) {
		pr_info("pid=%d task is null or not alive\n", kentry.filter.pid);
		rcu_read_unlock();
		return -EINVAL;
	}
	get_process_load(task, kentry.cur_count, entry);
	kentry.cur_count++;
	rcu_read_unlock();
	put_user(kentry.cur_count, &entry->cur_count);
	return 0;
}

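/*
 * Dispatch the unified-collection CPU ioctls to their handlers.
 * Unknown commands are logged and reported as success, preserving the
 * original contract with user space.
 */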
long unified_collection_collect_process_cpu(unsigned int cmd, void __user *argp)
{
	long ret = 0;

	switch (cmd) {
	case IOCTRL_COLLECT_ALL_PROC_CPU:
		ret = ioctrl_collect_process_cpu(argp);
		break;
	case IOCTRL_COLLECT_THE_PROC_CPU:
		ret = ioctrl_collect_the_process_cpu(argp);
		break;
	case IOCTRL_COLLECT_THREAD_COUNT:
		ret = ioctrl_collect_thread_count(argp);
		break;
	case IOCTRL_COLLECT_APP_THREAD_COUNT:
		ret = ioctrl_collect_app_thread_count(argp);
		break;
	case IOCTRL_COLLECT_APP_THREAD:
		ret = ioctrl_collect_app_thread_cpu(argp);
		break;
	case IOCTRL_COLLECT_THE_THREAD:
		ret = ioctrl_collect_the_thread_cpu(argp);
		break;
	case IOCTRL_COLLECT_PROC_COUNT:
		ret = ioctrl_collect_process_count(argp);
		break;
	default:
		pr_err("unknown ioctrl cmd %u, _IOC_TYPE(cmd)=%d\n", cmd, _IOC_TYPE(cmd));
		ret = 0;
	}
	return ret;
}
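
/*
 * Illustrative (hypothetical) user-space call sequence. It assumes the
 * driver exposes these ioctls on a character device such as
 * /dev/ucollection (the node name is an assumption, not defined here)
 * and that unified_collection_data.h is shared with user space:
 *
 *	struct ucollection_process_thread_count kcount = { .pid = getpid() };
 *	int fd = open("/dev/ucollection", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, IOCTRL_COLLECT_APP_THREAD_COUNT, &kcount) == 0)
 *		printf("threads: %u\n", kcount.thread_count);
 */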