Lines matching refs: kentry
109 struct ucollection_process_cpu_entry kentry;
116 memset(&kentry, 0, sizeof(struct ucollection_process_cpu_entry));
117 (void)copy_from_user(&kentry, entry, sizeof(struct ucollection_process_cpu_entry));
125 if (kentry.cur_count >= kentry.total_count) {
130 get_process_load(task, kentry.cur_count, entry);
131 kentry.cur_count++;
133 put_user(kentry.cur_count, &entry->cur_count);
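Lines 109-133 are the per-process read path: the handler zeroes a kernel-side kentry, copies the caller's header in, refuses to advance once cur_count reaches total_count, fills one slot through get_process_load(), and writes the advanced cursor back with put_user(). Note that the copy_from_user() result is discarded with a (void) cast and the put_user() result goes unchecked. A minimal sketch of the same body with those results checked; the error codes and the surrounding declarations of task and entry are assumptions, not the file's code:

    struct ucollection_process_cpu_entry kentry;

    memset(&kentry, 0, sizeof(kentry));
    /* copy_from_user() returns the number of bytes it could NOT copy,
     * so any non-zero result means a faulting user pointer. */
    if (copy_from_user(&kentry, entry, sizeof(kentry)))
        return -EFAULT;
    if (kentry.cur_count >= kentry.total_count)
        return -EINVAL;                 /* user buffer already full */

    get_process_load(task, kentry.cur_count, entry);
    kentry.cur_count++;
    if (put_user(kentry.cur_count, &entry->cur_count))
        return -EFAULT;                 /* cursor write-back failed */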
216 static long read_thread_info_locked(struct ucollection_thread_cpu_entry *kentry,
220 struct task_struct *task = get_alive_task_by_pid(kentry->filter.pid);
222 pr_info("pid=%d task is NULL or not alive", kentry->filter.pid);
229 if (thread_count >= kentry->total_count) {
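read_thread_info_locked() (lines 216-229) receives the already-copied kentry plus the raw user pointer, resolves kentry->filter.pid to a live task, and stops once thread_count reaches kentry->total_count. The lookup helper itself is not in the listing; below is a plausible sketch of what get_alive_task_by_pid() does, in which the RCU locking and the reference count are assumptions about a function the listing only names:

    /* Hypothetical sketch: resolve a pid to its task_struct and pin it,
     * returning NULL once the task has exited. */
    static struct task_struct *get_alive_task_by_pid(unsigned int pid)
    {
        struct task_struct *task = NULL;

        rcu_read_lock();
        task = find_task_by_vpid(pid);
        if (task != NULL && pid_alive(task))
            get_task_struct(task);      /* caller must put_task_struct() */
        else
            task = NULL;
        rcu_read_unlock();
        return task;
    }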
243 struct ucollection_thread_cpu_entry kentry;
249 memset(&kentry, 0, sizeof(struct ucollection_thread_cpu_entry));
250 (void)copy_from_user(&kentry, entry, sizeof(struct ucollection_thread_cpu_entry));
251 if (current->tgid != kentry.filter.pid || kentry.cur_count >= kentry.total_count) {
253 , kentry.filter.pid, current->tgid);
256 return read_thread_info_locked(&kentry, entry);
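The handler at lines 243-256 serves only the calling process: it rejects the request unless kentry.filter.pid matches current->tgid before delegating to read_thread_info_locked(), so a process can enumerate its own threads but not another's through this entry point. A sketch of that guard with the checks split out and explicit returns; the error codes are assumptions:

    memset(&kentry, 0, sizeof(kentry));
    if (copy_from_user(&kentry, entry, sizeof(kentry)))
        return -EFAULT;
    /* Only the caller's own thread group may be read here. */
    if (current->tgid != kentry.filter.pid)
        return -EPERM;
    if (kentry.cur_count >= kentry.total_count)
        return -EINVAL;                 /* cursor already at the end */
    return read_thread_info_locked(&kentry, entry);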
261 struct ucollection_thread_cpu_entry kentry;
267 memset(&kentry, 0, sizeof(struct ucollection_thread_cpu_entry));
268 (void)copy_from_user(&kentry, entry, sizeof(struct ucollection_thread_cpu_entry));
269 if (kentry.cur_count >= kentry.total_count) {
271 , kentry.filter.pid, current->pid);
274 return read_thread_info_locked(&kentry, entry);
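Lines 261-274 are the same flow minus the current->tgid comparison: this variant accepts an arbitrary kentry.filter.pid, so any gating of cross-process reads must happen outside the lines shown here; the listing gives no indication of where. Only the cur_count/total_count cursor check remains before the shared helper is called.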
279 struct ucollection_process_cpu_entry kentry;
286 memset(&kentry, 0, sizeof(struct ucollection_process_cpu_entry));
287 (void)copy_from_user(&kentry, entry, sizeof(struct ucollection_process_cpu_entry));
289 if (kentry.cur_count >= kentry.total_count) {
295 struct task_struct *task = get_alive_task_by_pid(kentry.filter.pid);
297 pr_info("pid=%d task is null or not alive", kentry.filter.pid);
302 get_process_load(task, kentry.cur_count, entry);
303 kentry.cur_count++;
304 put_user(kentry.cur_count, &entry->cur_count);
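The final cluster (lines 279-304) is the by-pid variant of the first path: copy the header, check the cursor, resolve the target with get_alive_task_by_pid(), fill one entry, write the cursor back. Pulling the pieces together, a compact end-to-end sketch with the user-space copies checked; the function name, error codes, and reference handling are assumptions layered on top of the listing:

    /* Hypothetical end-to-end version of the lines 279-304 path. */
    static long read_process_cpu_info_by_pid(
        struct ucollection_process_cpu_entry __user *entry)
    {
        struct ucollection_process_cpu_entry kentry;
        struct task_struct *task = NULL;
        long ret = 0;

        memset(&kentry, 0, sizeof(kentry));
        if (copy_from_user(&kentry, entry, sizeof(kentry)))
            return -EFAULT;
        if (kentry.cur_count >= kentry.total_count)
            return -EINVAL;             /* no room left in the user buffer */

        task = get_alive_task_by_pid(kentry.filter.pid);
        if (task == NULL)
            return -ESRCH;              /* target exited or never existed */

        get_process_load(task, kentry.cur_count, entry);
        kentry.cur_count++;
        if (put_user(kentry.cur_count, &entry->cur_count))
            ret = -EFAULT;
        put_task_struct(task);          /* pairs with the ref in the helper sketch */
        return ret;
    }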