// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 */
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/version.h>

#include "jit_memory.h"
#include "jit_space_list.h"
#include "avc.h"
#include "objsec.h"

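/* Serializes all walks and updates of the per-process JIT space lists. */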
DEFINE_SPINLOCK(list_lock);

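/*
 * Ask the SELinux AVC (without generating an audit record) whether @task's
 * credentials are granted @requested on @tclass.  Always returns false when
 * the current process is init (PID 1), so the hooks below leave init alone.
 */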
static bool jit_avc_has_perm(u16 tclass, u32 requested, struct task_struct *task)
{
	struct av_decision avd;
	u32 secid;

	/* Never restrict or track init (PID 1). */
	if (task_pid_nr(current) == 1)
		return false;

	security_cred_getsecid(task->cred, &secid);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(6, 6, 0))
	return (avc_has_perm_noaudit(&selinux_state, secid, secid, tclass, requested,
		AVC_STRICT, &avd) == 0);
#else
	return (avc_has_perm_noaudit(secid, secid, tclass, requested,
		AVC_STRICT, &avd) == 0);
#endif
}

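/*
 * Look up @task's JIT space list and check the range [@start, @start + @size)
 * against it; find_jit_space() is expected to report the result through @err.
 * Tasks without the jit_memory exec_mem_ctrl permission are ignored.
 */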
void find_jit_memory(struct task_struct *task, unsigned long start, unsigned long size, int *err)
{
	struct list_head *head;

	if (!jit_avc_has_perm(SECCLASS_JIT_MEMORY, JIT_MEMORY__EXEC_MEM_CTRL, task))
		return;

	head = find_process_jit_space(&root_tree, task->pid).head;
	if (head != NULL) {
		spin_lock(&list_lock);
		find_jit_space(head, start, size, err);
		spin_unlock(&list_lock);
	}
}

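/*
 * Vet a freshly created anonymous mapping for @task.  On entry *@err holds
 * the address the mapping was placed at: PROT_EXEC mappings are unmapped and
 * fail with -EACCES, and MAP_JIT mappings are recorded in the process's JIT
 * space list under @cookie.
 */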
void check_jit_memory(struct task_struct *task, unsigned long cookie, unsigned long prot,
	unsigned long flag, unsigned long size, unsigned long *err)
{
	unsigned long start;
	struct list_head *head;

	if (!jit_avc_has_perm(SECCLASS_JIT_MEMORY, JIT_MEMORY__EXEC_MEM_CTRL, task) ||
	    !(flag & MAP_ANONYMOUS))
		return;

	/* On entry *err carries the address the mapping was placed at. */
	start = *err;

	if (prot & PROT_EXEC) {
		jit_memory_log_info("cannot apply PROT_EXEC");
		*err = -EACCES;
		vm_munmap(start, size);
		return;
	}
	if (!(flag & MAP_JIT))
		return;

	head = update_process_jit_space(&root_tree, task->pid, cookie, err);
	if (IS_ERR_VALUE(*err)) {
		vm_munmap(start, size);
		return;
	}
	if (head != NULL) {
		spin_lock(&list_lock);
		update_jit_space(head, start, size);
		spin_unlock(&list_lock);
	}
}

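/*
 * Remove the range [@start, @start + @size) from @task's JIT space list,
 * reporting failures through @err.
 */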
void delete_jit_memory(struct task_struct *task, unsigned long start, unsigned long size, int *err)
{
	struct list_head *head;

	if (!jit_avc_has_perm(SECCLASS_JIT_MEMORY, JIT_MEMORY__EXEC_MEM_CTRL, task))
		return;

	head = find_process_jit_space(&root_tree, task->pid).head;
	if (head != NULL) {
		spin_lock(&list_lock);
		delete_jit_space(head, start, size, err);
		spin_unlock(&list_lock);
	}
}

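/*
 * Release all JIT space tracking for @task: detach its jit_process node,
 * free every tracked region and then the node itself.
 */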
void exit_jit_memory(struct task_struct *task)
{
	struct jit_process *process;

	if (!jit_avc_has_perm(SECCLASS_JIT_MEMORY, JIT_MEMORY__EXEC_MEM_CTRL, task))
		return;

	process = delete_process_jit_space(&root_tree, task->pid);
	if (process != NULL) {
		spin_lock(&list_lock);
		exit_jit_space(&process->head);
		spin_unlock(&list_lock);
		kfree(process);
	}
}