// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 */

/**
 * DOC: Enclave lifetime management driver for Nitro Enclaves (NE).
 * Nitro is a hypervisor that has been developed by Amazon.
 */

#include <linux/anon_inodes.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/hugetlb.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nitro_enclaves.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <uapi/linux/vm_sockets.h>

#include "ne_misc_dev.h"
#include "ne_pci_dev.h"

/**
 * NE_CPUS_SIZE - Size for max 128 CPUs, for now, in a cpu-list string, comma
 *                separated. The NE CPU pool includes CPUs from a single NUMA
 *                node.
 */
#define NE_CPUS_SIZE            (512)

/**
 * NE_EIF_LOAD_OFFSET - The offset where to copy the Enclave Image Format (EIF)
 *                      image in enclave memory.
 */
#define NE_EIF_LOAD_OFFSET      (8 * 1024UL * 1024UL)

/**
 * NE_MIN_ENCLAVE_MEM_SIZE - The minimum memory size an enclave can be launched
 *                           with.
 */
#define NE_MIN_ENCLAVE_MEM_SIZE (64 * 1024UL * 1024UL)

/**
 * NE_MIN_MEM_REGION_SIZE - The minimum size of an enclave memory region.
 */
#define NE_MIN_MEM_REGION_SIZE  (2 * 1024UL * 1024UL)

/**
 * NE_PARENT_VM_CID - The CID for the vsock device of the primary / parent VM.
 */
#define NE_PARENT_VM_CID        (3)

static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg);

static const struct file_operations ne_fops = {
        .owner          = THIS_MODULE,
        .llseek         = noop_llseek,
        .unlocked_ioctl = ne_ioctl,
};

static struct miscdevice ne_misc_dev = {
        .minor  = MISC_DYNAMIC_MINOR,
        .name   = "nitro_enclaves",
        .fops   = &ne_fops,
        .mode   = 0660,
};

struct ne_devs ne_devs = {
        .ne_misc_dev    = &ne_misc_dev,
};

/*
 * TODO: Update logic to create new sysfs entries instead of using
 * a kernel parameter e.g. if multiple sysfs files needed.
 */
static int ne_set_kernel_param(const char *val, const struct kernel_param *kp);

static const struct kernel_param_ops ne_cpu_pool_ops = {
        .get    = param_get_string,
        .set    = ne_set_kernel_param,
};

static char ne_cpus[NE_CPUS_SIZE];
static struct kparam_string ne_cpus_arg = {
        .maxlen = sizeof(ne_cpus),
        .string = ne_cpus,
};

module_param_cb(ne_cpus, &ne_cpu_pool_ops, &ne_cpus_arg, 0644);
/* https://www.kernel.org/doc/html/latest/admin-guide/kernel-parameters.html#cpu-lists */
MODULE_PARM_DESC(ne_cpus, "<cpu-list> - CPU pool used for Nitro Enclaves");
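
/*
 * Illustrative usage of the ne_cpus parameter (a sketch, not part of the
 * driver): the CPU pool can be set when loading the module or at runtime
 * via sysfs, e.g.
 *
 *      modprobe nitro_enclaves ne_cpus=2-7
 *      echo "2-7" > /sys/module/nitro_enclaves/parameters/ne_cpus
 *
 * The exact CPU list is topology dependent; per the checks below, it has to
 * hold full CPU cores from a single NUMA node and exclude CPU 0 and its
 * siblings, which remain with the primary / parent VM.
 */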

/**
 * struct ne_cpu_pool - CPU pool used for Nitro Enclaves.
 * @avail_threads_per_core:    Available full CPU cores to be dedicated to
 *                             enclave(s). The cpumasks from the array, indexed
 *                             by core id, contain all the threads from the
 *                             available cores, that are not set for created
 *                             enclave(s). The full CPU cores are part of the
 *                             NE CPU pool.
 * @mutex:                     Mutex for the access to the NE CPU pool.
 * @nr_parent_vm_cores:        The size of the available threads per core array.
 *                             The total number of CPU cores available on the
 *                             primary / parent VM.
 * @nr_threads_per_core:       The number of threads that a full CPU core has.
 * @numa_node:                 NUMA node of the CPUs in the pool.
 */
struct ne_cpu_pool {
        cpumask_var_t   *avail_threads_per_core;
        struct mutex    mutex;
        unsigned int    nr_parent_vm_cores;
        unsigned int    nr_threads_per_core;
        int             numa_node;
};

static struct ne_cpu_pool ne_cpu_pool;

/**
 * ne_check_enclaves_created() - Verify if at least one enclave has been created.
 * @void:       No parameters provided.
 *
 * Context: Process context.
 * Return:
 * * True if at least one enclave is created.
 * * False otherwise.
 */
static bool ne_check_enclaves_created(void)
{
        struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
        bool ret = false;

        if (!ne_pci_dev)
                return ret;

        mutex_lock(&ne_pci_dev->enclaves_list_mutex);

        if (!list_empty(&ne_pci_dev->enclaves_list))
                ret = true;

        mutex_unlock(&ne_pci_dev->enclaves_list_mutex);

        return ret;
}

/**
 * ne_setup_cpu_pool() - Set the NE CPU pool after handling sanity checks such
 *                       as not sharing CPU cores with the primary / parent VM
 *                       or not using CPU 0, which should remain available for
 *                       the primary / parent VM. Offline the CPUs from the
 *                       pool after the checks passed.
 * @ne_cpu_list:        The CPU list used for setting NE CPU pool.
 *
 * Context: Process context.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_setup_cpu_pool(const char *ne_cpu_list)
{
        int core_id = -1;
        unsigned int cpu = 0;
        cpumask_var_t cpu_pool;
        unsigned int cpu_sibling = 0;
        unsigned int i = 0;
        int numa_node = -1;
        int rc = -EINVAL;

        if (!zalloc_cpumask_var(&cpu_pool, GFP_KERNEL))
                return -ENOMEM;

        mutex_lock(&ne_cpu_pool.mutex);

        rc = cpulist_parse(ne_cpu_list, cpu_pool);
        if (rc < 0) {
                pr_err("%s: Error in cpulist parse [rc=%d]\n", ne_misc_dev.name, rc);

                goto free_pool_cpumask;
        }

        cpu = cpumask_any(cpu_pool);
        if (cpu >= nr_cpu_ids) {
                pr_err("%s: No CPUs available in CPU pool\n", ne_misc_dev.name);

                rc = -EINVAL;

                goto free_pool_cpumask;
        }

        /*
         * Check if the CPUs are online, to further get info about them
         * e.g. numa node, core id, siblings.
         */
        for_each_cpu(cpu, cpu_pool)
                if (cpu_is_offline(cpu)) {
                        pr_err("%s: CPU %d is offline, has to be online to get its metadata\n",
                               ne_misc_dev.name, cpu);

                        rc = -EINVAL;

                        goto free_pool_cpumask;
                }

        /*
         * Check if the CPUs from the NE CPU pool are from the same NUMA node.
         */
        for_each_cpu(cpu, cpu_pool)
                if (numa_node < 0) {
                        numa_node = cpu_to_node(cpu);
                        if (numa_node < 0) {
                                pr_err("%s: Invalid NUMA node %d\n",
                                       ne_misc_dev.name, numa_node);

                                rc = -EINVAL;

                                goto free_pool_cpumask;
                        }
                } else {
                        if (numa_node != cpu_to_node(cpu)) {
                                pr_err("%s: CPUs with different NUMA nodes\n",
                                       ne_misc_dev.name);

                                rc = -EINVAL;

                                goto free_pool_cpumask;
                        }
                }

        /*
         * Check if CPU 0 and its siblings are included in the provided CPU pool.
         * They should remain available for the primary / parent VM.
         */
        if (cpumask_test_cpu(0, cpu_pool)) {
                pr_err("%s: CPU 0 has to remain available\n", ne_misc_dev.name);

                rc = -EINVAL;

                goto free_pool_cpumask;
        }

        for_each_cpu(cpu_sibling, topology_sibling_cpumask(0)) {
                if (cpumask_test_cpu(cpu_sibling, cpu_pool)) {
                        pr_err("%s: CPU sibling %d for CPU 0 is in CPU pool\n",
                               ne_misc_dev.name, cpu_sibling);

                        rc = -EINVAL;

                        goto free_pool_cpumask;
                }
        }

        /*
         * Check if CPU siblings are included in the provided CPU pool. The
         * expectation is that full CPU cores are made available in the CPU pool
         * for enclaves.
         */
        for_each_cpu(cpu, cpu_pool) {
                for_each_cpu(cpu_sibling, topology_sibling_cpumask(cpu)) {
                        if (!cpumask_test_cpu(cpu_sibling, cpu_pool)) {
                                pr_err("%s: CPU %d is not in CPU pool\n",
                                       ne_misc_dev.name, cpu_sibling);

                                rc = -EINVAL;

                                goto free_pool_cpumask;
                        }
                }
        }

        /* Calculate the number of threads from a full CPU core. */
        cpu = cpumask_any(cpu_pool);
        for_each_cpu(cpu_sibling, topology_sibling_cpumask(cpu))
                ne_cpu_pool.nr_threads_per_core++;

        ne_cpu_pool.nr_parent_vm_cores = nr_cpu_ids / ne_cpu_pool.nr_threads_per_core;

        ne_cpu_pool.avail_threads_per_core = kcalloc(ne_cpu_pool.nr_parent_vm_cores,
                                                     sizeof(*ne_cpu_pool.avail_threads_per_core),
                                                     GFP_KERNEL);
        if (!ne_cpu_pool.avail_threads_per_core) {
                rc = -ENOMEM;

                goto free_pool_cpumask;
        }

        for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
                if (!zalloc_cpumask_var(&ne_cpu_pool.avail_threads_per_core[i], GFP_KERNEL)) {
                        rc = -ENOMEM;

                        goto free_cores_cpumask;
                }

        /*
         * Split the NE CPU pool in threads per core to keep the CPU topology
         * after offlining the CPUs.
         */
        for_each_cpu(cpu, cpu_pool) {
                core_id = topology_core_id(cpu);
                if (core_id < 0 || core_id >= ne_cpu_pool.nr_parent_vm_cores) {
                        pr_err("%s: Invalid core id %d for CPU %d\n",
                               ne_misc_dev.name, core_id, cpu);

                        rc = -EINVAL;

                        goto clear_cpumask;
                }

                cpumask_set_cpu(cpu, ne_cpu_pool.avail_threads_per_core[core_id]);
        }

        /*
         * CPUs that are given to enclave(s) should not be considered online
         * by Linux anymore, as the hypervisor will degrade them to floating.
         * The physical CPUs (full cores) are carved out of the primary / parent
         * VM and given to the enclave VM. The same number of vCPUs would run
         * on fewer pCPUs for the primary / parent VM.
         *
         * We offline them here, to not degrade performance and expose correct
         * topology to Linux and user space.
         */
        for_each_cpu(cpu, cpu_pool) {
                rc = remove_cpu(cpu);
                if (rc != 0) {
                        pr_err("%s: CPU %d is not offlined [rc=%d]\n",
                               ne_misc_dev.name, cpu, rc);

                        goto online_cpus;
                }
        }

        free_cpumask_var(cpu_pool);

        ne_cpu_pool.numa_node = numa_node;

        mutex_unlock(&ne_cpu_pool.mutex);

        return 0;

online_cpus:
        for_each_cpu(cpu, cpu_pool)
                add_cpu(cpu);
clear_cpumask:
        for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
                cpumask_clear(ne_cpu_pool.avail_threads_per_core[i]);
free_cores_cpumask:
        for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
                free_cpumask_var(ne_cpu_pool.avail_threads_per_core[i]);
        kfree(ne_cpu_pool.avail_threads_per_core);
free_pool_cpumask:
        free_cpumask_var(cpu_pool);
        ne_cpu_pool.nr_parent_vm_cores = 0;
        ne_cpu_pool.nr_threads_per_core = 0;
        ne_cpu_pool.numa_node = -1;
        mutex_unlock(&ne_cpu_pool.mutex);

        return rc;
}
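
/*
 * Worked example (illustrative, topology dependent): on a parent VM with
 * two threads per core, where CPUs 2-7 form three full cores on one NUMA
 * node, "ne_cpus=2-7" passes the checks above; CPU 0 and its sibling stay
 * with the primary / parent VM, while CPUs 2-7 are offlined and tracked in
 * avail_threads_per_core, indexed by core id.
 */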

/**
 * ne_teardown_cpu_pool() - Online the CPUs from the NE CPU pool and cleanup the
 *                          CPU pool.
 * @void:       No parameters provided.
 *
 * Context: Process context.
 */
static void ne_teardown_cpu_pool(void)
{
        unsigned int cpu = 0;
        unsigned int i = 0;
        int rc = -EINVAL;

        mutex_lock(&ne_cpu_pool.mutex);

        if (!ne_cpu_pool.nr_parent_vm_cores) {
                mutex_unlock(&ne_cpu_pool.mutex);

                return;
        }

        for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++) {
                for_each_cpu(cpu, ne_cpu_pool.avail_threads_per_core[i]) {
                        rc = add_cpu(cpu);
                        if (rc != 0)
                                pr_err("%s: CPU %d is not onlined [rc=%d]\n",
                                       ne_misc_dev.name, cpu, rc);
                }

                cpumask_clear(ne_cpu_pool.avail_threads_per_core[i]);

                free_cpumask_var(ne_cpu_pool.avail_threads_per_core[i]);
        }

        kfree(ne_cpu_pool.avail_threads_per_core);
        ne_cpu_pool.nr_parent_vm_cores = 0;
        ne_cpu_pool.nr_threads_per_core = 0;
        ne_cpu_pool.numa_node = -1;

        mutex_unlock(&ne_cpu_pool.mutex);
}

/**
 * ne_set_kernel_param() - Set the NE CPU pool value via the NE kernel parameter.
 * @val:        NE CPU pool string value.
 * @kp:         NE kernel parameter associated with the NE CPU pool.
 *
 * Context: Process context.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_set_kernel_param(const char *val, const struct kernel_param *kp)
{
        char error_val[] = "";
        int rc = -EINVAL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (ne_check_enclaves_created()) {
                pr_err("%s: The CPU pool is used by enclave(s)\n", ne_misc_dev.name);

                return -EPERM;
        }

        ne_teardown_cpu_pool();

        rc = ne_setup_cpu_pool(val);
        if (rc < 0) {
                pr_err("%s: Error in setup CPU pool [rc=%d]\n", ne_misc_dev.name, rc);

                param_set_copystring(error_val, kp);

                return rc;
        }

        rc = param_set_copystring(val, kp);
        if (rc < 0) {
                pr_err("%s: Error in param set copystring [rc=%d]\n", ne_misc_dev.name, rc);

                ne_teardown_cpu_pool();

                param_set_copystring(error_val, kp);

                return rc;
        }

        return 0;
}

/**
 * ne_donated_cpu() - Check if the provided CPU is already used by the enclave.
 * @ne_enclave:         Private data associated with the current enclave.
 * @cpu:                CPU to check if already used.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 * Return:
 * * True if the provided CPU is already used by the enclave.
 * * False otherwise.
 */
static bool ne_donated_cpu(struct ne_enclave *ne_enclave, unsigned int cpu)
{
        if (cpumask_test_cpu(cpu, ne_enclave->vcpu_ids))
                return true;

        return false;
}

/**
 * ne_get_unused_core_from_cpu_pool() - Get the id of a full core from the
 *                                      NE CPU pool.
 * @void:       No parameters provided.
 *
 * Context: Process context. This function is called with the ne_enclave and
 *          ne_cpu_pool mutexes held.
 * Return:
 * * Core id.
 * * -1 if no CPU core available in the pool.
 */
static int ne_get_unused_core_from_cpu_pool(void)
{
        int core_id = -1;
        unsigned int i = 0;

        for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
                if (!cpumask_empty(ne_cpu_pool.avail_threads_per_core[i])) {
                        core_id = i;

                        break;
                }

        return core_id;
}

/**
 * ne_set_enclave_threads_per_core() - Set the threads of the provided core in
 *                                     the enclave data structure.
 * @ne_enclave:         Private data associated with the current enclave.
 * @core_id:            Core id to get its threads from the NE CPU pool.
 * @vcpu_id:            vCPU id part of the provided core.
 *
 * Context: Process context. This function is called with the ne_enclave and
 *          ne_cpu_pool mutexes held.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_set_enclave_threads_per_core(struct ne_enclave *ne_enclave,
                                           int core_id, u32 vcpu_id)
{
        unsigned int cpu = 0;

        if (core_id < 0 && vcpu_id == 0) {
                dev_err_ratelimited(ne_misc_dev.this_device,
                                    "No CPUs available in NE CPU pool\n");

                return -NE_ERR_NO_CPUS_AVAIL_IN_POOL;
        }

        if (core_id < 0) {
                dev_err_ratelimited(ne_misc_dev.this_device,
                                    "CPU %d is not in NE CPU pool\n", vcpu_id);

                return -NE_ERR_VCPU_NOT_IN_CPU_POOL;
        }

        if (core_id >= ne_enclave->nr_parent_vm_cores) {
                dev_err_ratelimited(ne_misc_dev.this_device,
                                    "Invalid core id %d - ne_enclave\n", core_id);

                return -NE_ERR_VCPU_INVALID_CPU_CORE;
        }

        for_each_cpu(cpu, ne_cpu_pool.avail_threads_per_core[core_id])
                cpumask_set_cpu(cpu, ne_enclave->threads_per_core[core_id]);

        cpumask_clear(ne_cpu_pool.avail_threads_per_core[core_id]);

        return 0;
}

/**
 * ne_get_cpu_from_cpu_pool() - Get a CPU from the NE CPU pool, either from the
 *                              remaining sibling(s) of a CPU core or the first
 *                              sibling of a new CPU core.
 * @ne_enclave:         Private data associated with the current enclave.
 * @vcpu_id:            vCPU to get from the NE CPU pool.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_get_cpu_from_cpu_pool(struct ne_enclave *ne_enclave, u32 *vcpu_id)
{
        int core_id = -1;
        unsigned int cpu = 0;
        unsigned int i = 0;
        int rc = -EINVAL;

        /*
         * If previously allocated a thread of a core to this enclave, first
         * check remaining sibling(s) for new CPU allocations, so that full
         * CPU cores are used for the enclave.
         */
        for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
                for_each_cpu(cpu, ne_enclave->threads_per_core[i])
                        if (!ne_donated_cpu(ne_enclave, cpu)) {
                                *vcpu_id = cpu;

                                return 0;
                        }

        mutex_lock(&ne_cpu_pool.mutex);

        /*
         * If no remaining siblings, get a core from the NE CPU pool and keep
         * track of all the threads in the enclave threads per core data structure.
         */
        core_id = ne_get_unused_core_from_cpu_pool();

        rc = ne_set_enclave_threads_per_core(ne_enclave, core_id, *vcpu_id);
        if (rc < 0)
                goto unlock_mutex;

        *vcpu_id = cpumask_any(ne_enclave->threads_per_core[core_id]);

        rc = 0;

unlock_mutex:
        mutex_unlock(&ne_cpu_pool.mutex);

        return rc;
}

/**
 * ne_get_vcpu_core_from_cpu_pool() - Get from the NE CPU pool the id of the
 *                                    core associated with the provided vCPU.
 * @vcpu_id:    Provided vCPU id to get its associated core id.
 *
 * Context: Process context. This function is called with the ne_enclave and
 *          ne_cpu_pool mutexes held.
 * Return:
 * * Core id.
 * * -1 if the provided vCPU is not in the pool.
 */
static int ne_get_vcpu_core_from_cpu_pool(u32 vcpu_id)
{
        int core_id = -1;
        unsigned int i = 0;

        for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
                if (cpumask_test_cpu(vcpu_id, ne_cpu_pool.avail_threads_per_core[i])) {
                        core_id = i;

                        break;
                }

        return core_id;
}

/**
 * ne_check_cpu_in_cpu_pool() - Check if the given vCPU is in the available CPUs
 *                              from the pool.
 * @ne_enclave:         Private data associated with the current enclave.
 * @vcpu_id:            ID of the vCPU to check if available in the NE CPU pool.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_check_cpu_in_cpu_pool(struct ne_enclave *ne_enclave, u32 vcpu_id)
{
        int core_id = -1;
        unsigned int i = 0;
        int rc = -EINVAL;

        if (ne_donated_cpu(ne_enclave, vcpu_id)) {
                dev_err_ratelimited(ne_misc_dev.this_device,
                                    "CPU %d already used\n", vcpu_id);

                return -NE_ERR_VCPU_ALREADY_USED;
        }

        /*
         * If previously allocated a thread of a core to this enclave, but not
         * the full core, first check remaining sibling(s).
         */
        for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
                if (cpumask_test_cpu(vcpu_id, ne_enclave->threads_per_core[i]))
                        return 0;

        mutex_lock(&ne_cpu_pool.mutex);

        /*
         * If no remaining siblings, get from the NE CPU pool the core
         * associated with the vCPU and keep track of all the threads in the
         * enclave threads per core data structure.
         */
        core_id = ne_get_vcpu_core_from_cpu_pool(vcpu_id);

        rc = ne_set_enclave_threads_per_core(ne_enclave, core_id, vcpu_id);
        if (rc < 0)
                goto unlock_mutex;

        rc = 0;

unlock_mutex:
        mutex_unlock(&ne_cpu_pool.mutex);

        return rc;
}

/**
 * ne_add_vcpu_ioctl() - Add a vCPU to the slot associated with the current
 *                       enclave.
 * @ne_enclave:         Private data associated with the current enclave.
 * @vcpu_id:            ID of the CPU to be associated with the given slot,
 *                      apic id on x86.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_add_vcpu_ioctl(struct ne_enclave *ne_enclave, u32 vcpu_id)
{
        struct ne_pci_dev_cmd_reply cmd_reply = {};
        struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev;
        int rc = -EINVAL;
        struct slot_add_vcpu_req slot_add_vcpu_req = {};

        if (ne_enclave->mm != current->mm)
                return -EIO;

        slot_add_vcpu_req.slot_uid = ne_enclave->slot_uid;
        slot_add_vcpu_req.vcpu_id = vcpu_id;

        rc = ne_do_request(pdev, SLOT_ADD_VCPU,
                           &slot_add_vcpu_req, sizeof(slot_add_vcpu_req),
                           &cmd_reply, sizeof(cmd_reply));
        if (rc < 0) {
                dev_err_ratelimited(ne_misc_dev.this_device,
                                    "Error in slot add vCPU [rc=%d]\n", rc);

                return rc;
        }

        cpumask_set_cpu(vcpu_id, ne_enclave->vcpu_ids);

        ne_enclave->nr_vcpus++;

        return 0;
}
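
/*
 * Illustrative user space usage for the NE_ADD_VCPU ioctl (a sketch based on
 * the NE uapi in <linux/nitro_enclaves.h>, not part of the driver);
 * enclave_fd is assumed to be the fd returned by NE_CREATE_VM. A vCPU id of
 * 0 lets the driver pick a CPU from the NE CPU pool:
 *
 *      unsigned int vcpu_id = 0;
 *
 *      if (ioctl(enclave_fd, NE_ADD_VCPU, &vcpu_id) < 0)
 *              // handle error
 *
 *      // On success, vcpu_id holds the CPU chosen from the NE CPU pool.
 */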

/**
 * ne_sanity_check_user_mem_region() - Sanity check the user space memory
 *                                     region received during the set user
 *                                     memory region ioctl call.
 * @ne_enclave:         Private data associated with the current enclave.
 * @mem_region:         User space memory region to be sanity checked.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_sanity_check_user_mem_region(struct ne_enclave *ne_enclave,
                                           struct ne_user_memory_region mem_region)
{
        struct ne_mem_region *ne_mem_region = NULL;

        if (ne_enclave->mm != current->mm)
                return -EIO;

        if (mem_region.memory_size & (NE_MIN_MEM_REGION_SIZE - 1)) {
                dev_err_ratelimited(ne_misc_dev.this_device,
                                    "User space memory size is not multiple of 2 MiB\n");

                return -NE_ERR_INVALID_MEM_REGION_SIZE;
        }

        if (!IS_ALIGNED(mem_region.userspace_addr, NE_MIN_MEM_REGION_SIZE)) {
                dev_err_ratelimited(ne_misc_dev.this_device,
                                    "User space address is not 2 MiB aligned\n");

                return -NE_ERR_UNALIGNED_MEM_REGION_ADDR;
        }

        if ((mem_region.userspace_addr & (NE_MIN_MEM_REGION_SIZE - 1)) ||
            !access_ok((void __user *)(unsigned long)mem_region.userspace_addr,
                       mem_region.memory_size)) {
                dev_err_ratelimited(ne_misc_dev.this_device,
                                    "Invalid user space address range\n");

                return -NE_ERR_INVALID_MEM_REGION_ADDR;
        }

        list_for_each_entry(ne_mem_region, &ne_enclave->mem_regions_list,
                            mem_region_list_entry) {
                u64 memory_size = ne_mem_region->memory_size;
                u64 userspace_addr = ne_mem_region->userspace_addr;

                if ((userspace_addr <= mem_region.userspace_addr &&
                     mem_region.userspace_addr < (userspace_addr + memory_size)) ||
                    (mem_region.userspace_addr <= userspace_addr &&
                     (mem_region.userspace_addr + mem_region.memory_size) > userspace_addr)) {
                        dev_err_ratelimited(ne_misc_dev.this_device,
                                            "User space memory region already used\n");

                        return -NE_ERR_MEM_REGION_ALREADY_USED;
                }
        }

        return 0;
}

/**
 * ne_sanity_check_user_mem_region_page() - Sanity check a page from the user space
 *                                          memory region received during the set
 *                                          user memory region ioctl call.
 * @ne_enclave:         Private data associated with the current enclave.
 * @mem_region_page:    Page from the user space memory region to be sanity checked.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_sanity_check_user_mem_region_page(struct ne_enclave *ne_enclave,
                                                struct page *mem_region_page)
{
        if (!PageHuge(mem_region_page)) {
                dev_err_ratelimited(ne_misc_dev.this_device,
                                    "Not a hugetlbfs page\n");

                return -NE_ERR_MEM_NOT_HUGE_PAGE;
        }

        if (page_size(mem_region_page) & (NE_MIN_MEM_REGION_SIZE - 1)) {
                dev_err_ratelimited(ne_misc_dev.this_device,
                                    "Page size not multiple of 2 MiB\n");

                return -NE_ERR_INVALID_PAGE_SIZE;
        }

        if (ne_enclave->numa_node != page_to_nid(mem_region_page)) {
                dev_err_ratelimited(ne_misc_dev.this_device,
                                    "Page is not from NUMA node %d\n",
                                    ne_enclave->numa_node);

                return -NE_ERR_MEM_DIFFERENT_NUMA_NODE;
        }

        return 0;
}

/**
 * ne_set_user_memory_region_ioctl() - Add user space memory region to the slot
 *                                     associated with the current enclave.
 * @ne_enclave:         Private data associated with the current enclave.
 * @mem_region:         User space memory region to be associated with the given slot.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
                                           struct ne_user_memory_region mem_region)
{
        long gup_rc = 0;
        unsigned long i = 0;
        unsigned long max_nr_pages = 0;
        unsigned long memory_size = 0;
        struct ne_mem_region *ne_mem_region = NULL;
        unsigned long nr_phys_contig_mem_regions = 0;
        struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev;
        struct page **phys_contig_mem_regions = NULL;
        int rc = -EINVAL;

        rc = ne_sanity_check_user_mem_region(ne_enclave, mem_region);
        if (rc < 0)
                return rc;

        ne_mem_region = kzalloc(sizeof(*ne_mem_region), GFP_KERNEL);
        if (!ne_mem_region)
                return -ENOMEM;

        max_nr_pages = mem_region.memory_size / NE_MIN_MEM_REGION_SIZE;

        ne_mem_region->pages = kcalloc(max_nr_pages, sizeof(*ne_mem_region->pages),
                                       GFP_KERNEL);
        if (!ne_mem_region->pages) {
                rc = -ENOMEM;

                goto free_mem_region;
        }

        phys_contig_mem_regions = kcalloc(max_nr_pages, sizeof(*phys_contig_mem_regions),
                                          GFP_KERNEL);
        if (!phys_contig_mem_regions) {
                rc = -ENOMEM;

                goto free_mem_region;
        }

        do {
                i = ne_mem_region->nr_pages;

                if (i == max_nr_pages) {
                        dev_err_ratelimited(ne_misc_dev.this_device,
                                            "Reached max nr of pages in the pages data struct\n");

                        rc = -ENOMEM;

                        goto put_pages;
                }

                gup_rc = get_user_pages(mem_region.userspace_addr + memory_size, 1, FOLL_GET,
                                        ne_mem_region->pages + i, NULL);
                if (gup_rc < 0) {
                        rc = gup_rc;

                        dev_err_ratelimited(ne_misc_dev.this_device,
                                            "Error in get user pages [rc=%d]\n", rc);

                        goto put_pages;
                }

                rc = ne_sanity_check_user_mem_region_page(ne_enclave, ne_mem_region->pages[i]);
                if (rc < 0)
                        goto put_pages;

                /*
                 * TODO: Update once handled non-contiguous memory regions
                 * received from user space or contiguous physical memory regions
                 * larger than 2 MiB e.g. 8 MiB.
                 */
                phys_contig_mem_regions[i] = ne_mem_region->pages[i];

                memory_size += page_size(ne_mem_region->pages[i]);

                ne_mem_region->nr_pages++;
        } while (memory_size < mem_region.memory_size);

        /*
         * TODO: Update once handled non-contiguous memory regions received
         * from user space or contiguous physical memory regions larger than
         * 2 MiB e.g. 8 MiB.
         */
        nr_phys_contig_mem_regions = ne_mem_region->nr_pages;

        if ((ne_enclave->nr_mem_regions + nr_phys_contig_mem_regions) >
            ne_enclave->max_mem_regions) {
                dev_err_ratelimited(ne_misc_dev.this_device,
                                    "Reached max memory regions %lld\n",
                                    ne_enclave->max_mem_regions);

                rc = -NE_ERR_MEM_MAX_REGIONS;

                goto put_pages;
        }

        for (i = 0; i < nr_phys_contig_mem_regions; i++) {
                u64 phys_region_addr = page_to_phys(phys_contig_mem_regions[i]);
                u64 phys_region_size = page_size(phys_contig_mem_regions[i]);

                if (phys_region_size & (NE_MIN_MEM_REGION_SIZE - 1)) {
                        dev_err_ratelimited(ne_misc_dev.this_device,
                                            "Physical mem region size is not multiple of 2 MiB\n");

                        rc = -EINVAL;

                        goto put_pages;
                }

                if (!IS_ALIGNED(phys_region_addr, NE_MIN_MEM_REGION_SIZE)) {
                        dev_err_ratelimited(ne_misc_dev.this_device,
                                            "Physical mem region address is not 2 MiB aligned\n");

                        rc = -EINVAL;

                        goto put_pages;
                }
        }

        ne_mem_region->memory_size = mem_region.memory_size;
        ne_mem_region->userspace_addr = mem_region.userspace_addr;

        list_add(&ne_mem_region->mem_region_list_entry, &ne_enclave->mem_regions_list);

        for (i = 0; i < nr_phys_contig_mem_regions; i++) {
                struct ne_pci_dev_cmd_reply cmd_reply = {};
                struct slot_add_mem_req slot_add_mem_req = {};

                slot_add_mem_req.slot_uid = ne_enclave->slot_uid;
                slot_add_mem_req.paddr = page_to_phys(phys_contig_mem_regions[i]);
                slot_add_mem_req.size = page_size(phys_contig_mem_regions[i]);

                rc = ne_do_request(pdev, SLOT_ADD_MEM,
                                   &slot_add_mem_req, sizeof(slot_add_mem_req),
                                   &cmd_reply, sizeof(cmd_reply));
                if (rc < 0) {
                        dev_err_ratelimited(ne_misc_dev.this_device,
                                            "Error in slot add mem [rc=%d]\n", rc);

                        kfree(phys_contig_mem_regions);

                        /*
                         * Exit here without putting the pages, as memory
                         * regions may already have been added.
                         */
                        return rc;
                }

                ne_enclave->mem_size += slot_add_mem_req.size;
                ne_enclave->nr_mem_regions++;
        }

        kfree(phys_contig_mem_regions);

        return 0;

put_pages:
        for (i = 0; i < ne_mem_region->nr_pages; i++)
                put_page(ne_mem_region->pages[i]);
free_mem_region:
        kfree(phys_contig_mem_regions);
        kfree(ne_mem_region->pages);
        kfree(ne_mem_region);

        return rc;
}
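
/*
 * Illustrative user space usage for the NE_SET_USER_MEMORY_REGION ioctl
 * (a sketch based on the NE uapi, not part of the driver): the backing
 * memory has to come in multiples of 2 MiB, e.g. huge pages:
 *
 *      struct ne_user_memory_region mem_region = {
 *              .flags          = NE_DEFAULT_MEMORY_REGION,
 *              .memory_size    = 2 * 1024 * 1024,
 *      };
 *
 *      mem_region.userspace_addr =
 *              (__u64)mmap(NULL, mem_region.memory_size,
 *                          PROT_READ | PROT_WRITE,
 *                          MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 *      if (ioctl(enclave_fd, NE_SET_USER_MEMORY_REGION, &mem_region) < 0)
 *              // handle error
 */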

/**
 * ne_start_enclave_ioctl() - Trigger enclave start after the enclave resources,
 *                            such as memory and CPU, have been set.
 * @ne_enclave:                 Private data associated with the current enclave.
 * @enclave_start_info:         Enclave info that includes enclave cid and flags.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_start_enclave_ioctl(struct ne_enclave *ne_enclave,
                                  struct ne_enclave_start_info *enclave_start_info)
{
        struct ne_pci_dev_cmd_reply cmd_reply = {};
        unsigned int cpu = 0;
        struct enclave_start_req enclave_start_req = {};
        unsigned int i = 0;
        struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev;
        int rc = -EINVAL;

        if (!ne_enclave->nr_mem_regions) {
                dev_err_ratelimited(ne_misc_dev.this_device,
                                    "Enclave has no mem regions\n");

                return -NE_ERR_NO_MEM_REGIONS_ADDED;
        }

        if (ne_enclave->mem_size < NE_MIN_ENCLAVE_MEM_SIZE) {
                dev_err_ratelimited(ne_misc_dev.this_device,
                                    "Enclave memory is less than %ld\n",
                                    NE_MIN_ENCLAVE_MEM_SIZE);

                return -NE_ERR_ENCLAVE_MEM_MIN_SIZE;
        }

        if (!ne_enclave->nr_vcpus) {
                dev_err_ratelimited(ne_misc_dev.this_device,
                                    "Enclave has no vCPUs\n");

                return -NE_ERR_NO_VCPUS_ADDED;
        }

        for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
                for_each_cpu(cpu, ne_enclave->threads_per_core[i])
                        if (!cpumask_test_cpu(cpu, ne_enclave->vcpu_ids)) {
                                dev_err_ratelimited(ne_misc_dev.this_device,
                                                    "Full CPU cores not used\n");

                                return -NE_ERR_FULL_CORES_NOT_USED;
                        }

        enclave_start_req.enclave_cid = enclave_start_info->enclave_cid;
        enclave_start_req.flags = enclave_start_info->flags;
        enclave_start_req.slot_uid = ne_enclave->slot_uid;

        rc = ne_do_request(pdev, ENCLAVE_START,
                           &enclave_start_req, sizeof(enclave_start_req),
                           &cmd_reply, sizeof(cmd_reply));
        if (rc < 0) {
                dev_err_ratelimited(ne_misc_dev.this_device,
                                    "Error in enclave start [rc=%d]\n", rc);

                return rc;
        }

        ne_enclave->state = NE_STATE_RUNNING;

        enclave_start_info->enclave_cid = cmd_reply.enclave_cid;

        return 0;
}
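
/*
 * Illustrative user space usage for the NE_START_ENCLAVE ioctl (a sketch
 * based on the NE uapi, not part of the driver): zeroed start info requests
 * an auto-generated enclave CID:
 *
 *      struct ne_enclave_start_info enclave_start_info = {};
 *
 *      if (ioctl(enclave_fd, NE_START_ENCLAVE, &enclave_start_info) < 0)
 *              // handle error
 *
 *      // On success, enclave_start_info.enclave_cid holds the CID to use
 *      // for vsock communication with the enclave.
 */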

/**
 * ne_enclave_ioctl() - Ioctl function provided by the enclave file.
 * @file:       File associated with this ioctl function.
 * @cmd:        The command that is set for the ioctl call.
 * @arg:        The argument that is provided for the ioctl call.
 *
 * Context: Process context.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static long ne_enclave_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct ne_enclave *ne_enclave = file->private_data;

        switch (cmd) {
        case NE_ADD_VCPU: {
                int rc = -EINVAL;
                u32 vcpu_id = 0;

                if (copy_from_user(&vcpu_id, (void __user *)arg, sizeof(vcpu_id)))
                        return -EFAULT;

                mutex_lock(&ne_enclave->enclave_info_mutex);

                if (ne_enclave->state != NE_STATE_INIT) {
                        dev_err_ratelimited(ne_misc_dev.this_device,
                                            "Enclave is not in init state\n");

                        mutex_unlock(&ne_enclave->enclave_info_mutex);

                        return -NE_ERR_NOT_IN_INIT_STATE;
                }

                if (vcpu_id >= (ne_enclave->nr_parent_vm_cores *
                    ne_enclave->nr_threads_per_core)) {
                        dev_err_ratelimited(ne_misc_dev.this_device,
                                            "vCPU id higher than max CPU id\n");

                        mutex_unlock(&ne_enclave->enclave_info_mutex);

                        return -NE_ERR_INVALID_VCPU;
                }

                if (!vcpu_id) {
                        /* Use the CPU pool for choosing a CPU for the enclave. */
                        rc = ne_get_cpu_from_cpu_pool(ne_enclave, &vcpu_id);
                        if (rc < 0) {
                                dev_err_ratelimited(ne_misc_dev.this_device,
                                                    "Error in get CPU from pool [rc=%d]\n",
                                                    rc);

                                mutex_unlock(&ne_enclave->enclave_info_mutex);

                                return rc;
                        }
                } else {
                        /* Check if the provided vCPU is available in the NE CPU pool. */
			rc = ne_check_cpu_in_cpu_pool(ne_enclave, vcpu_id);
			if (rc < 0) {
				dev_err_ratelimited(ne_misc_dev.this_device,
						    "Error in check CPU %d in pool [rc=%d]\n",
						    vcpu_id, rc);

				mutex_unlock(&ne_enclave->enclave_info_mutex);

				return rc;
			}
		}

		rc = ne_add_vcpu_ioctl(ne_enclave, vcpu_id);
		if (rc < 0) {
			mutex_unlock(&ne_enclave->enclave_info_mutex);

			return rc;
		}

		mutex_unlock(&ne_enclave->enclave_info_mutex);

		if (copy_to_user((void __user *)arg, &vcpu_id, sizeof(vcpu_id)))
			return -EFAULT;

		return 0;
	}

	case NE_GET_IMAGE_LOAD_INFO: {
		struct ne_image_load_info image_load_info = {};

		if (copy_from_user(&image_load_info, (void __user *)arg, sizeof(image_load_info)))
			return -EFAULT;

		mutex_lock(&ne_enclave->enclave_info_mutex);

		if (ne_enclave->state != NE_STATE_INIT) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Enclave is not in init state\n");

			mutex_unlock(&ne_enclave->enclave_info_mutex);

			return -NE_ERR_NOT_IN_INIT_STATE;
		}

		mutex_unlock(&ne_enclave->enclave_info_mutex);

		if (!image_load_info.flags ||
		    image_load_info.flags >= NE_IMAGE_LOAD_MAX_FLAG_VAL) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Incorrect flag in enclave image load info\n");

			return -NE_ERR_INVALID_FLAG_VALUE;
		}

		if (image_load_info.flags == NE_EIF_IMAGE)
			image_load_info.memory_offset = NE_EIF_LOAD_OFFSET;

		if (copy_to_user((void __user *)arg, &image_load_info, sizeof(image_load_info)))
			return -EFAULT;

		return 0;
	}

	case NE_SET_USER_MEMORY_REGION: {
		struct ne_user_memory_region mem_region = {};
		int rc = -EINVAL;

		if (copy_from_user(&mem_region, (void __user *)arg, sizeof(mem_region)))
			return -EFAULT;

		if (mem_region.flags >= NE_MEMORY_REGION_MAX_FLAG_VAL) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Incorrect flag for user memory region\n");

			return -NE_ERR_INVALID_FLAG_VALUE;
		}

		mutex_lock(&ne_enclave->enclave_info_mutex);

		if (ne_enclave->state != NE_STATE_INIT) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Enclave is not in init state\n");

			mutex_unlock(&ne_enclave->enclave_info_mutex);

			return -NE_ERR_NOT_IN_INIT_STATE;
		}

		rc = ne_set_user_memory_region_ioctl(ne_enclave, mem_region);
		if (rc < 0) {
			mutex_unlock(&ne_enclave->enclave_info_mutex);

			return rc;
		}

		mutex_unlock(&ne_enclave->enclave_info_mutex);

		return 0;
	}

	case NE_START_ENCLAVE: {
		struct ne_enclave_start_info enclave_start_info = {};
		int rc = -EINVAL;

		if (copy_from_user(&enclave_start_info, (void __user *)arg,
				   sizeof(enclave_start_info)))
			return -EFAULT;

		if (enclave_start_info.flags >= NE_ENCLAVE_START_MAX_FLAG_VAL) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Incorrect flag in enclave start info\n");

			return -NE_ERR_INVALID_FLAG_VALUE;
		}

		/*
		 * Do not use well-known CIDs - 0, 1, 2 - for enclaves.
		 * VMADDR_CID_ANY = -1U
		 * VMADDR_CID_HYPERVISOR = 0
		 * VMADDR_CID_LOCAL = 1
		 * VMADDR_CID_HOST = 2
		 * Note: 0 is used as a placeholder to auto-generate an enclave CID.
		 * http://man7.org/linux/man-pages/man7/vsock.7.html
		 */
		if (enclave_start_info.enclave_cid > 0 &&
		    enclave_start_info.enclave_cid <= VMADDR_CID_HOST) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Well-known CID value, not to be used for enclaves\n");

			return -NE_ERR_INVALID_ENCLAVE_CID;
		}

		if (enclave_start_info.enclave_cid == U32_MAX) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Well-known CID value, not to be used for enclaves\n");

			return -NE_ERR_INVALID_ENCLAVE_CID;
		}

		/*
		 * Do not use the CID of the primary / parent VM for enclaves.
		 */
		if (enclave_start_info.enclave_cid == NE_PARENT_VM_CID) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "CID of the parent VM, not to be used for enclaves\n");

			return -NE_ERR_INVALID_ENCLAVE_CID;
		}

		/* 64-bit CIDs are not yet supported for the vsock device. */
		if (enclave_start_info.enclave_cid > U32_MAX) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "64-bit CIDs not yet supported for the vsock device\n");

			return -NE_ERR_INVALID_ENCLAVE_CID;
		}

		mutex_lock(&ne_enclave->enclave_info_mutex);

		if (ne_enclave->state != NE_STATE_INIT) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Enclave is not in init state\n");

			mutex_unlock(&ne_enclave->enclave_info_mutex);

			return -NE_ERR_NOT_IN_INIT_STATE;
		}

		rc = ne_start_enclave_ioctl(ne_enclave, &enclave_start_info);
		if (rc < 0) {
			mutex_unlock(&ne_enclave->enclave_info_mutex);

			return rc;
		}

		mutex_unlock(&ne_enclave->enclave_info_mutex);

		if (copy_to_user((void __user *)arg, &enclave_start_info,
				 sizeof(enclave_start_info)))
			return -EFAULT;

		return 0;
	}

	default:
		return -ENOTTY;
	}

	return 0;
}
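
/*
 * Illustrative sketch, not part of the driver: a minimal hedged userspace
 * flow for the enclave fd ioctls handled by ne_enclave_ioctl() above,
 * assuming the uapi from <linux/nitro_enclaves.h>; error handling and the
 * EIF image copying are omitted, and the MEM_REGION_SIZE constant is
 * hypothetical:
 *
 *	struct ne_image_load_info load_info = { .flags = NE_EIF_IMAGE };
 *	struct ne_user_memory_region region = {};
 *	struct ne_enclave_start_info start_info = {};
 *	void *addr;
 *
 *	// Learn where to copy the EIF image in enclave memory; for
 *	// NE_EIF_IMAGE the driver reports NE_EIF_LOAD_OFFSET.
 *	ioctl(enclave_fd, NE_GET_IMAGE_LOAD_INFO, &load_info);
 *
 *	// Back the enclave with huge pages; regions must be at least
 *	// 2 MiB and the total at least 64 MiB, per the checks above.
 *	addr = mmap(NULL, MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
 *		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *	region.flags = NE_DEFAULT_MEMORY_REGION;
 *	region.memory_size = MEM_REGION_SIZE;
 *	region.userspace_addr = (__u64)addr;
 *	ioctl(enclave_fd, NE_SET_USER_MEMORY_REGION, &region);
 *
 *	// A CID of 0 asks for an auto-generated enclave CID; the chosen
 *	// CID is returned in start_info.enclave_cid.
 *	ioctl(enclave_fd, NE_START_ENCLAVE, &start_info);
 */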
/**
 * ne_enclave_remove_all_mem_region_entries() - Remove all memory region entries
 *						from the enclave data structure.
 * @ne_enclave :	Private data associated with the current enclave.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 */
static void ne_enclave_remove_all_mem_region_entries(struct ne_enclave *ne_enclave)
{
	unsigned long i = 0;
	struct ne_mem_region *ne_mem_region = NULL;
	struct ne_mem_region *ne_mem_region_tmp = NULL;

	list_for_each_entry_safe(ne_mem_region, ne_mem_region_tmp,
				 &ne_enclave->mem_regions_list,
				 mem_region_list_entry) {
		list_del(&ne_mem_region->mem_region_list_entry);

		for (i = 0; i < ne_mem_region->nr_pages; i++)
			put_page(ne_mem_region->pages[i]);

		kfree(ne_mem_region->pages);

		kfree(ne_mem_region);
	}
}

/**
 * ne_enclave_remove_all_vcpu_id_entries() - Remove all vCPU id entries from
 *					     the enclave data structure.
 * @ne_enclave :	Private data associated with the current enclave.
 *
 * Context: Process context. This function is called with the ne_enclave mutex held.
 */
static void ne_enclave_remove_all_vcpu_id_entries(struct ne_enclave *ne_enclave)
{
	unsigned int cpu = 0;
	unsigned int i = 0;

	mutex_lock(&ne_cpu_pool.mutex);

	for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) {
		for_each_cpu(cpu, ne_enclave->threads_per_core[i])
			/* Update the available NE CPU pool. */
			cpumask_set_cpu(cpu, ne_cpu_pool.avail_threads_per_core[i]);

		free_cpumask_var(ne_enclave->threads_per_core[i]);
	}

	mutex_unlock(&ne_cpu_pool.mutex);

	kfree(ne_enclave->threads_per_core);

	free_cpumask_var(ne_enclave->vcpu_ids);
}

/**
 * ne_pci_dev_remove_enclave_entry() - Remove the enclave entry from the data
 *				       structure that is part of the NE PCI
 *				       device private data.
 * @ne_enclave :	Private data associated with the current enclave.
 * @ne_pci_dev :	Private data associated with the PCI device.
 *
 * Context: Process context. This function is called with the ne_pci_dev enclave
 *	    mutex held.
 */
static void ne_pci_dev_remove_enclave_entry(struct ne_enclave *ne_enclave,
					    struct ne_pci_dev *ne_pci_dev)
{
	struct ne_enclave *ne_enclave_entry = NULL;
	struct ne_enclave *ne_enclave_entry_tmp = NULL;

	list_for_each_entry_safe(ne_enclave_entry, ne_enclave_entry_tmp,
				 &ne_pci_dev->enclaves_list, enclave_list_entry) {
		if (ne_enclave_entry->slot_uid == ne_enclave->slot_uid) {
			list_del(&ne_enclave_entry->enclave_list_entry);

			break;
		}
	}
}

/**
 * ne_enclave_release() - Release function provided by the enclave file.
 * @inode:	Inode associated with this file release function.
 * @file:	File associated with this release function.
 *
 * Context: Process context.
 * Return:
 * * 0 on success.
 * * Negative return value on failure.
 */
static int ne_enclave_release(struct inode *inode, struct file *file)
{
	struct ne_pci_dev_cmd_reply cmd_reply = {};
	struct enclave_stop_req enclave_stop_request = {};
	struct ne_enclave *ne_enclave = file->private_data;
	struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
	struct pci_dev *pdev = ne_pci_dev->pdev;
	int rc = -EINVAL;
	struct slot_free_req slot_free_req = {};

	if (!ne_enclave)
		return 0;

	/*
	 * Early exit in case there is an error in the enclave creation logic
	 * and fput() is called on the cleanup path.
	 */
	if (!ne_enclave->slot_uid)
		return 0;

	/*
	 * Acquire the enclave list mutex before the enclave mutex
	 * in order to avoid deadlocks with @ref ne_event_work_handler.
	 */
	mutex_lock(&ne_pci_dev->enclaves_list_mutex);
	mutex_lock(&ne_enclave->enclave_info_mutex);

	if (ne_enclave->state != NE_STATE_INIT && ne_enclave->state != NE_STATE_STOPPED) {
		enclave_stop_request.slot_uid = ne_enclave->slot_uid;

		rc = ne_do_request(pdev, ENCLAVE_STOP,
				   &enclave_stop_request, sizeof(enclave_stop_request),
				   &cmd_reply, sizeof(cmd_reply));
		if (rc < 0) {
			dev_err_ratelimited(ne_misc_dev.this_device,
					    "Error in enclave stop [rc=%d]\n", rc);

			goto unlock_mutex;
		}

		memset(&cmd_reply, 0, sizeof(cmd_reply));
	}

	slot_free_req.slot_uid = ne_enclave->slot_uid;

	rc = ne_do_request(pdev, SLOT_FREE,
			   &slot_free_req, sizeof(slot_free_req),
			   &cmd_reply, sizeof(cmd_reply));
	if (rc < 0) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "Error in slot free [rc=%d]\n", rc);

		goto unlock_mutex;
	}

	ne_pci_dev_remove_enclave_entry(ne_enclave, ne_pci_dev);
	ne_enclave_remove_all_mem_region_entries(ne_enclave);
	ne_enclave_remove_all_vcpu_id_entries(ne_enclave);

	mutex_unlock(&ne_enclave->enclave_info_mutex);
	mutex_unlock(&ne_pci_dev->enclaves_list_mutex);

	kfree(ne_enclave);

	return 0;

unlock_mutex:
	mutex_unlock(&ne_enclave->enclave_info_mutex);
	mutex_unlock(&ne_pci_dev->enclaves_list_mutex);

	return rc;
}

/**
 * ne_enclave_poll() - Poll functionality used for enclave out-of-band events.
 * @file:	File associated with this poll function.
 * @wait:	Poll table data structure.
 *
 * Context: Process context.
 * Return:
 * * Poll mask.
 */
static __poll_t ne_enclave_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;
	struct ne_enclave *ne_enclave = file->private_data;

	poll_wait(file, &ne_enclave->eventq, wait);

	if (ne_enclave->has_event)
		mask |= EPOLLHUP;

	return mask;
}

static const struct file_operations ne_enclave_fops = {
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
	.poll		= ne_enclave_poll,
	.unlocked_ioctl	= ne_enclave_ioctl,
	.release	= ne_enclave_release,
};
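
/*
 * Illustrative sketch, not part of the driver: ne_enclave_poll() above
 * reports an out-of-band enclave event (e.g. the enclave has stopped) as
 * EPOLLHUP, so userspace can wait for it roughly as follows; a minimal
 * hedged example with error handling omitted:
 *
 *	struct pollfd pfd = { .fd = enclave_fd, .events = 0 };
 *
 *	// POLLHUP is reported in revents even when no events are
 *	// requested in .events.
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLHUP))
 *		handle_enclave_exit();	// hypothetical handler
 */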
/**
 * ne_create_vm_ioctl() - Alloc slot to be associated with an enclave. Create
 *			  enclave file descriptor to be further used for enclave
 *			  resources handling e.g. memory regions and CPUs.
 * @ne_pci_dev :	Private data associated with the PCI device.
 * @slot_uid:	User pointer to store the generated unique slot id
 *		associated with an enclave to.
 *
 * Context: Process context. This function is called with the ne_pci_dev enclave
 *	    mutex held.
 * Return:
 * * Enclave fd on success.
 * * Negative return value on failure.
 */
static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 __user *slot_uid)
{
	struct ne_pci_dev_cmd_reply cmd_reply = {};
	int enclave_fd = -1;
	struct file *enclave_file = NULL;
	unsigned int i = 0;
	struct ne_enclave *ne_enclave = NULL;
	struct pci_dev *pdev = ne_pci_dev->pdev;
	int rc = -EINVAL;
	struct slot_alloc_req slot_alloc_req = {};

	mutex_lock(&ne_cpu_pool.mutex);

	for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++)
		if (!cpumask_empty(ne_cpu_pool.avail_threads_per_core[i]))
			break;

	if (i == ne_cpu_pool.nr_parent_vm_cores) {
		dev_err_ratelimited(ne_misc_dev.this_device,
				    "No CPUs available in CPU pool\n");

		mutex_unlock(&ne_cpu_pool.mutex);

		return -NE_ERR_NO_CPUS_AVAIL_IN_POOL;
	}

	mutex_unlock(&ne_cpu_pool.mutex);

	ne_enclave = kzalloc(sizeof(*ne_enclave), GFP_KERNEL);
	if (!ne_enclave)
		return -ENOMEM;

	mutex_lock(&ne_cpu_pool.mutex);

	ne_enclave->nr_parent_vm_cores = ne_cpu_pool.nr_parent_vm_cores;
	ne_enclave->nr_threads_per_core = ne_cpu_pool.nr_threads_per_core;
	ne_enclave->numa_node = ne_cpu_pool.numa_node;

	mutex_unlock(&ne_cpu_pool.mutex);

	ne_enclave->threads_per_core = kcalloc(ne_enclave->nr_parent_vm_cores,
					       sizeof(*ne_enclave->threads_per_core), GFP_KERNEL);
	if (!ne_enclave->threads_per_core) {
		rc = -ENOMEM;

		goto free_ne_enclave;
	}

	for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
		if (!zalloc_cpumask_var(&ne_enclave->threads_per_core[i], GFP_KERNEL)) {
			rc = -ENOMEM;

			goto free_cpumask;
		}

	if (!zalloc_cpumask_var(&ne_enclave->vcpu_ids, GFP_KERNEL)) {
		rc = -ENOMEM;

		goto free_cpumask;
	}

	enclave_fd = get_unused_fd_flags(O_CLOEXEC);
	if (enclave_fd < 0) {
		rc = enclave_fd;

		dev_err_ratelimited(ne_misc_dev.this_device,
"Error in getting unused fd [rc=%d]\n", rc); 16038c2ecf20Sopenharmony_ci 16048c2ecf20Sopenharmony_ci goto free_cpumask; 16058c2ecf20Sopenharmony_ci } 16068c2ecf20Sopenharmony_ci 16078c2ecf20Sopenharmony_ci enclave_file = anon_inode_getfile("ne-vm", &ne_enclave_fops, ne_enclave, O_RDWR); 16088c2ecf20Sopenharmony_ci if (IS_ERR(enclave_file)) { 16098c2ecf20Sopenharmony_ci rc = PTR_ERR(enclave_file); 16108c2ecf20Sopenharmony_ci 16118c2ecf20Sopenharmony_ci dev_err_ratelimited(ne_misc_dev.this_device, 16128c2ecf20Sopenharmony_ci "Error in anon inode get file [rc=%d]\n", rc); 16138c2ecf20Sopenharmony_ci 16148c2ecf20Sopenharmony_ci goto put_fd; 16158c2ecf20Sopenharmony_ci } 16168c2ecf20Sopenharmony_ci 16178c2ecf20Sopenharmony_ci rc = ne_do_request(pdev, SLOT_ALLOC, 16188c2ecf20Sopenharmony_ci &slot_alloc_req, sizeof(slot_alloc_req), 16198c2ecf20Sopenharmony_ci &cmd_reply, sizeof(cmd_reply)); 16208c2ecf20Sopenharmony_ci if (rc < 0) { 16218c2ecf20Sopenharmony_ci dev_err_ratelimited(ne_misc_dev.this_device, 16228c2ecf20Sopenharmony_ci "Error in slot alloc [rc=%d]\n", rc); 16238c2ecf20Sopenharmony_ci 16248c2ecf20Sopenharmony_ci goto put_file; 16258c2ecf20Sopenharmony_ci } 16268c2ecf20Sopenharmony_ci 16278c2ecf20Sopenharmony_ci init_waitqueue_head(&ne_enclave->eventq); 16288c2ecf20Sopenharmony_ci ne_enclave->has_event = false; 16298c2ecf20Sopenharmony_ci mutex_init(&ne_enclave->enclave_info_mutex); 16308c2ecf20Sopenharmony_ci ne_enclave->max_mem_regions = cmd_reply.mem_regions; 16318c2ecf20Sopenharmony_ci INIT_LIST_HEAD(&ne_enclave->mem_regions_list); 16328c2ecf20Sopenharmony_ci ne_enclave->mm = current->mm; 16338c2ecf20Sopenharmony_ci ne_enclave->slot_uid = cmd_reply.slot_uid; 16348c2ecf20Sopenharmony_ci ne_enclave->state = NE_STATE_INIT; 16358c2ecf20Sopenharmony_ci 16368c2ecf20Sopenharmony_ci list_add(&ne_enclave->enclave_list_entry, &ne_pci_dev->enclaves_list); 16378c2ecf20Sopenharmony_ci 16388c2ecf20Sopenharmony_ci if (copy_to_user(slot_uid, &ne_enclave->slot_uid, sizeof(ne_enclave->slot_uid))) { 16398c2ecf20Sopenharmony_ci /* 16408c2ecf20Sopenharmony_ci * As we're holding the only reference to 'enclave_file', fput() 16418c2ecf20Sopenharmony_ci * will call ne_enclave_release() which will do a proper cleanup 16428c2ecf20Sopenharmony_ci * of all so far allocated resources, leaving only the unused fd 16438c2ecf20Sopenharmony_ci * for us to free. 
		 */
		fput(enclave_file);
		put_unused_fd(enclave_fd);

		return -EFAULT;
	}

	fd_install(enclave_fd, enclave_file);

	return enclave_fd;

put_file:
	fput(enclave_file);
put_fd:
	put_unused_fd(enclave_fd);
free_cpumask:
	free_cpumask_var(ne_enclave->vcpu_ids);
	for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++)
		free_cpumask_var(ne_enclave->threads_per_core[i]);
	kfree(ne_enclave->threads_per_core);
free_ne_enclave:
	kfree(ne_enclave);

	return rc;
}
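
/*
 * Illustrative sketch, not part of the driver: a minimal hedged example of
 * creating an enclave through the misc device ioctl dispatched by
 * ne_ioctl() below, assuming the uapi from <linux/nitro_enclaves.h>;
 * error handling is omitted:
 *
 *	__u64 slot_uid = 0;
 *	int ne_dev_fd = open("/dev/nitro_enclaves", O_RDWR | O_CLOEXEC);
 *
 *	// On success, the return value is the new enclave fd and the
 *	// unique slot id is stored through the provided pointer.
 *	int enclave_fd = ioctl(ne_dev_fd, NE_CREATE_VM, &slot_uid);
 */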
/**
 * ne_ioctl() - Ioctl function provided by the NE misc device.
 * @file:	File associated with this ioctl function.
 * @cmd:	The command that is set for the ioctl call.
 * @arg:	The argument that is provided for the ioctl call.
 *
 * Context: Process context.
 * Return:
 * * Ioctl result (e.g. enclave file descriptor) on success.
 * * Negative return value on failure.
 */
static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case NE_CREATE_VM: {
		int enclave_fd = -1;
		struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev;
		u64 __user *slot_uid = (void __user *)arg;

		mutex_lock(&ne_pci_dev->enclaves_list_mutex);
		enclave_fd = ne_create_vm_ioctl(ne_pci_dev, slot_uid);
		mutex_unlock(&ne_pci_dev->enclaves_list_mutex);

		return enclave_fd;
	}

	default:
		return -ENOTTY;
	}

	return 0;
}

static int __init ne_init(void)
{
	mutex_init(&ne_cpu_pool.mutex);

	return pci_register_driver(&ne_pci_driver);
}

static void __exit ne_exit(void)
{
	pci_unregister_driver(&ne_pci_driver);

	ne_teardown_cpu_pool();
}

module_init(ne_init);
module_exit(ne_exit);

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION("Nitro Enclaves Driver");
MODULE_LICENSE("GPL v2");