// SPDX-License-Identifier: GPL-2.0
/*
 * Generic ASID allocator.
 *
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/slab.h>
#include <linux/mm_types.h>

#include <asm/asid.h>

#define reserved_asid(info, cpu) *per_cpu_ptr((info)->reserved, cpu)

#define ASID_MASK(info)			(~GENMASK((info)->bits - 1, 0))
#define ASID_FIRST_VERSION(info)	(1UL << ((info)->bits))

#define asid2idx(info, asid)		(((asid) & ~ASID_MASK(info)) >> (info)->ctxt_shift)
#define idx2asid(info, idx)		(((idx) << (info)->ctxt_shift) & ~ASID_MASK(info))

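/*
 * Worked example of the encoding above (illustrative values only, not
 * taken from this file): assume (info)->bits == 8 and asid_per_ctxt == 1,
 * i.e. (info)->ctxt_shift == 0. Then:
 *
 *	ASID_MASK(info)          == ~0xffUL      (clears the low 8 bits)
 *	ASID_FIRST_VERSION(info) == 1UL << 8     == 0x100
 *	asid2idx(info, 0x305)    == 0x305 & 0xff == 0x05
 *	idx2asid(info, 0x05)     == 0x05
 *
 * An ASID thus carries its generation in the upper bits and the bitmap
 * index (scaled by asid_per_ctxt) in the low (info)->bits bits.
 */
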
static void flush_context(struct asid_info *info)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info));

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&active_asid(info, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = reserved_asid(info, i);
		__set_bit(asid2idx(info, asid), info->map);
		reserved_asid(info, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&info->flush_pending);
}

static bool check_update_reserved_asid(struct asid_info *info, u64 asid,
				       u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (reserved_asid(info, cpu) == asid) {
			hit = true;
			reserved_asid(info, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct asid_info *info, atomic64_t *pasid,
		       struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(pasid);
	u64 generation = atomic64_read(&info->generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK(info));

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(info, asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(info, asid), info->map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx);
	if (asid != NUM_CTXT_ASIDS(info))
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION(info),
						 &info->generation);
	flush_context(info);

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1);

set_asid:
	__set_bit(asid, info->map);
	cur_idx = asid;
	cpumask_clear(mm_cpumask(mm));
	return idx2asid(info, asid) | generation;
}
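
/*
 * A note on the generation check in asid_new_context() below (numbers are
 * illustrative, assuming info->bits == 8): the expression
 *
 *	(asid ^ atomic64_read(&info->generation)) >> info->bits
 *
 * is non-zero exactly when the generation (upper) bits of the two values
 * differ. For asid == 0x105 and generation == 0x300, the XOR is 0x205 and
 * 0x205 >> 8 == 2, so the ASID is stale and a new one must be allocated.
 */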

/*
 * Generate a new ASID for the context.
 *
 * @info: Pointer to the ASID allocator structure
 * @pasid: Pointer to the current ASID batch allocated. It will be updated
 * with the new ASID batch.
 * @cpu: current CPU ID. Must have been acquired through get_cpu()
 * @mm: The mm_struct the new ASID is allocated for
 */
void asid_new_context(struct asid_info *info, atomic64_t *pasid,
		      unsigned int cpu, struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid;

	raw_spin_lock_irqsave(&info->lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(pasid);
	if ((asid ^ atomic64_read(&info->generation)) >> info->bits) {
		asid = new_context(info, pasid, mm);
		atomic64_set(pasid, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &info->flush_pending))
		info->flush_cpu_ctxt_cb();

	atomic64_set(&active_asid(info, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&info->lock, flags);
}

/*
 * Initialize the ASID allocator
 *
 * @info: Pointer to the asid allocator structure
 * @bits: Width of the hardware ASID in bits, i.e. 1 << @bits ASIDs are
 * available
 * @asid_per_ctxt: Number of ASIDs to allocate per-context. ASIDs are
 * allocated contiguously for a given context. This value should be a power of
 * 2.
 * @flush_cpu_ctxt_cb: Callback invoked on a CPU with a pending flush the
 * next time it picks up an ASID (typically a local TLB invalidation)
 */
int asid_allocator_init(struct asid_info *info,
			u32 bits, unsigned int asid_per_ctxt,
			void (*flush_cpu_ctxt_cb)(void))
{
	info->bits = bits;
	info->ctxt_shift = ilog2(asid_per_ctxt);
	info->flush_cpu_ctxt_cb = flush_cpu_ctxt_cb;
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is always reserved.
	 */
	WARN_ON(NUM_CTXT_ASIDS(info) - 1 <= num_possible_cpus());
	atomic64_set(&info->generation, ASID_FIRST_VERSION(info));
	info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)),
			    sizeof(*info->map), GFP_KERNEL);
	if (!info->map)
		return -ENOMEM;

	raw_spin_lock_init(&info->lock);

	return 0;
}
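
/*
 * Example usage: a minimal sketch of how an architecture might wire up the
 * allocator, kept under "#if 0" because it is illustrative and not part of
 * this file. The helper name check_and_switch_context(), the mm->context.id
 * field and local_flush_tlb_all() are assumptions here (modelled on the
 * lock-free fast path of arch/arm64/mm/context.c); active_asid() and
 * struct asid_info are expected to come from <asm/asid.h>.
 */
#if 0
static struct asid_info asid_info;

/* Rollover callback: invalidate local TLBs so no stale entries survive. */
static void asid_flush_cpu_ctxt(void)
{
	local_flush_tlb_all();
}

static int __init asids_init(void)
{
	/* 16-bit ASID space, one ASID per context. */
	return asid_allocator_init(&asid_info, 16, 1, asid_flush_cpu_ctxt);
}

static void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	u64 asid, old_active_asid;

	asid = atomic64_read(&mm->context.id);

	/*
	 * Fast path: if this CPU's active ASID is set and mm's ASID is
	 * from the current generation, adopt it with a relaxed cmpxchg
	 * and skip taking info->lock altogether.
	 */
	old_active_asid = atomic64_read(&active_asid(&asid_info, cpu));
	if (old_active_asid &&
	    !((asid ^ atomic64_read(&asid_info.generation)) >> asid_info.bits) &&
	    atomic64_cmpxchg_relaxed(&active_asid(&asid_info, cpu),
				     old_active_asid, asid))
		return;

	/* Slow path: allocate a new ASID under info->lock. */
	asid_new_context(&asid_info, &mm->context.id, cpu, mm);
}
#endif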