/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#ifndef _IOVA_H_
#define _IOVA_H_

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/dma-mapping.h>

/* iova structure */
struct iova {
	struct rb_node	node;
	unsigned long	pfn_hi; /* Highest allocated pfn */
	unsigned long	pfn_lo; /* Lowest allocated pfn */
};

struct iova_magazine;
struct iova_cpu_rcache;

#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
#define MAX_GLOBAL_MAGS 32	/* magazines per bin */

struct iova_rcache {
	spinlock_t lock;
	unsigned long depot_size;
	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
	struct iova_cpu_rcache __percpu *cpu_rcaches;
};

struct iova_domain;

/* Call-Back from IOVA code into IOMMU drivers */
typedef void (*iova_flush_cb)(struct iova_domain *domain);

/* Destructor for per-entry data */
typedef void (*iova_entry_dtor)(unsigned long data);

/* Number of entries per Flush Queue */
#define IOVA_FQ_SIZE	256

/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
#define IOVA_FQ_TIMEOUT	10

/* Flush Queue entry for deferred flushing */
struct iova_fq_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	unsigned long data;
	u64 counter; /* Flush counter when this entry was added */
};

/* Per-CPU Flush Queue structure */
struct iova_fq {
	struct iova_fq_entry entries[IOVA_FQ_SIZE];
	unsigned head, tail;
	spinlock_t lock;
};

/* holds all the iova translations for a domain */
struct iova_domain {
	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
	struct rb_root	rbroot;		/* iova domain rbtree root */
	struct rb_node	*cached_node;	/* Save last allocated node */
	struct rb_node	*cached32_node; /* Save last 32-bit allocated node */
	unsigned long	granule;	/* pfn granularity for this domain */
	unsigned long	start_pfn;	/* Lower limit for this domain */
	unsigned long	dma_32bit_pfn;
	unsigned long	max32_alloc_size; /* Size of last failed allocation */
	struct iova_fq __percpu *fq;	/* Flush Queue */

	atomic64_t	fq_flush_start_cnt;	/* Number of TLB flushes that
						   have been started */

	atomic64_t	fq_flush_finish_cnt;	/* Number of TLB flushes that
						   have been finished */

	struct iova	anchor;		/* rbtree lookup anchor */
	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */

	iova_flush_cb	flush_cb;	/* Call-Back function to flush IOMMU
					   TLBs */

	iova_entry_dtor entry_dtor;	/* IOMMU driver specific destructor for
					   iova entry */

	struct timer_list fq_timer;	/* Timer to regularly empty the
					   flush-queues */
	atomic_t fq_timer_on;		/* 1 when timer is active, 0
					   when not */
	bool	best_fit;
};

static inline unsigned long iova_size(struct iova *iova)
{
	return iova->pfn_hi - iova->pfn_lo + 1;
}

static inline unsigned long iova_shift(struct iova_domain *iovad)
{
	return __ffs(iovad->granule);
}

static inline unsigned long iova_mask(struct iova_domain *iovad)
{
	return iovad->granule - 1;
}

static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova & iova_mask(iovad);
}

static inline size_t iova_align(struct iova_domain *iovad, size_t size)
{
	return ALIGN(size, iovad->granule);
}

static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
{
	return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
}

static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
{
	return iova >> iova_shift(iovad);
}

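/*
 * Example (illustrative sketch only; the domain, the 4 KiB granule and the
 * address value below are hypothetical, not taken from this header): how
 * the granule-based helpers above relate to each other.
 *
 *	struct iova_domain iovad;
 *
 *	init_iova_domain(&iovad, SZ_4K, 1);	// granule = 4096
 *	// iova_shift(&iovad) == 12, iova_mask(&iovad) == 0xfff
 *
 *	dma_addr_t dma = 0x12345678;
 *	// iova_pfn(&iovad, dma)      == 0x12345  (dma >> 12)
 *	// iova_offset(&iovad, dma)   == 0x678    (dma & 0xfff)
 *	// iova_align(&iovad, 0x1001) == 0x2000   (rounded up to the granule)
 */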

#if IS_ENABLED(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);

struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
void free_iova(struct iova_domain *iovad, unsigned long pfn);
void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
			unsigned long limit_pfn, bool size_aligned);
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
		    unsigned long size);
void queue_iova(struct iova_domain *iovad, unsigned long pfn,
		unsigned long pages, unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
			      unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
			  unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
		      unsigned long start_pfn);
bool has_iova_flush_queue(struct iova_domain *iovad);
int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
				   struct iova *iova, unsigned long pfn_lo,
				   unsigned long pfn_hi);
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);

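/*
 * Typical call flow (illustrative sketch only; the granule, limit and
 * allocation size chosen here are assumptions made for the example):
 *
 *	struct iova_domain iovad;
 *	struct iova *new_iova;
 *
 *	iova_cache_get();
 *	init_iova_domain(&iovad, SZ_4K, 1);
 *
 *	// Allocate a size-aligned range of 16 granules below a 32-bit limit.
 *	new_iova = alloc_iova(&iovad, 16,
 *			      DMA_BIT_MASK(32) >> iova_shift(&iovad), true);
 *	if (new_iova)
 *		__free_iova(&iovad, new_iova);
 *
 *	put_iova_domain(&iovad);
 *	iova_cache_put();
 */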
#else
static inline int iova_cache_get(void)
{
	return -ENOTSUPP;
}

static inline void iova_cache_put(void)
{
}

static inline struct iova *alloc_iova_mem(void)
{
	return NULL;
}

static inline void free_iova_mem(struct iova *iova)
{
}

static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
{
}

static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
{
}

static inline struct iova *alloc_iova(struct iova_domain *iovad,
				      unsigned long size,
				      unsigned long limit_pfn,
				      bool size_aligned)
{
	return NULL;
}

static inline void free_iova_fast(struct iova_domain *iovad,
				  unsigned long pfn,
				  unsigned long size)
{
}

static inline void queue_iova(struct iova_domain *iovad, unsigned long pfn,
			      unsigned long pages, unsigned long data)
{
}

static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
					    unsigned long size,
					    unsigned long limit_pfn,
					    bool flush_rcache)
{
	return 0;
}

static inline struct iova *reserve_iova(struct iova_domain *iovad,
					unsigned long pfn_lo,
					unsigned long pfn_hi)
{
	return NULL;
}

static inline void copy_reserved_iova(struct iova_domain *from,
				      struct iova_domain *to)
{
}

static inline void init_iova_domain(struct iova_domain *iovad,
				    unsigned long granule,
				    unsigned long start_pfn)
{
}

static inline bool has_iova_flush_queue(struct iova_domain *iovad)
{
	return false;
}

static inline int init_iova_flush_queue(struct iova_domain *iovad,
					iova_flush_cb flush_cb,
					iova_entry_dtor entry_dtor)
{
	return -ENODEV;
}

static inline struct iova *find_iova(struct iova_domain *iovad,
				     unsigned long pfn)
{
	return NULL;
}

static inline void put_iova_domain(struct iova_domain *iovad)
{
}

static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
						 struct iova *iova,
						 unsigned long pfn_lo,
						 unsigned long pfn_hi)
{
	return NULL;
}

static inline void free_cpu_cached_iovas(unsigned int cpu,
					 struct iova_domain *iovad)
{
}
#endif

#endif