Searched refs:VTD_PAGE_SIZE (Results 1 - 8 of 8) sorted by relevance
/kernel/linux/linux-6.6/drivers/iommu/intel/
iommu.h
      33: #define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)  [macro definition]
      35: #define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
     794: return IS_ALIGNED((unsigned long)pte, VTD_PAGE_SIZE);  [in first_pte_in_page()]
     800: (struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;  [in nr_pte_to_next_page()]
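Both macros are plain power-of-two arithmetic. A minimal userspace sketch, assuming VTD_PAGE_SHIFT is 12 (4 KiB VT-d pages) and a mask equivalent to the kernel's VTD_PAGE_MASK, shows what VTD_PAGE_ALIGN does to an arbitrary address:

    /* Minimal sketch of the iommu.h macros; VTD_PAGE_SHIFT == 12 and the
     * mask form are assumptions matching the usual kernel values. */
    #include <stdio.h>

    #define VTD_PAGE_SHIFT       12
    #define VTD_PAGE_SIZE        (1UL << VTD_PAGE_SHIFT)
    #define VTD_PAGE_MASK        (~(VTD_PAGE_SIZE - 1))
    #define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

    int main(void)
    {
        unsigned long addr = 0x12345;

        /* Rounds up to the next 4 KiB boundary: prints 0x13000. */
        printf("VTD_PAGE_ALIGN(%#lx) = %#lx\n", addr, VTD_PAGE_ALIGN(addr));
        return 0;
    }
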
dmar.c
     893: addr = ioremap(drhd->address, VTD_PAGE_SIZE);  [in dmar_validate_one_drhd()]
     895: addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);  [in dmar_validate_one_drhd()]
     907: early_iounmap(addr, VTD_PAGE_SIZE);  [in dmar_validate_one_drhd()]
    1621: if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))  [in qi_flush_dev_iotlb_pasid()]
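The check on line 1621 enforces the queued-invalidation rule that a flush covering 2^size_order pages must start on a boundary of that size. A standalone sketch of the same test, with macro values assumed rather than taken from the header:

    /* Sketch of the alignment rule from qi_flush_dev_iotlb_pasid(): a
     * flush of 2^size_order pages must start on a matching boundary. */
    #include <stdio.h>

    #define VTD_PAGE_SHIFT   12
    #define VTD_PAGE_SIZE    (1UL << VTD_PAGE_SHIFT)
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    int main(void)
    {
        unsigned long addr = 0x200000; /* 2 MiB boundary */
        unsigned int size_order = 9;   /* 2^9 pages == 2 MiB */

        if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
            printf("flush at %#lx would be rejected\n", addr);
        else
            printf("flush of %u pages at %#lx is aligned\n",
                   1u << size_order, addr);
        return 0;
    }
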
iommu.c
      35: #define ROOT_SIZE VTD_PAGE_SIZE
      36: #define CONTEXT_SIZE VTD_PAGE_SIZE
     145: #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
     947: domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);  [in pfn_to_dma_pte()]
    2259: pteval += lvl_pages * VTD_PAGE_SIZE;  [in __domain_mapping()]
    2611: VTD_PAGE_SIZE);  [in copy_context_table()]
    2663: __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);  [in copy_context_table()]
    4198: if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))  [in intel_iommu_unmap()]
    4199: size = VTD_PAGE_SIZE << level_to_offset_bits(level);  [in intel_iommu_unmap()]
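ROOT_ENTRY_NR on line 145 is why the page-sized root table holds exactly one entry per possible PCI bus number. A sketch with a stand-in struct using the hardware's 16-byte root-entry layout (two u64 halves; the layout is an assumption for this demo) makes the arithmetic concrete:

    /* The ROOT_ENTRY_NR arithmetic; struct root_entry is a stand-in with
     * a 16-byte (two u64) layout. */
    #include <stdio.h>
    #include <stdint.h>

    #define VTD_PAGE_SIZE 4096UL

    struct root_entry {
        uint64_t lo;
        uint64_t hi;
    };

    #define ROOT_ENTRY_NR (VTD_PAGE_SIZE / sizeof(struct root_entry))

    int main(void)
    {
        /* 4096 / 16 = 256 entries, one per possible PCI bus number. */
        printf("ROOT_ENTRY_NR = %zu\n", (size_t)ROOT_ENTRY_NR);
        return 0;
    }
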
pasid.c
     222: clflush_cache_range(entries, VTD_PAGE_SIZE);  [in intel_pasid_get_entry()]
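Here clflush_cache_range() writes a page-sized PASID table back from the CPU caches so a non-coherent IOMMU reads the updated entries. A hypothetical userspace analogue using the x86 CLFLUSH intrinsic, assuming a 64-byte cache line (the kernel derives the real line size at runtime):

    /* Hypothetical analogue of clflush_cache_range(): flush a page-sized
     * table one cache line at a time. x86-only sketch. */
    #include <stdint.h>
    #include <stdlib.h>
    #include <emmintrin.h> /* _mm_clflush(), _mm_mfence() */

    #define VTD_PAGE_SIZE 4096UL
    #define CACHE_LINE    64UL

    static void flush_cache_range(void *addr, size_t size)
    {
        uintptr_t p   = (uintptr_t)addr & ~(CACHE_LINE - 1);
        uintptr_t end = (uintptr_t)addr + size;

        _mm_mfence();               /* order earlier table writes */
        for (; p < end; p += CACHE_LINE)
            _mm_clflush((const void *)p);
        _mm_mfence();               /* make the flushes globally visible */
    }

    int main(void)
    {
        void *table;

        if (posix_memalign(&table, VTD_PAGE_SIZE, VTD_PAGE_SIZE))
            return 1;
        flush_cache_range(table, VTD_PAGE_SIZE);
        free(table);
        return 0;
    }
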
/kernel/linux/linux-5.10/drivers/iommu/intel/
iommu.c
      53: #define ROOT_SIZE VTD_PAGE_SIZE
      54: #define CONTEXT_SIZE VTD_PAGE_SIZE
     185: #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
    1051: domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);  [in pfn_to_dma_pte()]
    2024: init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);  [in dmar_init_reserved_ranges()]
    2489: pteval += lvl_pages * VTD_PAGE_SIZE;  [in __domain_mapping()]
    3083: VTD_PAGE_SIZE);  [in copy_context_table()]
    3153: __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);  [in copy_context_table()]
    3875: size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE);  [in bounce_map_single()]
    3916: if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {  [in bounce_map_single()]
    [all...]
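Lines 3875 and 3916 show two common alignment idioms: ALIGN() rounds a buffer size up to whole VT-d pages, and OR-ing paddr with size lets a single IS_ALIGNED() test catch either value being unaligned. A self-contained sketch, with macro definitions assumed to mirror the kernel's:

    /* Sketch of the two idioms in bounce_map_single(). */
    #include <stdio.h>

    #define VTD_PAGE_SIZE    4096UL
    #define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    int main(void)
    {
        unsigned long paddr = 0x1000; /* page-aligned start */
        unsigned long size  = 0x1234; /* not a whole number of pages */

        /* 0x1234 rounds up to 0x2000: two full pages get mapped. */
        printf("aligned_size = %#lx\n", ALIGN(size, VTD_PAGE_SIZE));

        /* A low bit set in either value makes the OR unaligned, so one
         * test covers both paddr and size. */
        printf("paddr|size page-aligned: %s\n",
               IS_ALIGNED(paddr | size, VTD_PAGE_SIZE) ? "yes" : "no");
        return 0;
    }
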
dmar.c
     887: addr = ioremap(drhd->address, VTD_PAGE_SIZE);  [in dmar_validate_one_drhd()]
     889: addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);  [in dmar_validate_one_drhd()]
     901: early_iounmap(addr, VTD_PAGE_SIZE);  [in dmar_validate_one_drhd()]
     967: iommu->reg_size = VTD_PAGE_SIZE;  [in map_iommu()]
    1502: if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))  [in qi_flush_dev_iotlb_pasid()]
pasid.c
     283: clflush_cache_range(entries, VTD_PAGE_SIZE);  [in intel_pasid_get_entry()]
/kernel/linux/linux-5.10/include/linux/ |
intel-iommu.h
      31: #define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)  [macro definition]
      33: #define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
Completed in 24 milliseconds