/kernel/linux/linux-5.10/drivers/iommu/

omap-iopgtable.h
     91  #define iopgd_offset(obj, da)    ((obj)->iopgd + iopgd_index(da))
     93  #define iopgd_page_paddr(iopgd)  (*iopgd & ~((1 << 10) - 1))
     94  #define iopgd_page_vaddr(iopgd)  ((u32 *)phys_to_virt(iopgd_page_paddr(iopgd)))
     98  #define iopte_offset(iopgd, da)  (iopgd_page_vaddr(iopgd) + iopte_index(da))
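
The four macros above are the whole two-level walk: iopgd_offset() indexes the 16 KiB first-level table behind obj->iopgd, iopgd_page_paddr() masks the low 10 bits off a first-level entry to recover the physical address of the 1 KiB second-level table, iopgd_page_vaddr() turns that back into a kernel pointer, and iopte_offset() indexes into the second-level table. The following stand-alone sketch re-creates that walk in user space; the shift values, the iopgd_index()/iopte_index() helpers and the IOPGD_TABLE bit are assumptions (they are not part of the excerpt), and a small array stands in for physical memory so phys_to_virt()/virt_to_phys() can be mimicked.

/* Stand-alone sketch of the walk done by iopgd_offset()/iopte_offset().
 * All constants below are assumed, not taken from the excerpt; a flat
 * word array emulates physical memory so the example runs in user space. */
#include <stdint.h>
#include <stdio.h>

#define IOPGD_SHIFT     20                                   /* assumed: 1 MiB per first-level entry  */
#define IOPTE_SHIFT     12                                   /* assumed: 4 KiB per second-level entry */
#define PTRS_PER_IOPGD  (1u << (32 - IOPGD_SHIFT))           /* 4096 entries -> 16 KiB table          */
#define PTRS_PER_IOPTE  (1u << (IOPGD_SHIFT - IOPTE_SHIFT))  /*  256 entries ->  1 KiB table          */
#define IOPGD_TABLE     (1u << 0)                            /* assumed "points to an L2 table" bit   */

static uint32_t phys_mem[16 * 1024];   /* 64 KiB of toy "physical" memory, word aligned */
static void *phys_to_virt(uint32_t pa) { return (uint8_t *)phys_mem + pa; }
static uint32_t virt_to_phys(void *va) { return (uint32_t)((uint8_t *)va - (uint8_t *)phys_mem); }

static uint32_t iopgd_index(uint32_t da) { return (da >> IOPGD_SHIFT) & (PTRS_PER_IOPGD - 1); }
static uint32_t iopte_index(uint32_t da) { return (da >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1); }

int main(void)
{
        uint32_t *iopgd = phys_to_virt(0x0000);   /* 16 KiB first-level table at "PA" 0        */
        uint32_t *l2    = phys_to_virt(0x4000);   /*  1 KiB second-level table, 1 KiB aligned  */
        uint32_t da     = 0x12345000;             /* device address to translate               */

        /* Install the second-level table, as in "*iopgd = virt_to_phys(iopte) | IOPGD_TABLE". */
        iopgd[iopgd_index(da)] = virt_to_phys(l2) | IOPGD_TABLE;
        l2[iopte_index(da)]    = 0xabcde000;      /* pretend physical page for this da         */

        /* The walk: iopgd_offset(), iopgd_page_paddr(), iopgd_page_vaddr(), iopte_offset(). */
        uint32_t *pgd_ent = iopgd + iopgd_index(da);
        uint32_t  l2_pa   = *pgd_ent & ~((1u << 10) - 1);
        uint32_t *pte_ent = (uint32_t *)phys_to_virt(l2_pa) + iopte_index(da);

        printf("da 0x%08x -> *pgd 0x%08x -> *pte 0x%08x\n", da, *pgd_ent, *pte_ent);
        return 0;
}
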
omap-iommu.c
    159  if (!obj->iopgd || !IS_ALIGNED((unsigned long)obj->iopgd, SZ_16K))   in omap2_iommu_enable()
    162  pa = virt_to_phys(obj->iopgd);                                       in omap2_iommu_enable()
    485  static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,          in iopte_alloc() (argument)
    492  if (*iopgd)                                                          in iopte_alloc()
    502  if (!*iopgd) {                                                       in iopte_alloc()
    526  *iopgd = virt_to_phys(iopte) | IOPGD_TABLE;                          in iopte_alloc()
    536  iopte = iopte_offset(iopgd, da);                                     in iopte_alloc()
    537  *pt_dma = iopgd_page_paddr(iopgd);                                   in iopte_alloc()
    540  __func__, da, iopgd, *iopg                                           in iopte_alloc()
    547  u32 *iopgd = iopgd_offset(obj, da);                                  in iopgd_alloc_section() (local)
    563  u32 *iopgd = iopgd_offset(obj, da);                                  in iopgd_alloc_super() (local)
    581  u32 *iopgd = iopgd_offset(obj, da);                                  in iopte_alloc_page() (local)
    600  u32 *iopgd = iopgd_offset(obj, da);                                  in iopte_alloc_large() (local)
    688  u32 *iopgd, *iopte = NULL;                                           in iopgtable_lookup_entry() (local)
    704  u32 *iopgd = iopgd_offset(obj, da);                                  in iopgtable_clear_entry_core() (local)
    781  u32 *iopgd;                                                          in iopgtable_clear_entry_all() (local)
    808  u32 *iopgd, *iopte;                                                  in iommu_fault_handler() (local)
    847  omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)                in omap_iommu_attach() (argument)
    [all...]
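
The iopte_alloc() lines picked out above show the allocate-or-reuse path for second-level tables: the first-level entry is tested, a fresh 1 KiB table is allocated (in the driver this happens with the page-table lock dropped, since the allocation may sleep), the entry is re-tested before the new table is installed with the IOPGD_TABLE bit, and iopte_offset()/iopgd_page_paddr() then return the entry and its table's physical address. The sketch below reproduces only that control flow; the function name, the pthread mutex and the virtual pointer stored in the entry (the driver stores virt_to_phys()) are stand-ins, not the driver's own code.

/* User-space sketch of the check / allocate / re-check flow in iopte_alloc().
 * The mutex, the helper name and the pointer kept in the entry are
 * illustrative; the driver keeps a physical address and a spinlock. */
#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

#define IOPGD_TABLE     ((uintptr_t)1)   /* assumed "points to an L2 table" bit */
#define PTRS_PER_IOPTE  256u             /* 256 x 32-bit entries = 1 KiB table  */

static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Return the second-level table behind *iopgd, allocating it on first use.
 * Called with page_table_lock held, mirroring the driver's convention. */
static uint32_t *iopte_alloc_sketch(uintptr_t *iopgd)
{
        uint32_t *iopte;

        if (*iopgd)                               /* "if (*iopgd)": table already exists */
                goto pte_ready;

        /* Allocate outside the lock, since allocation may block. */
        pthread_mutex_unlock(&page_table_lock);
        iopte = calloc(PTRS_PER_IOPTE, sizeof(*iopte));
        pthread_mutex_lock(&page_table_lock);

        if (!*iopgd) {                            /* "if (!*iopgd)": nobody raced us */
                if (!iopte)
                        return NULL;
                /* Driver: "*iopgd = virt_to_phys(iopte) | IOPGD_TABLE"; the sketch
                 * stores the virtual pointer so it runs without an MMU model. */
                *iopgd = (uintptr_t)iopte | IOPGD_TABLE;
        } else {
                free(iopte);                      /* lost the race, drop our copy */
        }

pte_ready:
        return (uint32_t *)(*iopgd & ~IOPGD_TABLE);
}

int main(void)
{
        uintptr_t pgd_entry = 0;

        pthread_mutex_lock(&page_table_lock);
        uint32_t *first  = iopte_alloc_sketch(&pgd_entry);   /* allocates the table */
        uint32_t *second = iopte_alloc_sketch(&pgd_entry);   /* reuses the same one */
        pthread_mutex_unlock(&page_table_lock);

        assert(first && first == second);
        free(first);
        return 0;
}

Build with -pthread; both calls return the same table, which is the point of the re-check: two mappers racing on the same first-level slot end up sharing one second-level table instead of leaking one.
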
omap-iommu-debug.c
    184  u32 *iopgd, *iopte;                                 in dump_ioptable() (local)
    189  iopgd = iopgd_offset(obj, 0);                       in dump_ioptable()
    190  for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) {     in dump_ioptable()
    191  if (!*iopgd)                                        in dump_ioptable()
    194  if (!(*iopgd & IOPGD_TABLE)) {                      in dump_ioptable()
    196  seq_printf(s, "1: 0x%08x 0x%08x\n", da, *iopgd);    in dump_ioptable()
    200  iopte = iopte_offset(iopgd, 0);                     in dump_ioptable()
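
dump_ioptable() above walks every first-level entry: empty entries are skipped, entries without the IOPGD_TABLE bit are printed as a single "1:" line, and table entries are descended into via iopte_offset(iopgd, 0). A reduced user-space version of that traversal is sketched below; the table sizes, shift values and the "2:" line format for second-level entries are assumptions, and a plain pointer replaces the driver's iopgd_page_vaddr() conversion.

/* Sketch of the dump_ioptable() walk: print "1:" for direct (section-style)
 * first-level entries and "2:" for the valid entries of each second-level
 * table.  Sizes, shifts and the IOPGD_TABLE bit are assumed values. */
#include <stdint.h>
#include <stdio.h>

#define PTRS_PER_IOPGD  4096u            /* assumed: 16 KiB first-level table  */
#define PTRS_PER_IOPTE  256u             /* assumed:  1 KiB second-level table */
#define IOPGD_SHIFT     20
#define IOPTE_SHIFT     12
#define IOPGD_TABLE     ((uintptr_t)1)

static void dump_ioptable_sketch(const uintptr_t *iopgd)
{
        for (unsigned int i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) {
                uint32_t da = i << IOPGD_SHIFT;          /* device address covered by entry i */

                if (!*iopgd)                             /* "if (!*iopgd)": unmapped */
                        continue;

                if (!(*iopgd & IOPGD_TABLE)) {           /* section / supersection entry */
                        printf("1: 0x%08x 0x%08x\n", da, (uint32_t)*iopgd);
                        continue;
                }

                /* Second-level table: the driver gets here via iopte_offset(iopgd, 0). */
                const uint32_t *iopte = (const uint32_t *)(*iopgd & ~IOPGD_TABLE);
                for (unsigned int j = 0; j < PTRS_PER_IOPTE; j++)
                        if (iopte[j])
                                printf("2: 0x%08x 0x%08x\n", da | (j << IOPTE_SHIFT), iopte[j]);
        }
}

int main(void)
{
        static uintptr_t iopgd[PTRS_PER_IOPGD];
        static uint32_t  iopte[PTRS_PER_IOPTE];

        iopgd[1] = 0x80000002;                           /* pretend 1 MiB section at da 0x00100000 */
        iopte[3] = 0x81003002;                           /* pretend 4 KiB page                     */
        iopgd[2] = (uintptr_t)iopte | IOPGD_TABLE;       /* second-level table at da 0x00200000    */

        dump_ioptable_sketch(iopgd);
        return 0;
}
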
omap-iommu.h
     65  * We don't change iopgd for a situation like pgd for a task,
     68  u32 *iopgd;                                         (member)
     69  spinlock_t page_table_lock;    /* protect iopgd */
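
The header excerpt names the data everything else operates on: struct omap_iommu keeps the iopgd pointer to the first-level table, which stays fixed for the life of the attachment (it is not swapped per task the way a CPU pgd is), and page_table_lock serializes updates to the entries behind it. The fragment below is a reduced model of that relationship; the struct tag, the pthread mutex and the helper are illustrative, and only the field names follow the header.

/* Reduced model of the two fields excerpted from struct omap_iommu:
 * iopgd stays constant once attached, page_table_lock guards the entries. */
#include <pthread.h>
#include <stdint.h>

#define PTRS_PER_IOPGD  4096u            /* assumed 16 KiB first-level table */

struct omap_iommu_sketch {
        uint32_t        *iopgd;                  /* first-level table, set once at attach */
        pthread_mutex_t  page_table_lock;        /* protects the entries behind iopgd     */
};

/* All entry updates go through the lock; the iopgd pointer itself never changes. */
static void set_pgd_entry(struct omap_iommu_sketch *obj, unsigned int idx, uint32_t val)
{
        pthread_mutex_lock(&obj->page_table_lock);
        obj->iopgd[idx] = val;
        pthread_mutex_unlock(&obj->page_table_lock);
}

int main(void)
{
        static uint32_t table[PTRS_PER_IOPGD];   /* stands in for the 16 KiB table */
        struct omap_iommu_sketch obj = { .iopgd = table };

        pthread_mutex_init(&obj.page_table_lock, NULL);
        set_pgd_entry(&obj, 291, 0x80000002);    /* e.g. install a 1 MiB section entry */
        pthread_mutex_destroy(&obj.page_table_lock);
        return 0;
}
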
/kernel/linux/linux-6.6/drivers/iommu/

omap-iopgtable.h
     91  #define iopgd_offset(obj, da)    ((obj)->iopgd + iopgd_index(da))
     93  #define iopgd_page_paddr(iopgd)  (*iopgd & ~((1 << 10) - 1))
     94  #define iopgd_page_vaddr(iopgd)  ((u32 *)phys_to_virt(iopgd_page_paddr(iopgd)))
     98  #define iopte_offset(iopgd, da)  (iopgd_page_vaddr(iopgd) + iopte_index(da))

omap-iommu.c
    158  if (!obj->iopgd || !IS_ALIGNED((unsigned long)obj->iopgd, SZ_16K))   in omap2_iommu_enable()
    161  pa = virt_to_phys(obj->iopgd);                                       in omap2_iommu_enable()
    484  static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,          in iopte_alloc() (argument)
    491  if (*iopgd)                                                          in iopte_alloc()
    501  if (!*iopgd) {                                                       in iopte_alloc()
    525  *iopgd = virt_to_phys(iopte) | IOPGD_TABLE;                          in iopte_alloc()
    535  iopte = iopte_offset(iopgd, da);                                     in iopte_alloc()
    536  *pt_dma = iopgd_page_paddr(iopgd);                                   in iopte_alloc()
    539  __func__, da, iopgd, *iopg                                           in iopte_alloc()
    546  u32 *iopgd = iopgd_offset(obj, da);                                  in iopgd_alloc_section() (local)
    562  u32 *iopgd = iopgd_offset(obj, da);                                  in iopgd_alloc_super() (local)
    580  u32 *iopgd = iopgd_offset(obj, da);                                  in iopte_alloc_page() (local)
    599  u32 *iopgd = iopgd_offset(obj, da);                                  in iopte_alloc_large() (local)
    687  u32 *iopgd, *iopte = NULL;                                           in iopgtable_lookup_entry() (local)
    703  u32 *iopgd = iopgd_offset(obj, da);                                  in iopgtable_clear_entry_core() (local)
    780  u32 *iopgd;                                                          in iopgtable_clear_entry_all() (local)
    807  u32 *iopgd, *iopte;                                                  in iommu_fault_handler() (local)
    846  omap_iommu_attach(struct omap_iommu *obj, u32 *iopgd)                in omap_iommu_attach() (argument)
    [all...]

omap-iommu-debug.c
    184  u32 *iopgd, *iopte;                                 in dump_ioptable() (local)
    189  iopgd = iopgd_offset(obj, 0);                       in dump_ioptable()
    190  for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) {     in dump_ioptable()
    191  if (!*iopgd)                                        in dump_ioptable()
    194  if (!(*iopgd & IOPGD_TABLE)) {                      in dump_ioptable()
    196  seq_printf(s, "1: 0x%08x 0x%08x\n", da, *iopgd);    in dump_ioptable()
    200  iopte = iopte_offset(iopgd, 0);                     in dump_ioptable()

omap-iommu.h
     65  * We don't change iopgd for a situation like pgd for a task,
     68  u32 *iopgd;                                         (member)
     69  spinlock_t page_table_lock;    /* protect iopgd */