Lines Matching defs:table
151 /* Writes the specific IOMMU for a device into the PCI segment rlookup table */
251 * The IVRS alias stored in the alias table may not be
272 * bus. The IVRS table may know about a quirk that we don't.
1338 * Command send function for invalidating a device table entry
1509 * about domains is the page table mapping the DMA address space they
1621 /* First mask out possible old values for GCR3 table */
1628 /* Encode GCR3 table into DTE */
1668 /* remove entry from the device table seen by the hardware */
1702 /* Update device table */
1813 * In case of using AMD_IOMMU_V1 page table mode and the device
1814 * is enabled for PPR/ATS support (using v2 table),
1996 /* Update device table */
2099 * Force IOMMU v1 page table when iommu=pt and
2154 /* V2 with 4/5 level page table */
2530 /* Number of GCR3 table levels required */
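The GCR3-related hits (1621, 1628, 2530) cover the table used for PASID (v2) translation: stale GCR3 fields are masked out of the device table entry before the new table pointer is encoded, and the number of table levels follows from how many PASIDs must be reachable. A minimal sketch of that level calculation, assuming each GCR3 level resolves 9 PASID bits (512-entry tables); the helper name and the zero-PASID guard are illustrative, not the driver's exact code:

#include <linux/bitops.h>
#include <linux/kernel.h>

/*
 * Sketch: GCR3 table levels needed to reach 'pasids' PASIDs, returned
 * zero-based (0 = single-level table), assuming 9 PASID bits per level.
 */
static int example_gcr3_levels(int pasids)
{
	int bits;

	if (pasids <= 0)
		return 0;

	bits = get_count_order(pasids);	/* PASID bits that must be covered */
	return bits ? DIV_ROUND_UP(bits, 9) - 1 : 0;
}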
2871 struct irq_remap_table *table)
2878 dte |= iommu_virt_to_phys(table->table);
2888 struct irq_remap_table *table;
2896 table = pci_seg->irq_lookup_table[devid];
2897 if (WARN_ONCE(!table, "%s: no table for devid %x:%x\n",
2901 return table;
2906 struct irq_remap_table *table;
2908 table = kzalloc(sizeof(*table), GFP_KERNEL);
2909 if (!table)
2912 table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_KERNEL);
2913 if (!table->table) {
2914 kfree(table);
2917 raw_spin_lock_init(&table->lock);
2920 memset(table->table, 0,
2923 memset(table->table, 0,
2925 return table;
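Lines 2906-2925 are the body of the helper that builds a fresh remapping table: a kzalloc'd wrapper, an entry array carved out of a dedicated kmem_cache, a raw spinlock so the table can be touched from interrupt context, and a zeroed entry array. A self-contained sketch of that pattern; the cache pointer, table size, and the example_ names are placeholders rather than the driver's actual constants:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

/* Simplified copy of the table wrapper the hits above operate on. */
struct irq_remap_table {
	raw_spinlock_t lock;
	unsigned int min_index;
	u32 *table;
};

/* Illustrative stand-ins for the driver's cache and table geometry. */
static struct kmem_cache *example_irq_cache;	/* created elsewhere via kmem_cache_create() */
#define EXAMPLE_MAX_IRQS_PER_TABLE 256

static struct irq_remap_table *example_alloc_irq_table(void)
{
	struct irq_remap_table *table;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return NULL;

	table->table = kmem_cache_alloc(example_irq_cache, GFP_KERNEL);
	if (!table->table) {
		kfree(table);
		return NULL;
	}
	raw_spin_lock_init(&table->lock);

	/* A new table starts with every entry free (all zero). */
	memset(table->table, 0, EXAMPLE_MAX_IRQS_PER_TABLE * sizeof(u32));

	return table;
}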
2929 struct irq_remap_table *table)
2933 pci_seg->irq_lookup_table[devid] = table;
2934 set_dte_irq_entry(iommu, devid, table);
2941 struct irq_remap_table *table = data;
2949 pci_seg->irq_lookup_table[alias] = table;
2950 set_dte_irq_entry(iommu, alias, table);
2959 struct irq_remap_table *table = NULL;
2968 table = pci_seg->irq_lookup_table[devid];
2969 if (table)
2973 table = pci_seg->irq_lookup_table[alias];
2974 if (table) {
2975 set_remap_table_entry(iommu, devid, table);
2980 /* Nothing there yet, allocate new irq remapping table */
2987 table = pci_seg->irq_lookup_table[devid];
2988 if (table)
2991 table = pci_seg->irq_lookup_table[alias];
2992 if (table) {
2993 set_remap_table_entry(iommu, devid, table);
2997 table = new_table;
3002 table);
3004 set_remap_table_entry(iommu, devid, table);
3007 set_remap_table_entry(iommu, alias, table);
3016 kmem_cache_free(amd_iommu_irq_cache, new_table->table);
3019 return table;
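Lines 2959-3019 show the surrounding lookup flow: check whether the device or its IVRS alias already has a table, allocate a new one outside the lock if not, then recheck under the lock before publishing it for both IDs and freeing the unused copy if another CPU won the race. A condensed sketch of that check/alloc/recheck pattern, reusing the structures from the sketch above; example_lookup[] and example_install() stand in for the per-segment irq_lookup_table and the DTE programming done via set_dte_irq_entry():

static DEFINE_SPINLOCK(example_lookup_lock);
static struct irq_remap_table *example_lookup[256];	/* tiny; the real table is per PCI segment, devid-indexed */

static void example_install(u16 devid, struct irq_remap_table *table)
{
	example_lookup[devid] = table;
	/* real driver: also programs the DTE, set_dte_irq_entry(iommu, devid, table) */
}

static struct irq_remap_table *example_get_or_alloc(u16 devid, u16 alias)
{
	struct irq_remap_table *table, *new_table = NULL;
	unsigned long flags;

	spin_lock_irqsave(&example_lookup_lock, flags);

	table = example_lookup[devid];
	if (table)
		goto out_unlock;

	table = example_lookup[alias];
	if (table) {
		example_install(devid, table);	/* share the alias's table */
		goto out_unlock;
	}
	spin_unlock_irqrestore(&example_lookup_lock, flags);

	/* Nothing there yet: allocate outside the lock, then recheck. */
	new_table = example_alloc_irq_table();
	if (!new_table)
		return NULL;

	spin_lock_irqsave(&example_lookup_lock, flags);

	table = example_lookup[devid];
	if (table)
		goto out_free;

	table = example_lookup[alias];
	if (table) {
		example_install(devid, table);
		goto out_free;
	}

	table = new_table;
	new_table = NULL;
	example_install(devid, table);
	if (devid != alias)
		example_install(alias, table);

out_free:
	if (new_table) {		/* lost the race: drop our copy */
		kmem_cache_free(example_irq_cache, new_table->table);
		kfree(new_table);
	}
out_unlock:
	spin_unlock_irqrestore(&example_lookup_lock, flags);
	return table;
}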
3025 struct irq_remap_table *table;
3029 table = alloc_irq_table(iommu, devid, pdev);
3030 if (!table)
3036 raw_spin_lock_irqsave(&table->lock, flags);
3038 /* Scan table for free entries */
3039 for (index = ALIGN(table->min_index, alignment), c = 0;
3041 if (!iommu->irte_ops->is_allocated(table, index)) {
3051 iommu->irte_ops->set_allocated(table, index - c + 1);
3063 raw_spin_unlock_irqrestore(&table->lock, flags);
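Lines 3025-3063 scan the table for a run of free entries, aligned when the caller asks for a power-of-two block, and mark the run allocated before dropping the lock. A sketch of that scan, again reusing the u32-per-entry layout from the allocation sketch, where a non-zero entry means "in use":

#include <linux/errno.h>
#include <linux/log2.h>

/*
 * Sketch: find 'count' consecutive free slots, power-of-two aligned when
 * requested (e.g. multi-MSI), mark them allocated, and return the first
 * index or -ENOSPC.
 */
static int example_alloc_irq_index(struct irq_remap_table *table,
				   int count, bool align)
{
	int alignment = align ? roundup_pow_of_two(count) : 1;
	unsigned long flags;
	int index, c;

	raw_spin_lock_irqsave(&table->lock, flags);

	/* Scan the table for a long-enough run of free entries. */
	for (index = ALIGN(table->min_index, alignment), c = 0;
	     index < EXAMPLE_MAX_IRQS_PER_TABLE;) {
		if (table->table[index]) {
			/* Slot in use: restart the run at the next aligned index. */
			c = 0;
			index = ALIGN(index + 1, alignment);
			continue;
		}

		if (++c == count) {
			/* Found a full run: mark it allocated, return its start. */
			for (; c != 0; --c)
				table->table[index - c + 1] = 1;
			index -= count - 1;
			goto out;
		}
		index++;
	}

	index = -ENOSPC;
out:
	raw_spin_unlock_irqrestore(&table->lock, flags);
	return index;
}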
3071 struct irq_remap_table *table;
3076 table = get_irq_table(iommu, devid);
3077 if (!table)
3080 raw_spin_lock_irqsave(&table->lock, flags);
3082 entry = (struct irte_ga *)table->table;
3094 raw_spin_unlock_irqrestore(&table->lock, flags);
3104 struct irq_remap_table *table;
3107 table = get_irq_table(iommu, devid);
3108 if (!table)
3111 raw_spin_lock_irqsave(&table->lock, flags);
3112 table->table[index] = irte->val;
3113 raw_spin_unlock_irqrestore(&table->lock, flags);
3122 struct irq_remap_table *table;
3125 table = get_irq_table(iommu, devid);
3126 if (!table)
3129 raw_spin_lock_irqsave(&table->lock, flags);
3130 iommu->irte_ops->clear_allocated(table, index);
3131 raw_spin_unlock_irqrestore(&table->lock, flags);
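Lines 3071-3131 all follow the same shape: fetch the device's table, take its lock, touch one slot, release the lock. A sketch of the free path; the per-device lookup (get_irq_table()) is elided, and the zero write stands in for whatever the format-specific clear_allocated op does:

/* Sketch: release one remap-table entry under the table lock. */
static int example_free_irte(struct irq_remap_table *table, int index)
{
	unsigned long flags;

	if (!table)
		return -ENODEV;

	raw_spin_lock_irqsave(&table->lock, flags);
	table->table[index] = 0;	/* entry is free again */
	raw_spin_unlock_irqrestore(&table->lock, flags);

	return 0;
}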
3224 static void irte_set_allocated(struct irq_remap_table *table, int index)
3226 table->table[index] = IRTE_ALLOCATED;
3229 static void irte_ga_set_allocated(struct irq_remap_table *table, int index)
3231 struct irte_ga *ptr = (struct irte_ga *)table->table;
3239 static bool irte_is_allocated(struct irq_remap_table *table, int index)
3241 union irte *ptr = (union irte *)table->table;
3247 static bool irte_ga_is_allocated(struct irq_remap_table *table, int index)
3249 struct irte_ga *ptr = (struct irte_ga *)table->table;
3255 static void irte_clear_allocated(struct irq_remap_table *table, int index)
3257 table->table[index] = 0;
3260 static void irte_ga_clear_allocated(struct irq_remap_table *table, int index)
3262 struct irte_ga *ptr = (struct irte_ga *)table->table;
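Lines 3224-3262 are the format-specific irte_ops helpers: the flat entry array is cast to whichever format is active, a legacy 32-bit IRTE counting as allocated when the whole word is non-zero, the 128-bit guest-APIC (GA) format keying off its vector field. A sketch of that split; the GA layout below is a simplified stand-in, not the hardware-defined structure:

struct example_irte_ga {
	u64 lo;
	u64 hi;		/* sketch: vector kept in hi[7:0] */
};

static bool example_irte_is_allocated(void *entries, int index)
{
	u32 *irte = (u32 *)entries + index;

	return *irte != 0;
}

static bool example_irte_ga_is_allocated(void *entries, int index)
{
	struct example_irte_ga *irte = (struct example_irte_ga *)entries + index;

	return (irte->hi & 0xff) != 0;
}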
3383 struct irq_remap_table *table;
3385 table = alloc_irq_table(iommu, devid, NULL);
3386 if (table) {
3387 if (!table->min_index) {
3392 table->min_index = 32;
3394 iommu->irte_ops->set_allocated(table, i);
3396 WARN_ON(table->min_index != 32);
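The closing hits (3383-3396) reserve the first 32 entries the first time a table is used for an IOAPIC, so pin numbers can be used as table indices directly, and bump min_index so later allocations start above them. A sketch of that reservation using the structures from the earlier sketches:

#include <linux/bug.h>

/*
 * Sketch: on first IOAPIC use, mark indices 0..31 as taken so later
 * allocations start at min_index = 32 and IOAPIC pins keep their slots.
 */
static void example_reserve_ioapic_entries(struct irq_remap_table *table)
{
	int i;

	if (!table->min_index) {
		for (i = 0; i < 32; ++i)
			table->table[i] = 1;
		table->min_index = 32;
	}
	WARN_ON(table->min_index != 32);
}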