Lines matching defs:gmap (s390 guest address space management, arch/s390/mm/gmap.c)
23 #include <asm/gmap.h>
42 * @limit: maximum address of the gmap address space
46 static struct gmap *gmap_alloc(unsigned long limit)
48 struct gmap *gmap;
70 gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL_ACCOUNT);
71 if (!gmap)
73 INIT_LIST_HEAD(&gmap->crst_list);
74 INIT_LIST_HEAD(&gmap->children);
75 INIT_LIST_HEAD(&gmap->pt_list);
76 INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL_ACCOUNT);
77 INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC | __GFP_ACCOUNT);
78 INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC | __GFP_ACCOUNT);
79 spin_lock_init(&gmap->guest_table_lock);
80 spin_lock_init(&gmap->shadow_lock);
81 refcount_set(&gmap->ref_count, 1);
86 list_add(&page->lru, &gmap->crst_list);
89 gmap->table = table;
90 gmap->asce = atype | _ASCE_TABLE_LENGTH |
92 gmap->asce_end = limit;
93 return gmap;
96 kfree(gmap);
104 * @limit: maximum size of the gmap address space
108 struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
110 struct gmap *gmap;
113 gmap = gmap_alloc(limit);
114 if (!gmap)
116 gmap->mm = mm;
118 list_add_rcu(&gmap->list, &mm->context.gmap_list);
120 gmap_asce = gmap->asce;
125 return gmap;
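The gmap_create() fragments above are the lifecycle entry point. A minimal usage sketch, modeled on how KVM sets up its per-VM guest address space; the example_* helper names and the 1 TB limit are illustrative, not kernel API:

#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/gmap.h>

/* Create a guest address space on top of the current process's mm.
 * The limit is the highest guest address; (1UL << 40) - 1 is an
 * arbitrary example value (1 TB). */
static struct gmap *example_setup_guest_space(void)
{
	return gmap_create(current->mm, (1UL << 40) - 1);
}

/* Tear the space down again; gmap_remove() also detaches any shadow
 * gmaps and drops the initial reference taken at allocation time. */
static void example_teardown_guest_space(struct gmap *gmap)
{
	gmap_remove(gmap);
}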
129 static void gmap_flush_tlb(struct gmap *gmap)
132 __tlb_flush_idte(gmap->asce);
190 * @gmap: pointer to the guest address space structure
192 * No locks required. There are no references to this gmap anymore.
194 static void gmap_free(struct gmap *gmap)
199 if (!(gmap_is_shadow(gmap) && gmap->removed))
200 gmap_flush_tlb(gmap);
202 list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
204 gmap_radix_tree_free(&gmap->guest_to_host);
205 gmap_radix_tree_free(&gmap->host_to_guest);
207 /* Free additional data for a shadow gmap */
208 if (gmap_is_shadow(gmap)) {
210 list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
212 gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
214 gmap_put(gmap->parent);
217 kfree(gmap);
222 * @gmap: pointer to the guest address space structure
224 * Returns the gmap pointer
226 struct gmap *gmap_get(struct gmap *gmap)
228 refcount_inc(&gmap->ref_count);
229 return gmap;
235 * @gmap: pointer to the guest address space structure
239 void gmap_put(struct gmap *gmap)
241 if (refcount_dec_and_test(&gmap->ref_count))
242 gmap_free(gmap);
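A sketch of the get/put pairing implied by the refcount fragments above, assuming gmap came from gmap_create():

/* Hold an extra reference across a region where the gmap must not be
 * freed underneath us; the final gmap_put() ends up in gmap_free(). */
static void example_hold_gmap(struct gmap *gmap)
{
	gmap_get(gmap);
	/* ... safely use the gmap here ... */
	gmap_put(gmap);
}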
248 * @gmap: pointer to the guest address space structure
250 void gmap_remove(struct gmap *gmap)
252 struct gmap *sg, *next;
255 /* Remove all shadow gmaps linked to this gmap */
256 if (!list_empty(&gmap->children)) {
257 spin_lock(&gmap->shadow_lock);
258 list_for_each_entry_safe(sg, next, &gmap->children, list) {
262 spin_unlock(&gmap->shadow_lock);
264 /* Remove gmap from the per-mm list */
265 spin_lock(&gmap->mm->context.lock);
266 list_del_rcu(&gmap->list);
267 if (list_empty(&gmap->mm->context.gmap_list))
269 else if (list_is_singular(&gmap->mm->context.gmap_list))
270 gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
271 struct gmap, list)->asce;
274 WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
275 spin_unlock(&gmap->mm->context.lock);
278 gmap_put(gmap);
284 * @gmap: pointer to the guest address space structure
286 void gmap_enable(struct gmap *gmap)
288 S390_lowcore.gmap = (unsigned long) gmap;
294 * @gmap: pointer to the guest address space structure
296 void gmap_disable(struct gmap *gmap)
298 S390_lowcore.gmap = 0UL;
303 * gmap_get_enabled - get a pointer to the currently enabled gmap
305 * Returns a pointer to the currently enabled gmap, or NULL if none is enabled.
307 struct gmap *gmap_get_enabled(void)
309 return (struct gmap *) S390_lowcore.gmap;
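gmap_enable()/gmap_disable() just swap the lowcore pointer for the current CPU. A sketch of the pattern KVM follows around entering SIE; the guest-run body is elided and the helper name is illustrative:

/* Make this gmap the active one on the current CPU, run guest code,
 * then deactivate it again. Must not migrate CPUs in between. */
static void example_run_with_gmap(struct gmap *gmap)
{
	gmap_enable(gmap);
	WARN_ON(gmap_get_enabled() != gmap);
	/* ... enter SIE / execute guest instructions here ... */
	gmap_disable(gmap);
}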
316 static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
322 /* since we don't free the gmap table until gmap_free, we can unlock */
328 spin_lock(&gmap->guest_table_lock);
330 list_add(&page->lru, &gmap->crst_list);
336 spin_unlock(&gmap->guest_table_lock);
361 * @gmap: pointer to the guest address space structure
366 static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
371 BUG_ON(gmap_is_shadow(gmap));
372 spin_lock(&gmap->guest_table_lock);
373 entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
378 spin_unlock(&gmap->guest_table_lock);
384 * @gmap: pointer to the guest address space structure
389 static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
393 vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
395 return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
400 * @gmap: pointer to the guest address space structure
406 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
411 BUG_ON(gmap_is_shadow(gmap));
418 mmap_write_lock(gmap->mm);
420 flush |= __gmap_unmap_by_gaddr(gmap, to + off);
421 mmap_write_unlock(gmap->mm);
423 gmap_flush_tlb(gmap);
430 * @gmap: pointer to the guest address space structure
437 int gmap_map_segment(struct gmap *gmap, unsigned long from,
443 BUG_ON(gmap_is_shadow(gmap));
447 from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
451 mmap_write_lock(gmap->mm);
454 flush |= __gmap_unmap_by_gaddr(gmap, to + off);
456 if (radix_tree_insert(&gmap->guest_to_host,
461 mmap_write_unlock(gmap->mm);
463 gmap_flush_tlb(gmap);
466 gmap_unmap_segment(gmap, to, len);
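A sketch of mapping and unmapping one segment, assuming the segment (1 MB) alignment gmap_map_segment() requires; the from/to addresses are arbitrary example values:

#include <asm/pgtable.h>	/* _SEGMENT_SIZE */

static int example_map_one_segment(struct gmap *gmap)
{
	unsigned long from = 0x10000000UL;	/* host (user) address */
	unsigned long to   = 0x00100000UL;	/* guest address */
	int rc;

	rc = gmap_map_segment(gmap, from, to, _SEGMENT_SIZE);
	if (rc)
		return rc;	/* -EINVAL (bad alignment/range) or -ENOMEM */
	/* ... */
	return gmap_unmap_segment(gmap, to, _SEGMENT_SIZE);
}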
473 * @gmap: pointer to guest mapping meta data structure
484 unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
489 radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
490 /* Note: guest_to_host is empty for a shadow gmap */
497 * @gmap: pointer to guest mapping meta data structure
504 unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
508 mmap_read_lock(gmap->mm);
509 rc = __gmap_translate(gmap, gaddr);
510 mmap_read_unlock(gmap->mm);
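gmap_translate() is the locked wrapper around __gmap_translate(). A sketch of probing whether a guest address is currently mapped; IS_ERR_VALUE() catches the -EFAULT return encoded in the unsigned long:

#include <linux/err.h>

static bool example_guest_addr_is_mapped(struct gmap *gmap,
					 unsigned long gaddr)
{
	unsigned long vmaddr = gmap_translate(gmap, gaddr);

	return !IS_ERR_VALUE(vmaddr);	/* -EFAULT means unmapped */
}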
516 * gmap_unlink - disconnect a page table from the gmap shadow tables
524 struct gmap *gmap;
528 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
529 flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
531 gmap_flush_tlb(gmap);
536 static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
541 * @gmap: pointer to guest mapping meta data structure
550 int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
562 BUG_ON(gmap_is_shadow(gmap));
563 /* Create higher level tables in the gmap page table */
564 table = gmap->table;
565 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
568 gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
573 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
576 gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
581 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
584 gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
591 mm = gmap->mm;
604 if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
606 /* Link gmap segment table entry location to page table. */
611 spin_lock(&gmap->guest_table_lock);
613 rc = radix_tree_insert(&gmap->host_to_guest,
629 gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
631 spin_unlock(&gmap->guest_table_lock);
639 * @gmap: pointer to guest mapping meta data structure
646 int gmap_fault(struct gmap *gmap, unsigned long gaddr,
653 mmap_read_lock(gmap->mm);
657 vmaddr = __gmap_translate(gmap, gaddr);
662 if (fixup_user_fault(gmap->mm, vmaddr, fault_flags,
674 rc = __gmap_link(gmap, gaddr, vmaddr);
676 mmap_read_unlock(gmap->mm);
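A sketch of resolving a guest fault for a write access, roughly what the KVM fault path does; FAULT_FLAG_WRITE comes from <linux/mm.h> and the helper name is illustrative:

static int example_resolve_write_fault(struct gmap *gmap,
				       unsigned long gaddr)
{
	/* Faults in the backing page and links it into the gmap tables;
	 * returns -EFAULT if gaddr is not mapped into the guest. */
	return gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE);
}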
684 void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
692 vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
697 vma = vma_lookup(gmap->mm, vmaddr);
702 ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
704 ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
711 void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
716 mmap_read_lock(gmap->mm);
721 radix_tree_lookup(&gmap->guest_to_host,
727 vma = find_vma(gmap->mm, vmaddr);
739 mmap_read_unlock(gmap->mm);
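A sketch of discarding the backing of a single guest page, similar to what the CMMA/ESSA handling does for pages the guest marks unused:

static void example_discard_page(struct gmap *gmap, unsigned long gaddr)
{
	/* The range end is exclusive; this zaps exactly one 4 KB page. */
	gmap_discard(gmap, gaddr, gaddr + PAGE_SIZE);
}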
748 * @nb: pointer to the gmap notifier block
760 * @nb: pointer to the gmap notifier block
773 * @gmap: pointer to guest mapping meta data structure
777 static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
783 nb->notifier_call(gmap, start, end);
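The notifier fragments above (source lines 748-783) show only the doc headers and the invocation loop. A sketch of defining and registering a pte notifier, assuming the gmap_register_pte_notifier()/gmap_unregister_pte_notifier() helpers those doc headers belong to; the callback body is illustrative only:

static void example_notifier_call(struct gmap *gmap, unsigned long start,
				  unsigned long end)
{
	pr_debug("gmap %p: guest range %lx-%lx invalidated\n",
		 gmap, start, end);
}

static struct gmap_notifier example_notifier = {
	.notifier_call = example_notifier_call,
};

static void example_notifier_setup(void)
{
	gmap_register_pte_notifier(&example_notifier);
}

static void example_notifier_teardown(void)
{
	gmap_unregister_pte_notifier(&example_notifier);
}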
787 * gmap_table_walk - walk the gmap page tables
788 * @gmap: pointer to guest mapping meta data structure
799 * Returns NULL if the gmap page tables could not be walked to the
804 static inline unsigned long *gmap_table_walk(struct gmap *gmap,
807 const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
808 unsigned long *table = gmap->table;
810 if (gmap_is_shadow(gmap) && gmap->removed)
858 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
860 * @gmap: pointer to guest mapping meta data structure
866 static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
871 BUG_ON(gmap_is_shadow(gmap));
872 /* Walk the gmap page table, lock and get pte pointer */
873 table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
876 return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
880 * gmap_pte_op_fixup - force a page in and connect the gmap page table
881 * @gmap: pointer to guest mapping meta data structure
888 * up or connecting the gmap page table.
890 static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
893 struct mm_struct *mm = gmap->mm;
897 BUG_ON(gmap_is_shadow(gmap));
905 return __gmap_link(gmap, gaddr, vmaddr);
919 * gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
921 * @gmap: pointer to guest mapping meta data structure
926 static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
930 BUG_ON(gmap_is_shadow(gmap));
931 pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
936 if (!gmap->mm->context.allow_gmap_hpage_1m)
939 spin_lock(&gmap->guest_table_lock);
941 spin_unlock(&gmap->guest_table_lock);
947 spin_unlock(&gmap->guest_table_lock);
953 * @gmap: pointer to the guest mapping meta data structure
956 static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
959 spin_unlock(&gmap->guest_table_lock);
976 static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
989 gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
995 gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
1010 * @gmap: pointer to guest mapping meta data structure
1021 static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
1032 ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
1039 rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
1046 * @gmap: pointer to guest mapping meta data structure
1057 static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
1064 BUG_ON(gmap_is_shadow(gmap));
1067 pmdp = gmap_pmd_op_walk(gmap, gaddr);
1070 rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
1077 rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot,
1085 gmap_pmd_op_end(gmap, pmdp);
1091 /* -EAGAIN, fixup of userspace mm and gmap */
1092 vmaddr = __gmap_translate(gmap, gaddr);
1095 rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
1106 * @gmap: pointer to guest mapping meta data structure
1111 * Returns 0 if a gmap mapping exists for each page in the given range,
1113 * If the gmap mapping is missing for one or more pages, -EFAULT is
1117 int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
1122 if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
1126 mmap_read_lock(gmap->mm);
1127 rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
1128 mmap_read_unlock(gmap->mm);
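A sketch of requesting change notification on one guest page, mirroring how KVM watches the vcpu prefix pages: with PROT_WRITE the page stays writable, but registered notifiers fire when the mapping is invalidated. The helper name is illustrative:

#include <linux/mman.h>		/* PROT_WRITE */

static int example_watch_guest_page(struct gmap *gmap, unsigned long gaddr)
{
	return gmap_mprotect_notify(gmap, gaddr, PAGE_SIZE, PROT_WRITE);
}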
1136 * @gmap: pointer to guest mapping meta data structure
1141 * if reading using the virtual address failed. -EINVAL if called on a gmap
1144 * Called with gmap->mm->mmap_lock in read.
1146 int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
1153 if (gmap_is_shadow(gmap))
1158 ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
1173 vmaddr = __gmap_translate(gmap, gaddr);
1178 rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
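A sketch of reading one 8-byte word of guest memory through the gmap; per the comment at source line 1144, the caller provides mmap_lock in read:

static int example_peek_guest_word(struct gmap *gmap, unsigned long gaddr,
				   unsigned long *val)
{
	int rc;

	mmap_read_lock(gmap->mm);
	rc = gmap_read_table(gmap, gaddr, val);
	mmap_read_unlock(gmap->mm);
	return rc;	/* 0, -EFAULT, or -EINVAL on a shadow gmap */
}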
1194 static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
1222 * @raddr: rmap address in the shadow gmap
1229 static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
1232 struct gmap *parent;
1310 static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
1330 static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
1347 static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
1377 static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
1405 static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
1435 static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
1463 static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
1493 static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
1521 static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
1551 static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
1582 static void gmap_unshadow(struct gmap *sg)
1611 * @parent: pointer to the parent gmap
1615 * Returns the pointer to a gmap if a shadow table with the given asce is
1619 static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
1622 struct gmap *sg;
1643 * Returns 1 if the gmap shadow is still valid and matches the given
1645 * caller has to request a new shadow gmap in this case.
1648 int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
1658 * @parent: pointer to the parent gmap
1669 * parent gmap table could not be protected.
1671 struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
1674 struct gmap *sg, *new;
1685 /* Create a new shadow gmap */
1707 /* only allow one real-space gmap shadow */
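A sketch of maintaining a shadow gmap for a nested guest ASCE, following the VSIE pattern: reuse a cached shadow while gmap_shadow_valid() says it still matches, otherwise drop it and request a fresh one. gmap_shadow() returns an ERR_PTR on failure; the file-scope cache is a simplification for illustration:

#include <linux/err.h>

static struct gmap *example_sg;	/* cached shadow, kept per nested guest */

static int example_refresh_shadow(struct gmap *parent, unsigned long asce,
				  int edat_level)
{
	if (example_sg && gmap_shadow_valid(example_sg, asce, edat_level))
		return 0;	/* cached shadow still matches */
	if (example_sg)
		gmap_put(example_sg);
	example_sg = gmap_shadow(parent, asce, edat_level);
	if (IS_ERR(example_sg)) {
		int rc = PTR_ERR(example_sg);	/* e.g. -ENOMEM or -EFAULT */

		example_sg = NULL;
		return rc;
	}
	return 0;
}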
1749 * @saddr: faulting address in the shadow gmap
1750 * @r2t: parent gmap address of the region 2 table to get shadowed
1754 * four pages of the source table are made read-only in the parent gmap
1760 * -EFAULT if an address in the parent gmap could not be resolved.
1764 int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
1810 /* Make r2t read-only in parent gmap page table */
1838 * @saddr: faulting address in the shadow gmap
1839 * @r3t: parent gmap address of the region 3 table to get shadowed
1844 * -EFAULT if an address in the parent gmap could not be resolved.
1848 int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
1894 /* Make r3t read-only in parent gmap page table */
1922 * @saddr: faulting address in the shadow gmap
1923 * @sgt: parent gmap address of the segment table to get shadowed
1928 * -EFAULT if an address in the parent gmap could not be resolved.
1932 int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
1978 /* Make sgt read-only in parent gmap page table */
2007 * @pgt: parent gmap address of the page table to get shadowed
2016 int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
2046 * @saddr: faulting address in the shadow gmap
2047 * @pgt: parent gmap address of the page table to get shadowed
2052 * -EFAULT if an address in the parent gmap could not be resolved and
2054 * Called with gmap->mm->mmap_lock in read
2056 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
2099 /* Make pgt read-only in parent gmap page table (not the pgste) */
2126 * @saddr: faulting address in the shadow gmap
2127 * @pte: pte in parent gmap address space to get shadowed
2131 * -EFAULT if an address in the parent gmap could not be resolved.
2135 int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
2137 struct gmap *parent;
2199 * gmap_shadow_notify - handle notifications for shadow gmap
2203 static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
2270 struct gmap *gmap, *sg, *next;
2275 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2276 spin_lock(&gmap->guest_table_lock);
2277 table = radix_tree_lookup(&gmap->host_to_guest,
2281 spin_unlock(&gmap->guest_table_lock);
2285 if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
2286 spin_lock(&gmap->shadow_lock);
2288 &gmap->children, list)
2290 spin_unlock(&gmap->shadow_lock);
2293 gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
2299 static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
2303 gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
2307 * gmap_pmdp_xchg - exchange a gmap pmd with another
2308 * @gmap: pointer to the guest address space structure
2316 static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
2320 pmdp_notify_gmap(gmap, pmdp, gaddr);
2323 __pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
2336 struct gmap *gmap;
2340 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2341 spin_lock(&gmap->guest_table_lock);
2342 pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
2346 pmdp_notify_gmap(gmap, pmdp, gaddr);
2353 spin_unlock(&gmap->guest_table_lock);
2389 struct gmap *gmap;
2393 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2394 spin_lock(&gmap->guest_table_lock);
2395 entry = radix_tree_delete(&gmap->host_to_guest,
2400 pmdp_notify_gmap(gmap, pmdp, gaddr);
2405 gmap->asce, IDTE_LOCAL);
2410 spin_unlock(&gmap->guest_table_lock);
2424 struct gmap *gmap;
2428 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2429 spin_lock(&gmap->guest_table_lock);
2430 entry = radix_tree_delete(&gmap->host_to_guest,
2435 pmdp_notify_gmap(gmap, pmdp, gaddr);
2440 gmap->asce, IDTE_GLOBAL);
2447 spin_unlock(&gmap->guest_table_lock);
2455 * @gmap: pointer to guest address space
2462 static bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
2475 gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
2481 * @gmap: pointer to guest address space
2489 void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
2497 pmdp = gmap_pmd_op_walk(gmap, gaddr);
2502 if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
2506 ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
2509 if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
2514 gmap_pmd_op_end(gmap, pmdp);
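A sketch of harvesting dirty bits at the granularity gmap_sync_dirty_log_pmd() works at, i.e. the 256 pages under one segment entry; gaddr and vmaddr are the segment-aligned guest and host addresses of the same mapping:

static void example_collect_dirty(struct gmap *gmap, unsigned long gaddr,
				  unsigned long vmaddr)
{
	unsigned long bitmap[4] = { 0 };	/* 256 bits, one per 4 KB page */

	gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
	/* set bits mark pages dirtied since the last sync */
}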
2805 * list of page tables of the gmap.
2806 * @gmap: the gmap whose table is to be removed
2809 * gmap (the CRST list). This list is used at teardown time to free all
2820 void s390_unlist_old_asce(struct gmap *gmap)
2824 old = virt_to_page(gmap->table);
2825 spin_lock(&gmap->guest_table_lock);
2842 spin_unlock(&gmap->guest_table_lock);
2847 * s390_replace_asce - Try to replace the current ASCE of a gmap with a copy
2848 * @gmap: the gmap whose ASCE needs to be replaced
2855 * In any case, the old ASCE is always removed from the gmap CRST list.
2859 int s390_replace_asce(struct gmap *gmap)
2865 s390_unlist_old_asce(gmap);
2868 if ((gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
2876 memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT));
2883 spin_lock(&gmap->guest_table_lock);
2884 list_add(&page->lru, &gmap->crst_list);
2885 spin_unlock(&gmap->guest_table_lock);
2888 asce = (gmap->asce & ~_ASCE_ORIGIN) | __pa(table);
2889 WRITE_ONCE(gmap->asce, asce);
2890 WRITE_ONCE(gmap->mm->context.gmap_asce, asce);
2891 WRITE_ONCE(gmap->table, table);