Lines matching refs:meta (KFENCE core, mm/kfence/core.c)

250 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
252 unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
258 if (KFENCE_WARN_ON(meta < kfence_metadata ||
259 meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
266 if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
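The hits above are from metadata_to_pageaddr(), which maps a metadata slot back to its object page and sanity-checks the result. The arithmetic encodes the pool layout of alternating object and guard pages (with guard pages at the front), so object i lives at __kfence_pool + (i + 1) * 2 * PAGE_SIZE. Below is a userspace sketch of that mapping and its inverse; PAGE_SIZE, NUM_OBJECTS and the pool array are illustrative stand-ins, not the kernel's definitions.

/*
 * Sketch of the index <-> page address mapping implied by
 * metadata_to_pageaddr() and addr_to_metadata(), assuming the layout of two
 * leading guard pages followed by alternating object and guard pages.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define NUM_OBJECTS	4

static char pool[(NUM_OBJECTS + 1) * 2 * PAGE_SIZE];

static unsigned long object_pageaddr(long index)
{
	unsigned long offset = (index + 1) * PAGE_SIZE * 2;

	if (index < 0 || index >= NUM_OBJECTS)	/* bounds check, as in the kernel */
		return 0;
	return (unsigned long)&pool[offset];
}

/* Inverse direction, mirroring addr_to_metadata()'s index computation. */
static long pageaddr_to_index(unsigned long addr)
{
	return (addr - (unsigned long)pool) / (PAGE_SIZE * 2) - 1;
}

int main(void)
{
	for (long i = 0; i < NUM_OBJECTS; i++) {
		unsigned long addr = object_pageaddr(i);

		printf("object %ld: pool offset %lu, round-trip index %ld\n",
		       i, addr - (unsigned long)pool, pageaddr_to_index(addr));
	}
	return 0;
}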
277 metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
281 next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;
283 lockdep_assert_held(&meta->lock);
305 WRITE_ONCE(meta->state, next);
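These lines are from metadata_update_state(): the allocation or free stack is recorded in the matching track while meta->lock is held, and the new state is published last so that lockless READ_ONCE() checks elsewhere never see a state ahead of its track data. A condensed userspace model of that ordering, with simplified types and an atomic release store standing in for WRITE_ONCE():

#include <stdatomic.h>
#include <string.h>

enum object_state { OBJECT_UNUSED, OBJECT_ALLOCATED, OBJECT_FREED };

struct track {
	unsigned long stack[8];
	int num_entries;
};

struct metadata {
	_Atomic enum object_state state;
	struct track alloc_track;
	struct track free_track;
};

/* Caller holds the per-object lock, as lockdep_assert_held() documents. */
static void update_state(struct metadata *meta, enum object_state next,
			 const unsigned long *stack, int num)
{
	/* Free transitions record into free_track, everything else into alloc_track. */
	struct track *track =
		next == OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

	num = num > 8 ? 8 : num;
	memcpy(track->stack, stack, num * sizeof(*stack));
	track->num_entries = num;

	/* Publish the state only after the track has been filled in. */
	atomic_store_explicit(&meta->state, next, memory_order_release);
}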
311 struct kfence_metadata *meta;
319 meta = addr_to_metadata((unsigned long)addr);
320 raw_spin_lock_irqsave(&meta->lock, flags);
321 kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
322 raw_spin_unlock_irqrestore(&meta->lock, flags);
327 static inline void set_canary(const struct kfence_metadata *meta)
329 const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
336 for (; addr < meta->addr; addr += sizeof(u64))
339 addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64));
344 static inline void check_canary(const struct kfence_metadata *meta)
346 const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
359 for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) {
369 for (; addr < meta->addr; addr++) {
375 for (addr = meta->addr + meta->size; addr % sizeof(u64) != 0; addr++) {
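set_canary() fills the unused bytes of the object page on both sides of the allocation with a known pattern, and check_canary() scans both sides to catch out-of-bounds writes that landed inside the page rather than on a guard page. A userspace sketch of the same idea follows; the PATTERN value and helpers are illustrative, not the kernel's per-address canary pattern.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define PATTERN   0xaa

static uint8_t page[PAGE_SIZE];

/* Fill everything in the page that is not the object itself. */
static void set_canary(unsigned long start, unsigned long size)
{
	memset(page, PATTERN, start);					/* left of object */
	memset(page + start + size, PATTERN, PAGE_SIZE - start - size);	/* right of object */
}

/* Return the offset of the first corrupted canary byte, or -1 if intact. */
static long check_canary(unsigned long start, unsigned long size)
{
	for (unsigned long i = 0; i < start; i++)
		if (page[i] != PATTERN)
			return i;
	for (unsigned long i = start + size; i < PAGE_SIZE; i++)
		if (page[i] != PATTERN)
			return i;
	return -1;
}

int main(void)
{
	unsigned long start = PAGE_SIZE - 128, size = 100;

	set_canary(start, size);
	page[start + size + 3] = 0x42;	/* simulate a small out-of-bounds write */
	printf("corruption at page offset %ld\n", check_canary(start, size));
	return 0;
}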
394 struct kfence_metadata *meta = NULL;
405 meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
406 list_del_init(&meta->list);
409 if (!meta) {
414 if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
417 * use-after-free, which locked meta->lock, and the reporting
426 list_add_tail(&meta->list, &kfence_freelist);
432 meta->addr = metadata_to_pageaddr(meta);
434 if (meta->state == KFENCE_OBJECT_FREED)
435 kfence_unprotect(meta->addr);
447 meta->addr += PAGE_SIZE - size;
448 meta->addr = ALIGN_DOWN(meta->addr, cache->align);
451 addr = (void *)meta->addr;
454 metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
456 WRITE_ONCE(meta->cache, cache);
457 meta->size = size;
458 meta->alloc_stack_hash = alloc_stack_hash;
459 raw_spin_unlock_irqrestore(&meta->lock, flags);
464 slab = virt_to_slab((void *)meta->addr);
473 set_canary(meta);
486 kfence_protect(meta->addr); /* Random "faults" by protecting the object. */
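The hits from kfence_guarded_alloc() show the core of the allocation path: a slot is taken from kfence_freelist, its page is unprotected if it still holds a freed object, and the object is placed either at the start of the page or flush against the following guard page, then aligned down to the cache's alignment, so out-of-bounds accesses on either side can be caught. A userspace sketch of the placement arithmetic only; names such as "align" and "right" are stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

static unsigned long place_object(unsigned long pageaddr, unsigned long size,
				  unsigned long align, bool right)
{
	unsigned long addr = pageaddr;

	if (right) {
		addr += PAGE_SIZE - size;	/* end flush with the next guard page */
		addr = ALIGN_DOWN(addr, align);	/* respect the cache's alignment */
	}
	return addr;
}

int main(void)
{
	unsigned long page = 0x1000000;

	printf("left-aligned : %#lx\n", place_object(page, 100, 8, false));
	printf("right-aligned: %#lx\n", place_object(page, 100, 8, true));
	return 0;
}

Left-aligned objects rely on the right-hand canary bytes to detect overflows; right-aligned objects end at the guard page, so an overflowing access faults immediately.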
494 static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
500 raw_spin_lock_irqsave(&meta->lock, flags);
502 if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
505 kfence_report_error((unsigned long)addr, false, NULL, meta,
507 raw_spin_unlock_irqrestore(&meta->lock, flags);
520 if (meta->unprotected_page) {
521 memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
522 kfence_protect(meta->unprotected_page);
523 meta->unprotected_page = 0;
527 metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
528 init = slab_want_init_on_free(meta->cache);
529 raw_spin_unlock_irqrestore(&meta->lock, flags);
531 alloc_covered_add(meta->alloc_stack_hash, -1);
534 check_canary(meta);
542 memzero_explicit(addr, meta->size);
551 KFENCE_WARN_ON(!list_empty(&meta->list));
552 list_add_tail(&meta->list, &kfence_freelist);
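kfence_guarded_free() first validates the free under meta->lock: the object must still be allocated and the freed pointer must be the object's start address, otherwise the free is reported as invalid (double-free or bad pointer). Only then is the state flipped, the canary checked, the memory optionally zeroed, and the slot returned to kfence_freelist. A userspace sketch of the validation step, with simplified stand-in structures:

#include <stdbool.h>
#include <stdio.h>

enum object_state { OBJECT_UNUSED, OBJECT_ALLOCATED, OBJECT_FREED };

struct metadata {
	enum object_state state;
	unsigned long addr;
	unsigned long size;
};

static bool guarded_free(struct metadata *meta, unsigned long addr)
{
	if (meta->state != OBJECT_ALLOCATED || meta->addr != addr) {
		fprintf(stderr, "invalid free of %#lx\n", addr);
		return false;
	}
	meta->state = OBJECT_FREED;	/* then: check canaries, zero, re-protect the page */
	return true;
}

int main(void)
{
	struct metadata meta = { .state = OBJECT_ALLOCATED, .addr = 0x2000, .size = 64 };

	guarded_free(&meta, 0x2000);	/* ok */
	guarded_free(&meta, 0x2000);	/* double-free: reported */
	return 0;
}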
565 struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);
567 kfence_guarded_free((void *)meta->addr, meta, false);
622 struct kfence_metadata *meta = &kfence_metadata_init[i];
625 INIT_LIST_HEAD(&meta->list);
626 raw_spin_lock_init(&meta->lock);
627 meta->state = KFENCE_OBJECT_UNUSED;
628 meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
629 list_add_tail(&meta->list, &kfence_freelist);
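The pool setup loop gives every metadata slot an initialized list head and lock, marks it UNUSED, records its object page address (used later by the metadata_to_pageaddr() validation), and appends it to kfence_freelist. A compact userspace sketch of that seeding, using a singly-linked list as a simplification of the kernel's list_head:

#define PAGE_SIZE	4096UL
#define NUM_OBJECTS	4

enum object_state { OBJECT_UNUSED, OBJECT_ALLOCATED, OBJECT_FREED };

struct metadata {
	enum object_state state;
	unsigned long addr;
	struct metadata *next;		/* freelist link */
};

static struct metadata metadata[NUM_OBJECTS];
static struct metadata *freelist_head, **freelist_tail = &freelist_head;

static void init_pool(unsigned long pool_base)
{
	unsigned long addr = pool_base + 2 * PAGE_SIZE;	/* first object page */

	for (int i = 0; i < NUM_OBJECTS; i++, addr += 2 * PAGE_SIZE) {
		metadata[i].state = OBJECT_UNUSED;
		metadata[i].addr = addr;	/* kept for later address validation */
		*freelist_tail = &metadata[i];	/* append: allocation is FIFO */
		freelist_tail = &metadata[i].next;
	}
}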
737 struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
740 raw_spin_lock_irqsave(&meta->lock, flags);
741 kfence_print_object(seq, meta);
742 raw_spin_unlock_irqrestore(&meta->lock, flags);
778 struct kfence_metadata *meta = &kfence_metadata[i];
780 if (meta->state == KFENCE_OBJECT_ALLOCATED)
781 check_canary(meta);
987 struct kfence_metadata *meta;
997 meta = &kfence_metadata[i];
1006 if (READ_ONCE(meta->cache) != s ||
1007 READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
1010 raw_spin_lock_irqsave(&meta->lock, flags);
1011 in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
1012 raw_spin_unlock_irqrestore(&meta->lock, flags);
1029 kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
1034 meta = &kfence_metadata[i];
1037 if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
1040 raw_spin_lock_irqsave(&meta->lock, flags);
1041 if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
1042 meta->cache = NULL;
1043 raw_spin_unlock_irqrestore(&meta->lock, flags);
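kfence_shutdown_cache() walks all metadata twice when a cache is destroyed: the first pass frees objects of that cache that are still allocated, the second clears meta->cache for already-freed objects so later reports do not reference a destroyed cache. Both passes use a cheap unlocked READ_ONCE() filter and re-check the condition under meta->lock. A userspace sketch of that check-then-lock pattern, with a pthread mutex and plain reads standing in for the kernel primitives:

#include <pthread.h>
#include <stdbool.h>

enum object_state { OBJECT_UNUSED, OBJECT_ALLOCATED, OBJECT_FREED };

struct metadata {
	pthread_mutex_t lock;
	void *cache;
	enum object_state state;
};

static bool shutdown_candidate(struct metadata *meta, void *dying_cache)
{
	bool in_use;

	/* Cheap unlocked filter; may race, so it is only a hint. */
	if (meta->cache != dying_cache || meta->state != OBJECT_ALLOCATED)
		return false;

	/* Authoritative re-check under the per-object lock. */
	pthread_mutex_lock(&meta->lock);
	in_use = meta->cache == dying_cache && meta->state == OBJECT_ALLOCATED;
	pthread_mutex_unlock(&meta->lock);

	return in_use;	/* caller reports the leaked object and frees it as a zombie */
}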
1122 const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1128 return meta ? meta->size : 0;
1133 const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1139 return meta ? (void *)meta->addr : NULL;
1144 struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1147 KFENCE_WARN_ON(meta->objcg);
1152 * objects once it has been freed. meta->cache may be NULL if the cache
1155 if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
1156 call_rcu(&meta->rcu_head, rcu_guarded_free);
1158 kfence_guarded_free(addr, meta, false);
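__kfence_free() defers the guarded free through call_rcu() when the object's cache is SLAB_TYPESAFE_BY_RCU, because such objects must remain readable until a grace period has elapsed; everything else is freed (and its page protected) immediately. A userspace sketch of that branch, with a print standing in for the RCU callback:

#include <stdbool.h>
#include <stdio.h>

struct metadata {
	bool cache_typesafe_by_rcu;	/* stand-in for SLAB_TYPESAFE_BY_RCU */
	unsigned long addr;
};

static void guarded_free(struct metadata *meta)
{
	printf("freeing object at %#lx now\n", meta->addr);
}

static void defer_free(struct metadata *meta)
{
	/* In the kernel: call_rcu(&meta->rcu_head, rcu_guarded_free); */
	printf("deferring free of %#lx until after a grace period\n", meta->addr);
}

static void kfence_free(struct metadata *meta)
{
	if (meta->cache_typesafe_by_rcu)
		defer_free(meta);
	else
		guarded_free(meta);
}

int main(void)
{
	struct metadata a = { .cache_typesafe_by_rcu = false, .addr = 0x2000 };
	struct metadata b = { .cache_typesafe_by_rcu = true,  .addr = 0x4000 };

	kfence_free(&a);
	kfence_free(&b);
	return 0;
}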
1178 struct kfence_metadata *meta;
1181 meta = addr_to_metadata(addr - PAGE_SIZE);
1182 if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
1183 to_report = meta;
1185 distance = addr - data_race(meta->addr + meta->size);
1188 meta = addr_to_metadata(addr + PAGE_SIZE);
1189 if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
1191 if (!to_report || distance > data_race(meta->addr) - addr)
1192 to_report = meta;
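The final group is the neighbour search in kfence_handle_page_fault(): when a guard page faults, the allocated objects in the pages directly below and above the fault address are compared by distance, and the closer one is reported as the likely out-of-bounds target. A userspace sketch of that comparison, with simplified stand-in structures:

#include <stdio.h>

struct object {
	unsigned long addr;	/* object start */
	unsigned long size;
	int allocated;
};

static const struct object *pick_report_target(const struct object *below,
					       const struct object *above,
					       unsigned long fault_addr)
{
	const struct object *to_report = NULL;
	unsigned long distance = 0;

	if (below && below->allocated) {
		to_report = below;
		/* Fault lies right of this object: distance from its end. */
		distance = fault_addr - (below->addr + below->size);
	}
	if (above && above->allocated) {
		/* Fault lies left of this object: distance to its start. */
		if (!to_report || distance > above->addr - fault_addr)
			to_report = above;
	}
	return to_report;
}

int main(void)
{
	struct object below = { .addr = 0x10000, .size = 64,  .allocated = 1 };
	struct object above = { .addr = 0x12f00, .size = 128, .allocated = 1 };
	const struct object *t = pick_report_target(&below, &above, 0x11010);

	printf("report object at %#lx\n", t ? t->addr : 0UL);
	return 0;
}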