Lines Matching defs:handle

22  *		to store handle.
80 * a single (unsigned long) handle value.
105 * header keeps the handle, which is a 4-byte-aligned address, so we
129 /* each chunk includes extra space to keep handle */
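
Read together, these header comments give the whole scheme: an object's location (PFN, index within its zspage) is packed into a single unsigned long, the low bit(s) are reserved for tags such as OBJ_ALLOCATED_TAG, and every chunk is grown by ZS_HANDLE_SIZE so the handle can live inside it. A minimal user-space model of that encoding; the bit widths are assumptions for illustration (the kernel derives them from MAX_PHYSMEM_BITS and PAGE_SHIFT):

/* Toy model of the (<PFN>, <obj_idx>) -> unsigned long packing.
 * OBJ_INDEX_BITS is an assumed width; the real value is derived. */
#include <assert.h>

#define OBJ_TAG_BITS      1
#define OBJ_INDEX_BITS    20
#define OBJ_INDEX_MASK    ((1UL << OBJ_INDEX_BITS) - 1)
#define OBJ_ALLOCATED_TAG 1UL

static unsigned long location_to_obj(unsigned long pfn, unsigned int obj_idx)
{
	unsigned long obj = (pfn << OBJ_INDEX_BITS) | (obj_idx & OBJ_INDEX_MASK);

	return obj << OBJ_TAG_BITS;	/* low bits stay free for tags */
}

static void obj_to_location(unsigned long obj, unsigned long *pfn,
			    unsigned int *obj_idx)
{
	obj >>= OBJ_TAG_BITS;
	*pfn = obj >> OBJ_INDEX_BITS;
	*obj_idx = obj & OBJ_INDEX_MASK;
}

int main(void)
{
	unsigned long pfn;
	unsigned int idx;

	obj_to_location(location_to_obj(0x42, 7), &pfn, &idx);
	assert(pfn == 0x42 && idx == 7);
	return 0;
}
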
216 unsigned long handle;
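
Line 216 is one arm of a union: while an object is free, the same word links the freelist; once allocated, it holds the handle. Abridged from the surrounding struct link_free definition (the shape in the kernels this listing matches):

struct link_free {
	union {
		/* free-object index; meaningful only while the object is free */
		unsigned long next;
		/* handle of the object, once it has been allocated */
		unsigned long handle;
	};
};
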
328 static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
330 kmem_cache_free(pool->handle_cachep, (void *)handle);
344 /* pool->lock (which owns the handle) synchronizes races */
345 static void record_obj(unsigned long handle, unsigned long obj)
347 *(unsigned long *)handle = obj;
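
record_obj() is the heart of the design: a handle is nothing but the address of a kmem_cache-allocated unsigned-long slot, and the slot stores the encoded object location. Because callers only ever hold the slot's address, the object behind it can move. A user-space sketch of that indirection (the malloc-backed slot cache is this sketch's assumption):

#include <stdlib.h>

static unsigned long cache_alloc_handle(void)
{
	return (unsigned long)malloc(sizeof(unsigned long));
}

static void cache_free_handle(unsigned long handle)
{
	free((void *)handle);
}

static void record_obj(unsigned long handle, unsigned long obj)
{
	*(unsigned long *)handle = obj;
}

static unsigned long handle_to_obj(unsigned long handle)
{
	return *(unsigned long *)handle;
}

int main(void)
{
	unsigned long handle = cache_alloc_handle();
	int moved;

	if (!handle)
		return 1;
	record_obj(handle, 0xA0);	/* object placed at location A */
	record_obj(handle, 0xB0);	/* object "moved": same handle */
	moved = handle_to_obj(handle) == 0xB0;
	cache_free_handle(handle);
	return moved ? 0 : 1;
}
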
370 unsigned long *handle)
372 *handle = zs_malloc(pool, size, gfp);
374 if (IS_ERR_VALUE(*handle))
375 return PTR_ERR((void *)*handle);
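
The wrapper above folds zs_malloc()'s error reporting into one word: error values occupy the very top of the unsigned long range, so IS_ERR_VALUE() can tell them apart from real handles and PTR_ERR() recovers the errno. A toy re-derivation of that convention (MAX_ERRNO matches the kernel constant; the helpers are simplified):

#include <stdio.h>

#define MAX_ERRNO 4095UL

static int is_err_value(unsigned long v)
{
	return v >= (unsigned long)-MAX_ERRNO;	/* top 4095 values */
}

static long ptr_err(unsigned long v)
{
	return (long)v;
}

int main(void)
{
	unsigned long handle = (unsigned long)-12;	/* -ENOMEM */

	if (is_err_value(handle))
		printf("error %ld\n", ptr_err(handle));	/* prints -12 */
	return 0;
}
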
378 static void zs_zpool_free(void *pool, unsigned long handle)
380 zs_free(pool, handle);
383 static void *zs_zpool_map(void *pool, unsigned long handle,
401 return zs_map_object(pool, handle, zs_mm);
403 static void zs_zpool_unmap(void *pool, unsigned long handle)
405 zs_unmap_object(pool, handle);
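
These one-line wrappers adapt zsmalloc to zpool's backend-neutral API, so zswap can drive zsmalloc (or another allocator) through a single ops table. A generic sketch of the pattern; the struct below is illustrative, not the actual struct zpool_driver layout:

#include <stddef.h>
#include <string.h>

struct alloc_ops {
	unsigned long (*alloc)(void *pool, size_t size);
	void (*free)(void *pool, unsigned long handle);
	void *(*map)(void *pool, unsigned long handle);
	void (*unmap)(void *pool, unsigned long handle);
};

/* Callers hold only the ops table plus an opaque pool pointer, so any
 * backend that fills in the table can be swapped in underneath. */
static void copy_out(const struct alloc_ops *ops, void *pool,
		     unsigned long handle, void *dst, size_t len)
{
	void *src = ops->map(pool, handle);

	memcpy(dst, src, len);
	ops->unmap(pool, handle);	/* mappings are short-lived */
}
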
793 static unsigned long handle_to_obj(unsigned long handle)
795 return *(unsigned long *)handle;
801 unsigned long handle;
806 handle = page->index;
808 handle = *(unsigned long *)obj;
810 if (!(handle & OBJ_ALLOCATED_TAG))
813 /* Clear all tags before returning the handle */
814 *phandle = handle & ~OBJ_TAG_MASK;
1178 * zs_map_object - get address of allocated object from handle.
1180 * @handle: handle returned from zs_malloc
1192 void *zs_map_object(struct zs_pool *pool, unsigned long handle,
1212 /* Guarantees we can safely get the zspage from the handle */
1214 obj = handle_to_obj(handle);
1254 void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
1264 obj = handle_to_obj(handle);
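
The map/unmap pair is how callers actually touch the bytes behind a handle: zs_map_object() returns a kernel address (copying through a per-CPU buffer when the object spans two pages), and zs_unmap_object() releases it. A kernel-side usage sketch in the style of zram's store path (not a standalone program; assumes a live pool and handle):

#include <linux/zsmalloc.h>
#include <linux/string.h>

static void store_compressed(struct zs_pool *pool, unsigned long handle,
			     const void *src, size_t len)
{
	void *dst = zs_map_object(pool, handle, ZS_MM_WO);

	/* Keep the window short: no sleeping, no zs_free() in here. */
	memcpy(dst, src, len);
	zs_unmap_object(pool, handle);
}
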
1308 struct zspage *zspage, unsigned long handle)
1320 handle |= OBJ_ALLOCATED_TAG;
1335 /* record handle in the header of allocated chunk */
1336 link->handle = handle;
1338 /* record the handle in page->index */
1339 zspage->first_page->index = handle;
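
obj_malloc() shows the write side of the tag machinery, and obj_allocated() (lines 801-814 above) the read side. Normal size classes keep the tagged handle in the chunk's own header word; huge classes have no per-chunk header, so first_page->index stands in. A user-space model of both sides; struct toy_page is this sketch's stand-in, not the kernel's struct page:

#define OBJ_ALLOCATED_TAG 1UL
#define OBJ_TAG_MASK      OBJ_ALLOCATED_TAG

struct toy_page { unsigned long index; };

static void plant_handle(unsigned long *chunk_hdr, struct toy_page *first,
			 int huge, unsigned long handle)
{
	handle |= OBJ_ALLOCATED_TAG;
	if (huge)
		first->index = handle;	/* zspage->first_page->index */
	else
		*chunk_hdr = handle;	/* link->handle */
}

/* Read side, as in obj_allocated(): test the tag, then strip all tag
 * bits before returning the handle to the caller. */
static int is_allocated(unsigned long hdr, unsigned long *phandle)
{
	if (!(hdr & OBJ_ALLOCATED_TAG))
		return 0;
	*phandle = hdr & ~OBJ_TAG_MASK;
	return 1;
}
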
1356 * On success, a handle to the allocated object is returned,
1362 unsigned long handle, obj;
1370 handle = cache_alloc_handle(pool, gfp);
1371 if (!handle)
1374 /* extra space in chunk to keep the handle */
1382 obj = obj_malloc(pool, zspage, handle);
1385 record_obj(handle, obj);
1395 cache_free_handle(pool, handle);
1400 obj = obj_malloc(pool, zspage, handle);
1404 record_obj(handle, obj);
1414 return handle;
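
The matches from zs_malloc() line up into a five-step flow: allocate a handle slot, grow the request by ZS_HANDLE_SIZE, carve the object, record its location through the slot, and return the slot's address as the handle. Note the failure path at line 1395: if no object can be carved, the slot goes back to the cache. The same flow compressed into a toy (everything user-space; the pool is plain malloc here):

#include <stdlib.h>

#define ZS_HANDLE_SIZE sizeof(unsigned long)

static unsigned long toy_zs_malloc(size_t size)
{
	unsigned long *slot = malloc(sizeof(*slot));	/* cache_alloc_handle */
	void *obj;

	if (!slot)
		return 0;
	obj = malloc(size + ZS_HANDLE_SIZE);	/* extra space for the handle */
	if (!obj) {
		free(slot);			/* cf. line 1395 */
		return 0;
	}
	*slot = (unsigned long)obj;		/* record_obj */
	return (unsigned long)slot;		/* the handle */
}
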
1445 void zs_free(struct zs_pool *pool, unsigned long handle)
1453 if (IS_ERR_OR_NULL((void *)handle))
1458 * so it's safe to get the page from handle.
1461 obj = handle_to_obj(handle);
1474 cache_free_handle(pool, handle);
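
zs_free() runs the indirection backwards: reject NULL/error handles, read the slot to locate the object, release the object, then return the slot itself to the handle cache. The same shape in toy form:

#include <stdlib.h>

static void toy_zs_free(unsigned long handle)
{
	void *obj;

	if (!handle)				/* cf. IS_ERR_OR_NULL() */
		return;
	obj = (void *)*(unsigned long *)handle;	/* handle_to_obj */
	free(obj);				/* obj_free + accounting */
	free((void *)handle);			/* cache_free_handle */
}
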
1550 * return handle.
1557 unsigned long handle = 0;
1564 if (obj_allocated(page, addr + offset, &handle))
1575 return handle;
1582 unsigned long handle;
1588 handle = find_alloced_obj(class, s_page, &obj_idx);
1589 if (!handle) {
1597 used_obj = handle_to_obj(handle);
1598 free_obj = obj_malloc(pool, dst_zspage, handle);
1601 record_obj(handle, free_obj);
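
This is the payoff of the extra word: during compaction, migrate_zspage() finds each allocated object via find_alloced_obj(), carves a destination chunk (passing the same handle so the new header is stamped too), copies the payload, and record_obj() repoints the handle. Callers never observe the move. A toy version of one such step (sizes and malloc backing are this sketch's assumptions):

#include <stdlib.h>
#include <string.h>

static void toy_migrate_one(unsigned long handle, size_t size)
{
	unsigned long *slot = (unsigned long *)handle;
	void *used_obj = (void *)*slot;		/* handle_to_obj */
	void *free_obj = malloc(size);		/* obj_malloc in dst zspage */

	if (!free_obj)
		return;
	memcpy(free_obj, used_obj, size);	/* zs_object_copy */
	*slot = (unsigned long)free_obj;	/* record_obj: repoint handle */
	free(used_obj);				/* source chunk reclaimed */
}
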
1807 unsigned long handle;
1847 if (obj_allocated(page, addr, &handle)) {
1849 old_obj = handle_to_obj(handle);
1853 record_obj(handle, new_obj);
2218 * handle. We need to subtract it, because zs_malloc()
2219 * unconditionally adds handle size before it performs
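
A worked example of this subtraction, with assumed numbers: suppose the largest class that still packs several objects per zspage holds 3264-byte chunks, and ZS_HANDLE_SIZE is 8. A request for s bytes becomes s + 8 internally, so s = 3256 still fits a normal class while s = 3257 spills into the huge classes; 3264 - (8 - 1) = 3257 is therefore the correct threshold to report.

#include <assert.h>

#define ZS_HANDLE_SIZE 8UL	/* assumed: sizeof(unsigned long) */

int main(void)
{
	unsigned long largest_normal = 3264;	/* assumed class size */

	assert(3256 + ZS_HANDLE_SIZE <= largest_normal);	/* still normal */
	assert(3257 + ZS_HANDLE_SIZE >  largest_normal);	/* goes huge */
	assert(largest_normal - (ZS_HANDLE_SIZE - 1) == 3257);
	return 0;
}
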