Lines matching refs:it (EROFS xattr code)

19 static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
23 kunmap(it->page);
25 kunmap_atomic(it->kaddr);
27 unlock_page(it->page);
28 put_page(it->page);
31 static inline void xattr_iter_end_final(struct xattr_iter *it)
33 if (!it->page)
36 xattr_iter_end(it, true);
42 struct xattr_iter it;
70 * 1) if it is not large enough to contain erofs_xattr_ibody_header, then
71 *    ->xattr_isize should be 0 (which means no xattrs);
72 * 2) it is just large enough to contain erofs_xattr_ibody_header, which is on-disk
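
The comment at file lines 70-72 pins down the two degenerate values of ->xattr_isize. Below is a minimal userspace sketch of the implied sanity check; the stub struct, its fields and the helper name are made up, only the size comparison mirrors the comment:

#include <stdio.h>
#include <stdint.h>

/* hypothetical stand-in for struct erofs_xattr_ibody_header; only its
 * size matters for the check described in the comment above */
struct xattr_ibody_header_stub {
	uint32_t h_reserved;
	uint8_t  h_shared_count;
	uint8_t  h_pad[7];
};

/*
 *  1 -> header plus at least one entry, worth parsing
 *  0 -> xattr_isize == 0, no xattrs at all (case 1 in the comment)
 * -1 -> header-only or smaller than the header (case 2 / corrupt image),
 *       nothing usable either way
 */
static int xattr_ibody_usable(unsigned int xattr_isize)
{
	if (xattr_isize == 0)
		return 0;
	if (xattr_isize <= sizeof(struct xattr_ibody_header_stub))
		return -1;
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       xattr_ibody_usable(0),
	       xattr_ibody_usable(sizeof(struct xattr_ibody_header_stub)),
	       xattr_ibody_usable(64));
	return 0;
}
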
95 it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
96 it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
98 it.page = erofs_get_meta_page(sb, it.blkaddr);
99 if (IS_ERR(it.page)) {
100 ret = PTR_ERR(it.page);
105 it.kaddr = kmap(it.page);
108 ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
114 xattr_iter_end(&it, atomic_map);
120 it.ofs += sizeof(struct erofs_xattr_ibody_header);
123 if (it.ofs >= EROFS_BLKSIZ) {
125 DBG_BUGON(it.ofs != EROFS_BLKSIZ);
126 xattr_iter_end(&it, atomic_map);
128 it.page = erofs_get_meta_page(sb, ++it.blkaddr);
129 if (IS_ERR(it.page)) {
132 ret = PTR_ERR(it.page);
136 it.kaddr = kmap_atomic(it.page);
138 it.ofs = 0;
141 le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
142 it.ofs += sizeof(__le32);
144 xattr_iter_end(&it, atomic_map);
171 static inline int xattr_iter_fixup(struct xattr_iter *it)
173 if (it->ofs < EROFS_BLKSIZ)
176 xattr_iter_end(it, true);
178 it->blkaddr += erofs_blknr(it->ofs);
180 it->page = erofs_get_meta_page(it->sb, it->blkaddr);
181 if (IS_ERR(it->page)) {
182 int err = PTR_ERR(it->page);
184 it->page = NULL;
188 it->kaddr = kmap_atomic(it->page);
189 it->ofs = erofs_blkoff(it->ofs);
193 static int inline_xattr_iter_begin(struct xattr_iter *it,
208 it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
209 it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
211 it->page = erofs_get_meta_page(inode->i_sb, it->blkaddr);
212 if (IS_ERR(it->page))
213 return PTR_ERR(it->page);
215 it->kaddr = kmap_atomic(it->page);
223 static int xattr_foreach(struct xattr_iter *it,
232 err = xattr_iter_fixup(it);
241 entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
253 it->ofs += sizeof(struct erofs_xattr_entry);
257 err = op->entry(it, &entry);
259 it->ofs += entry.e_name_len + value_sz;
267 if (it->ofs >= EROFS_BLKSIZ) {
268 DBG_BUGON(it->ofs > EROFS_BLKSIZ);
270 err = xattr_iter_fixup(it);
273 it->ofs = 0;
276 slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
280 err = op->name(it, processed, it->kaddr + it->ofs, slice);
282 it->ofs += entry.e_name_len - processed + value_sz;
286 it->ofs += slice;
294 err = op->alloc_buffer(it, value_sz);
296 it->ofs += value_sz;
302 if (it->ofs >= EROFS_BLKSIZ) {
303 DBG_BUGON(it->ofs > EROFS_BLKSIZ);
305 err = xattr_iter_fixup(it);
308 it->ofs = 0;
311 slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
313 op->value(it, processed, it->kaddr + it->ofs, slice);
314 it->ofs += slice;
320 it->ofs = EROFS_XATTR_ALIGN(it->ofs);
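
xattr_foreach() (file lines 223-320) walks the entry header, name and value as one logical byte stream that may cross block boundaries: whenever it->ofs reaches EROFS_BLKSIZ the iterator is fixed up onto the next block and ofs restarts at 0, and each copy is sliced to min(PAGE_SIZE - ofs, bytes remaining). The following is a standalone sketch of that slicing idea over an in-memory "device", with made-up names and a tiny block size so the boundary case is easy to see; it is not the kernel code itself:

#include <stdio.h>
#include <string.h>

#define BLKSIZ 8	/* tiny block size so a name crosses a boundary */

/* fake "device": an array of fixed-size blocks */
static const char disk[2][BLKSIZ] = {
	{ 'u', 's', 'e', 'r', '.', 'c', 'o', 'm' },
	"ment"
};

/*
 * Copy @len bytes starting at (@blk, @ofs) into @dst, slicing the copy
 * at every block boundary the way the listing slices name/value reads.
 */
static void read_span(char *dst, unsigned int blk, unsigned int ofs,
		      unsigned int len)
{
	unsigned int processed = 0;

	while (processed < len) {
		unsigned int slice;

		if (ofs >= BLKSIZ) {	/* crossed a boundary: "fix up" */
			blk++;
			ofs = 0;
		}
		slice = BLKSIZ - ofs;
		if (slice > len - processed)
			slice = len - processed;
		memcpy(dst + processed, &disk[blk][ofs], slice);
		ofs += slice;
		processed += slice;
	}
}

int main(void)
{
	char name[13] = { 0 };

	read_span(name, 0, 0, 12);	/* "user.comment" spans both blocks */
	printf("%s\n", name);
	return 0;
}
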
325 struct xattr_iter it;
335 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
337 return (it->index != entry->e_name_index ||
338 it->name.len != entry->e_name_len) ? -ENOATTR : 0;
344 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
346 return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
352 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
353 int err = it->buffer_size < value_sz ? -ERANGE : 0;
355 it->buffer_size = value_sz;
356 return !it->buffer ? 1 : err;
363 struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
365 memcpy(it->buffer + processed, buf, len);
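
Each getxattr callback above (file lines 335, 344, 352, 363) recovers its per-request state with container_of(_it, struct getxattr_iter, it), which only works because the generic struct xattr_iter is embedded as a member of the larger iterator. Here is a self-contained sketch of that embedding pattern; container_of is the usual kernel macro re-declared for userspace, and the callback name plus any field not visible in the listing are made up:

#include <stddef.h>
#include <stdio.h>

/* userspace stand-in for the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* generic cursor over xattr data; field names follow the listing */
struct xattr_iter {
	unsigned int blkaddr;	/* block holding the current position */
	unsigned int ofs;	/* byte offset inside that block */
};

/* per-request state wrapping the generic iterator ("it" member) */
struct getxattr_iter {
	struct xattr_iter it;
	char *buffer;		/* destination buffer, NULL for size query */
	int buffer_size;
};

/* a callback only receives the inner iterator... */
static int xattr_valuecopy(struct xattr_iter *_it, unsigned int processed,
			   const char *buf, unsigned int len)
{
	/* ...and climbs back to the request-specific state */
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
	unsigned int i;

	if ((int)(processed + len) > it->buffer_size)
		return -1;
	for (i = 0; i < len; i++)
		it->buffer[processed + i] = buf[i];
	return 0;
}

int main(void)
{
	char out[16] = { 0 };
	struct getxattr_iter gi = { .buffer = out, .buffer_size = sizeof(out) };

	xattr_valuecopy(&gi.it, 0, "value", 5);
	printf("%s\n", out);	/* prints "value" */
	return 0;
}
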
375 static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
380 ret = inline_xattr_iter_begin(&it->it, inode);
386 ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
390 xattr_iter_end_final(&it->it);
392 return ret ? ret : it->buffer_size;
395 static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
407 it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
409 if (!i || blkaddr != it->it.blkaddr) {
411 xattr_iter_end(&it->it, true);
413 it->it.page = erofs_get_meta_page(sb, blkaddr);
414 if (IS_ERR(it->it.page))
415 return PTR_ERR(it->it.page);
417 it->it.kaddr = kmap_atomic(it->it.page);
418 it->it.blkaddr = blkaddr;
421 ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
426 xattr_iter_end_final(&it->it);
428 return ret ? ret : it->buffer_size;
446 struct getxattr_iter it;
455 it.index = index;
457 it.name.len = strlen(name);
458 if (it.name.len > EROFS_NAME_LEN)
460 it.name.name = name;
462 it.buffer = buffer;
463 it.buffer_size = buffer_size;
465 it.it.sb = inode->i_sb;
466 ret = inline_getxattr(inode, &it);
468 ret = shared_getxattr(inode, &it);
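
The erofs_getxattr() lines (446-468) show how a lookup is set up: index, name, buffer and buffer_size are filled in, it.it.sb points at the superblock, and the inline xattr area is tried before the shared xattr blocks. From userspace this whole path sits behind getxattr(2); a short illustration of the buffer contract those lines implement, with a made-up path and attribute name:

#include <stdio.h>
#include <sys/types.h>
#include <sys/xattr.h>	/* getxattr(2), which erofs_getxattr() serves */

int main(void)
{
	const char *path = "/mnt/erofs/file";	/* made-up path */
	char value[256];
	ssize_t n;

	/* size query first: a zero-length buffer returns the value length,
	 * the userspace view of the !it->buffer handling at file line 356 */
	n = getxattr(path, "user.comment", NULL, 0);
	if (n < 0) {
		perror("getxattr(size)");
		return 1;
	}

	n = getxattr(path, "user.comment", value, sizeof(value));
	if (n < 0) {
		/* ERANGE here corresponds to the buffer_size < value_sz
		 * check shown at file line 353 */
		perror("getxattr");
		return 1;
	}
	printf("user.comment = %.*s\n", (int)n, value);
	return 0;
}
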
530 struct xattr_iter it;
540 struct listxattr_iter *it =
541 container_of(_it, struct listxattr_iter, it);
548 if (!h || (h->list && !h->list(it->dentry)))
554 if (!it->buffer) {
555 it->buffer_ofs += prefix_len + entry->e_name_len + 1;
559 if (it->buffer_ofs + prefix_len
560 + entry->e_name_len + 1 > it->buffer_size)
563 memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
564 it->buffer_ofs += prefix_len;
571 struct listxattr_iter *it =
572 container_of(_it, struct listxattr_iter, it);
574 memcpy(it->buffer + it->buffer_ofs, buf, len);
575 it->buffer_ofs += len;
582 struct listxattr_iter *it =
583 container_of(_it, struct listxattr_iter, it);
585 it->buffer[it->buffer_ofs++] = '\0';
596 static int inline_listxattr(struct listxattr_iter *it)
601 ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
607 ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
611 xattr_iter_end_final(&it->it);
612 return ret ? ret : it->buffer_ofs;
615 static int shared_listxattr(struct listxattr_iter *it)
617 struct inode *const inode = d_inode(it->dentry);
628 it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
629 if (!i || blkaddr != it->it.blkaddr) {
631 xattr_iter_end(&it->it, true);
633 it->it.page = erofs_get_meta_page(sb, blkaddr);
634 if (IS_ERR(it->it.page))
635 return PTR_ERR(it->it.page);
637 it->it.kaddr = kmap_atomic(it->it.page);
638 it->it.blkaddr = blkaddr;
641 ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
646 xattr_iter_end_final(&it->it);
648 return ret ? ret : it->buffer_ofs;
655 struct listxattr_iter it;
663 it.dentry = dentry;
664 it.buffer = buffer;
665 it.buffer_size = buffer_size;
666 it.buffer_ofs = 0;
668 it.it.sb = dentry->d_sb;
670 ret = inline_listxattr(&it);
673 return shared_listxattr(&it);
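
The listxattr half mirrors getxattr: the name callback copies prefix and name into the buffer, buffer_ofs tracks the running length, a NULL buffer only accumulates the size needed (file lines 554-555), and every name is terminated with '\0' (line 585). A userspace consumer of that format through listxattr(2), again with a made-up mount path:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/xattr.h>	/* listxattr(2), served by erofs_listxattr() */

int main(void)
{
	const char *path = "/mnt/erofs/file";	/* made-up path */
	char *names;
	ssize_t total, i = 0;

	/* size query: a NULL list buffer returns the bytes needed, the
	 * userspace view of the !it->buffer branch at file line 554 */
	total = listxattr(path, NULL, 0);
	if (total < 0) {
		perror("listxattr(size)");
		return 1;
	}
	if (total == 0)
		return 0;	/* no extended attributes */

	names = malloc(total);
	if (!names)
		return 1;
	total = listxattr(path, names, total);
	if (total < 0) {
		perror("listxattr");
		free(names);
		return 1;
	}

	/* the kernel side emits "prefix.name\0" records back to back, which
	 * is why a '\0' is appended after every name (file line 585) */
	while (i < total) {
		printf("%s\n", names + i);
		i += (ssize_t)strlen(names + i) + 1;
	}
	free(names);
	return 0;
}
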