/* xref: /kernel/linux/linux-5.10/fs/erofs/xattr.c (revision 8c2ecf20) */
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2017-2018 HUAWEI, Inc.
4 *             https://www.huawei.com/
5 * Created by Gao Xiang <gaoxiang25@huawei.com>
6 */
7#include <linux/security.h>
8#include "xattr.h"
9
/*
 * Cursor used to walk on-disk xattr metadata block by block: tracks the
 * currently pinned meta page and the byte offset within that block.
 */
struct xattr_iter {
	struct super_block *sb;
	struct page *page;	/* currently pinned & mapped meta page */
	void *kaddr;		/* kernel mapping of @page */

	erofs_blk_t blkaddr;	/* block address of @page */
	unsigned int ofs;	/* byte offset within the current block */
};
18
19static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
20{
21	/* the only user of kunmap() is 'init_inode_xattrs' */
22	if (!atomic)
23		kunmap(it->page);
24	else
25		kunmap_atomic(it->kaddr);
26
27	unlock_page(it->page);
28	put_page(it->page);
29}
30
31static inline void xattr_iter_end_final(struct xattr_iter *it)
32{
33	if (!it->page)
34		return;
35
36	xattr_iter_end(it, true);
37}
38
/*
 * Parse the inode's on-disk xattr ibody header once and cache the shared
 * xattr index array in vi->xattr_shared_xattrs.  Concurrent initializers
 * are serialized via the EROFS_I_BL_XATTR_BIT bit-lock; completion is
 * published through EROFS_I_EA_INITED_BIT (paired smp_mb()s below).
 * Returns 0 on success or a negative errno (-ENOATTR means no xattrs).
 */
static int init_inode_xattrs(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct xattr_iter it;
	unsigned int i;
	struct erofs_xattr_ibody_header *ih;
	struct super_block *sb;
	struct erofs_sb_info *sbi;
	bool atomic_map;
	int ret = 0;

	/* the most case is that xattrs of this inode are initialized. */
	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	/* someone has initialized xattrs for us? */
	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
		goto out_unlock;

	/*
	 * bypass all xattr operations if ->xattr_isize is not greater than
	 * sizeof(struct erofs_xattr_ibody_header), in detail:
	 * 1) it is not enough to contain erofs_xattr_ibody_header then
	 *    ->xattr_isize should be 0 (it means no xattr);
	 * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk
	 *    undefined right now (maybe use later with some new sb feature).
	 */
	if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
		erofs_err(inode->i_sb,
			  "xattr_isize %d of nid %llu is not supported yet",
			  vi->xattr_isize, vi->nid);
		ret = -EOPNOTSUPP;
		goto out_unlock;
	} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
		if (vi->xattr_isize) {
			erofs_err(inode->i_sb,
				  "bogus xattr ibody @ nid %llu", vi->nid);
			DBG_BUGON(1);
			ret = -EFSCORRUPTED;
			goto out_unlock;	/* xattr ondisk layout error */
		}
		ret = -ENOATTR;
		goto out_unlock;
	}

	sb = inode->i_sb;
	sbi = EROFS_SB(sb);
	/* the ibody header sits right after the inode metadata on disk */
	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);

	it.page = erofs_get_meta_page(sb, it.blkaddr);
	if (IS_ERR(it.page)) {
		ret = PTR_ERR(it.page);
		goto out_unlock;
	}

	/* read in shared xattr array (non-atomic, see kmalloc below) */
	it.kaddr = kmap(it.page);
	atomic_map = false;

	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);

	vi->xattr_shared_count = ih->h_shared_count;
	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
						sizeof(uint), GFP_KERNEL);
	if (!vi->xattr_shared_xattrs) {
		xattr_iter_end(&it, atomic_map);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* let's skip ibody header */
	it.ofs += sizeof(struct erofs_xattr_ibody_header);

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		if (it.ofs >= EROFS_BLKSIZ) {
			/* cannot be unaligned */
			DBG_BUGON(it.ofs != EROFS_BLKSIZ);
			xattr_iter_end(&it, atomic_map);

			/* the shared index array continues on the next block */
			it.page = erofs_get_meta_page(sb, ++it.blkaddr);
			if (IS_ERR(it.page)) {
				kfree(vi->xattr_shared_xattrs);
				vi->xattr_shared_xattrs = NULL;
				ret = PTR_ERR(it.page);
				goto out_unlock;
			}

			it.kaddr = kmap_atomic(it.page);
			atomic_map = true;
			it.ofs = 0;
		}
		vi->xattr_shared_xattrs[i] =
			le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
		it.ofs += sizeof(__le32);
	}
	xattr_iter_end(&it, atomic_map);

	/* paired with smp_mb() at the beginning of the function. */
	smp_mb();
	set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);

out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_XATTR_BIT, &vi->flags);
	return ret;
}
154
155/*
156 * the general idea for these return values is
157 * if    0 is returned, go on processing the current xattr;
158 *       1 (> 0) is returned, skip this round to process the next xattr;
 *    -err (< 0) is returned, an error (maybe ENOATTR) occurred
160 *                            and need to be handled
161 */
struct xattr_iter_handlers {
	/* called once per entry header; may reject the whole entry */
	int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
	/* called for each in-block slice of the name */
	int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
		    unsigned int len);
	/* prepare for a value of @value_sz bytes; > 0 skips the value */
	int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
	/* called for each in-block slice of the value (may be NULL) */
	void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
		      unsigned int len);
};
170
/*
 * Make sure it->ofs points inside the currently mapped block: once ofs
 * has run past EROFS_BLKSIZ, release the current meta page and map the
 * page the offset actually falls into (atomic mapping on both sides).
 * On failure it->page is cleared so xattr_iter_end_final() stays safe.
 * Returns 0 on success or a negative errno.
 */
static inline int xattr_iter_fixup(struct xattr_iter *it)
{
	if (it->ofs < EROFS_BLKSIZ)
		return 0;

	xattr_iter_end(it, true);

	it->blkaddr += erofs_blknr(it->ofs);

	it->page = erofs_get_meta_page(it->sb, it->blkaddr);
	if (IS_ERR(it->page)) {
		int err = PTR_ERR(it->page);

		it->page = NULL;
		return err;
	}

	it->kaddr = kmap_atomic(it->page);
	it->ofs = erofs_blkoff(it->ofs);
	return 0;
}
192
/*
 * Position @it at the first inline xattr entry of @inode (just past the
 * inline xattr header inside the inode metadata) and map the matching
 * meta page atomically.
 * Returns the number of inline xattr bytes available, -ENOATTR when the
 * inode carries no inline xattrs, or a negative errno on failure.
 */
static int inline_xattr_iter_begin(struct xattr_iter *it,
				   struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
	unsigned int xattr_header_sz, inline_xattr_ofs;

	xattr_header_sz = inlinexattr_header_size(inode);
	if (xattr_header_sz >= vi->xattr_isize) {
		/* the header alone can never exceed the recorded size */
		DBG_BUGON(xattr_header_sz > vi->xattr_isize);
		return -ENOATTR;
	}

	inline_xattr_ofs = vi->inode_isize + xattr_header_sz;

	it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);

	it->page = erofs_get_meta_page(inode->i_sb, it->blkaddr);
	if (IS_ERR(it->page))
		return PTR_ERR(it->page);

	it->kaddr = kmap_atomic(it->page);
	return vi->xattr_isize - xattr_header_sz;
}
218
/*
 * Regardless of success or failure, `xattr_foreach' will end up with
 * `ofs' pointing to the next xattr item rather than an arbitrary position.
 *
 * Walks a single on-disk xattr entry (header, name, then value) and
 * drives the @op callbacks for each part; every part may cross a block
 * boundary, which is handled via xattr_iter_fixup().  @tlimit, when
 * non-NULL, is the remaining inline xattr byte budget and is reduced by
 * this entry's size.  Returns 0 or a negative errno (callback results
 * > 0, meaning "skip", are folded to 0).
 */
static int xattr_foreach(struct xattr_iter *it,
			 const struct xattr_iter_handlers *op,
			 unsigned int *tlimit)
{
	struct erofs_xattr_entry entry;
	unsigned int value_sz, processed, slice;
	int err;

	/* 0. fixup blkaddr, ofs, ipage */
	err = xattr_iter_fixup(it);
	if (err)
		return err;

	/*
	 * 1. read xattr entry to the memory,
	 *    since we do EROFS_XATTR_ALIGN
	 *    therefore entry should be in the page
	 */
	entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
	if (tlimit) {
		unsigned int entry_sz = erofs_xattr_entry_size(&entry);

		/* xattr on-disk corruption: xattr entry beyond xattr_isize */
		if (*tlimit < entry_sz) {
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		*tlimit -= entry_sz;
	}

	it->ofs += sizeof(struct erofs_xattr_entry);
	value_sz = le16_to_cpu(entry.e_value_size);

	/* handle entry */
	err = op->entry(it, &entry);
	if (err) {
		/* entry rejected: step over its name and value entirely */
		it->ofs += entry.e_name_len + value_sz;
		goto out;
	}

	/* 2. handle xattr name (ofs will finally be at the end of name) */
	processed = 0;

	while (processed < entry.e_name_len) {
		if (it->ofs >= EROFS_BLKSIZ) {
			DBG_BUGON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		/* feed at most up to the end of the current block */
		slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
			      entry.e_name_len - processed);

		/* handle name */
		err = op->name(it, processed, it->kaddr + it->ofs, slice);
		if (err) {
			/* name rejected: skip its remainder plus the value */
			it->ofs += entry.e_name_len - processed + value_sz;
			goto out;
		}

		it->ofs += slice;
		processed += slice;
	}

	/* 3. handle xattr value */
	processed = 0;

	if (op->alloc_buffer) {
		err = op->alloc_buffer(it, value_sz);
		if (err) {
			it->ofs += value_sz;
			goto out;
		}
	}

	while (processed < value_sz) {
		if (it->ofs >= EROFS_BLKSIZ) {
			DBG_BUGON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
			      value_sz - processed);
		op->value(it, processed, it->kaddr + it->ofs, slice);
		it->ofs += slice;
		processed += slice;
	}

out:
	/* xattrs should be 4-byte aligned (on-disk constraint) */
	it->ofs = EROFS_XATTR_ALIGN(it->ofs);
	return err < 0 ? err : 0;
}
323
/*
 * State for one getxattr lookup: the base iterator plus the requested
 * name/index and the caller-supplied output buffer.
 */
struct getxattr_iter {
	struct xattr_iter it;

	char *buffer;		/* output buffer (NULL for a size query) */
	int buffer_size, index;	/* buffer capacity; name index to match */
	struct qstr name;	/* xattr name being looked up */
};
331
332static int xattr_entrymatch(struct xattr_iter *_it,
333			    struct erofs_xattr_entry *entry)
334{
335	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
336
337	return (it->index != entry->e_name_index ||
338		it->name.len != entry->e_name_len) ? -ENOATTR : 0;
339}
340
341static int xattr_namematch(struct xattr_iter *_it,
342			   unsigned int processed, char *buf, unsigned int len)
343{
344	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
345
346	return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
347}
348
349static int xattr_checkbuffer(struct xattr_iter *_it,
350			     unsigned int value_sz)
351{
352	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
353	int err = it->buffer_size < value_sz ? -ERANGE : 0;
354
355	it->buffer_size = value_sz;
356	return !it->buffer ? 1 : err;
357}
358
359static void xattr_copyvalue(struct xattr_iter *_it,
360			    unsigned int processed,
361			    char *buf, unsigned int len)
362{
363	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
364
365	memcpy(it->buffer + processed, buf, len);
366}
367
/* callbacks implementing xattr lookup (getxattr) over xattr_foreach() */
static const struct xattr_iter_handlers find_xattr_handlers = {
	.entry = xattr_entrymatch,
	.name = xattr_namematch,
	.alloc_buffer = xattr_checkbuffer,
	.value = xattr_copyvalue
};
374
375static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
376{
377	int ret;
378	unsigned int remaining;
379
380	ret = inline_xattr_iter_begin(&it->it, inode);
381	if (ret < 0)
382		return ret;
383
384	remaining = ret;
385	while (remaining) {
386		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
387		if (ret != -ENOATTR)
388			break;
389	}
390	xattr_iter_end_final(&it->it);
391
392	return ret ? ret : it->buffer_size;
393}
394
/*
 * Search the inode's shared xattrs for the requested name: walk the
 * cached shared xattr index array, mapping each referenced meta block
 * (the mapping is reused while consecutive entries share a block).
 * Returns the value size on success, -ENOATTR if absent, or another
 * negative errno.
 */
static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	unsigned int i;
	int ret = -ENOATTR;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);

		/* (re)map only when this entry lives in a different block */
		if (!i || blkaddr != it->it.blkaddr) {
			if (i)
				xattr_iter_end(&it->it, true);

			it->it.page = erofs_get_meta_page(sb, blkaddr);
			if (IS_ERR(it->it.page))
				return PTR_ERR(it->it.page);

			it->it.kaddr = kmap_atomic(it->it.page);
			it->it.blkaddr = blkaddr;
		}

		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
		if (ret != -ENOATTR)
			break;
	}
	if (vi->xattr_shared_count)
		xattr_iter_end_final(&it->it);

	return ret ? ret : it->buffer_size;
}
430
431static bool erofs_xattr_user_list(struct dentry *dentry)
432{
433	return test_opt(&EROFS_SB(dentry->d_sb)->ctx, XATTR_USER);
434}
435
436static bool erofs_xattr_trusted_list(struct dentry *dentry)
437{
438	return capable(CAP_SYS_ADMIN);
439}
440
441int erofs_getxattr(struct inode *inode, int index,
442		   const char *name,
443		   void *buffer, size_t buffer_size)
444{
445	int ret;
446	struct getxattr_iter it;
447
448	if (!name)
449		return -EINVAL;
450
451	ret = init_inode_xattrs(inode);
452	if (ret)
453		return ret;
454
455	it.index = index;
456
457	it.name.len = strlen(name);
458	if (it.name.len > EROFS_NAME_LEN)
459		return -ERANGE;
460	it.name.name = name;
461
462	it.buffer = buffer;
463	it.buffer_size = buffer_size;
464
465	it.it.sb = inode->i_sb;
466	ret = inline_getxattr(inode, &it);
467	if (ret == -ENOATTR)
468		ret = shared_getxattr(inode, &it);
469	return ret;
470}
471
472static int erofs_xattr_generic_get(const struct xattr_handler *handler,
473				   struct dentry *unused, struct inode *inode,
474				   const char *name, void *buffer, size_t size)
475{
476	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
477
478	switch (handler->flags) {
479	case EROFS_XATTR_INDEX_USER:
480		if (!test_opt(&sbi->ctx, XATTR_USER))
481			return -EOPNOTSUPP;
482		break;
483	case EROFS_XATTR_INDEX_TRUSTED:
484		break;
485	case EROFS_XATTR_INDEX_SECURITY:
486		break;
487	default:
488		return -EINVAL;
489	}
490
491	return erofs_getxattr(inode, handler->flags, name, buffer, size);
492}
493
/* handler for "user." xattrs (gated by the user_xattr mount option) */
const struct xattr_handler erofs_xattr_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.flags	= EROFS_XATTR_INDEX_USER,
	.list	= erofs_xattr_user_list,
	.get	= erofs_xattr_generic_get,
};
500
/* handler for "trusted." xattrs (listing restricted to CAP_SYS_ADMIN) */
const struct xattr_handler erofs_xattr_trusted_handler = {
	.prefix	= XATTR_TRUSTED_PREFIX,
	.flags	= EROFS_XATTR_INDEX_TRUSTED,
	.list	= erofs_xattr_trusted_list,
	.get	= erofs_xattr_generic_get,
};
507
#ifdef CONFIG_EROFS_FS_SECURITY
/* handler for "security." xattrs (no .list: always shown in listxattr) */
const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.flags	= EROFS_XATTR_INDEX_SECURITY,
	.get	= erofs_xattr_generic_get,
};
#endif
515
/* NULL-terminated table of all xattr namespaces erofs supports */
const struct xattr_handler *erofs_xattr_handlers[] = {
	&erofs_xattr_user_handler,
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
	&erofs_xattr_security_handler,
#endif
	NULL,
};
528
/*
 * State for one listxattr pass: the base iterator plus the dentry
 * being listed and the caller-supplied name buffer.
 */
struct listxattr_iter {
	struct xattr_iter it;

	struct dentry *dentry;
	char *buffer;			/* output buffer (NULL: size query) */
	int buffer_size, buffer_ofs;	/* capacity and bytes emitted so far */
};
536
537static int xattr_entrylist(struct xattr_iter *_it,
538			   struct erofs_xattr_entry *entry)
539{
540	struct listxattr_iter *it =
541		container_of(_it, struct listxattr_iter, it);
542	unsigned int prefix_len;
543	const char *prefix;
544
545	const struct xattr_handler *h =
546		erofs_xattr_handler(entry->e_name_index);
547
548	if (!h || (h->list && !h->list(it->dentry)))
549		return 1;
550
551	prefix = xattr_prefix(h);
552	prefix_len = strlen(prefix);
553
554	if (!it->buffer) {
555		it->buffer_ofs += prefix_len + entry->e_name_len + 1;
556		return 1;
557	}
558
559	if (it->buffer_ofs + prefix_len
560		+ entry->e_name_len + 1 > it->buffer_size)
561		return -ERANGE;
562
563	memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
564	it->buffer_ofs += prefix_len;
565	return 0;
566}
567
568static int xattr_namelist(struct xattr_iter *_it,
569			  unsigned int processed, char *buf, unsigned int len)
570{
571	struct listxattr_iter *it =
572		container_of(_it, struct listxattr_iter, it);
573
574	memcpy(it->buffer + it->buffer_ofs, buf, len);
575	it->buffer_ofs += len;
576	return 0;
577}
578
579static int xattr_skipvalue(struct xattr_iter *_it,
580			   unsigned int value_sz)
581{
582	struct listxattr_iter *it =
583		container_of(_it, struct listxattr_iter, it);
584
585	it->buffer[it->buffer_ofs++] = '\0';
586	return 1;
587}
588
/* callbacks implementing name enumeration (listxattr) over xattr_foreach() */
static const struct xattr_iter_handlers list_xattr_handlers = {
	.entry = xattr_entrylist,
	.name = xattr_namelist,
	.alloc_buffer = xattr_skipvalue,
	.value = NULL
};
595
596static int inline_listxattr(struct listxattr_iter *it)
597{
598	int ret;
599	unsigned int remaining;
600
601	ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
602	if (ret < 0)
603		return ret;
604
605	remaining = ret;
606	while (remaining) {
607		ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
608		if (ret)
609			break;
610	}
611	xattr_iter_end_final(&it->it);
612	return ret ? ret : it->buffer_ofs;
613}
614
/*
 * Enumerate the inode's shared xattrs: walk the cached shared index
 * array, mapping each referenced meta block (the mapping is reused
 * while consecutive entries share a block).
 * Returns total bytes emitted on success or a negative errno.
 */
static int shared_listxattr(struct listxattr_iter *it)
{
	struct inode *const inode = d_inode(it->dentry);
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	unsigned int i;
	int ret = 0;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
		/* (re)map only when this entry lives in a different block */
		if (!i || blkaddr != it->it.blkaddr) {
			if (i)
				xattr_iter_end(&it->it, true);

			it->it.page = erofs_get_meta_page(sb, blkaddr);
			if (IS_ERR(it->it.page))
				return PTR_ERR(it->it.page);

			it->it.kaddr = kmap_atomic(it->it.page);
			it->it.blkaddr = blkaddr;
		}

		ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
		if (ret)
			break;
	}
	if (vi->xattr_shared_count)
		xattr_iter_end_final(&it->it);

	return ret ? ret : it->buffer_ofs;
}
650
651ssize_t erofs_listxattr(struct dentry *dentry,
652			char *buffer, size_t buffer_size)
653{
654	int ret;
655	struct listxattr_iter it;
656
657	ret = init_inode_xattrs(d_inode(dentry));
658	if (ret == -ENOATTR)
659		return 0;
660	if (ret)
661		return ret;
662
663	it.dentry = dentry;
664	it.buffer = buffer;
665	it.buffer_size = buffer_size;
666	it.buffer_ofs = 0;
667
668	it.it.sb = dentry->d_sb;
669
670	ret = inline_listxattr(&it);
671	if (ret < 0 && ret != -ENOATTR)
672		return ret;
673	return shared_listxattr(&it);
674}
675
676#ifdef CONFIG_EROFS_FS_POSIX_ACL
/*
 * Read the POSIX ACL of @type from the matching ACL xattr.
 * Returns the decoded acl, NULL when the inode has none, or an
 * ERR_PTR on failure.
 */
struct posix_acl *erofs_get_acl(struct inode *inode, int type)
{
	struct posix_acl *acl;
	int prefix, rc;
	char *value = NULL;

	/* map the ACL type onto the on-disk xattr name index */
	switch (type) {
	case ACL_TYPE_ACCESS:
		prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
		break;
	case ACL_TYPE_DEFAULT:
		prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	/* first call sizes the ACL xattr, second call fetches it */
	rc = erofs_getxattr(inode, prefix, "", NULL, 0);
	if (rc > 0) {
		value = kmalloc(rc, GFP_KERNEL);
		if (!value)
			return ERR_PTR(-ENOMEM);
		rc = erofs_getxattr(inode, prefix, "", value, rc);
	}

	/* a missing ACL xattr simply means "no ACL", not an error */
	if (rc == -ENOATTR)
		acl = NULL;
	else if (rc < 0)
		acl = ERR_PTR(rc);
	else
		acl = posix_acl_from_xattr(&init_user_ns, value, rc);
	kfree(value);
	return acl;
}
711#endif
712
713