
Searched refs: m_len (Results 1 - 25 of 41), sorted by relevance

/kernel/linux/linux-5.10/lib/lzo/
lzo1x_compress.c
41 size_t t, m_len, m_off; in lzo1x_1_do_compress() local
164 m_len = 4; in lzo1x_1_do_compress()
168 v = get_unaligned((const u64 *) (ip + m_len)) ^ in lzo1x_1_do_compress()
169 get_unaligned((const u64 *) (m_pos + m_len)); in lzo1x_1_do_compress()
172 m_len += 8; in lzo1x_1_do_compress()
173 v = get_unaligned((const u64 *) (ip + m_len)) ^ in lzo1x_1_do_compress()
174 get_unaligned((const u64 *) (m_pos + m_len)); in lzo1x_1_do_compress()
175 if (unlikely(ip + m_len >= ip_end)) in lzo1x_1_do_compress()
180 m_len += (unsigned) __builtin_ctzll(v) / 8; in lzo1x_1_do_compress()
182 m_len in lzo1x_1_do_compress()
[all...]
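
Note: the lzo1x_1_do_compress() hits above are the match-length scan. Once a 4-byte match is found, m_len is extended eight bytes at a time by XOR-ing unaligned 64-bit loads from the input position and the match position, and __builtin_ctzll() on the first non-zero XOR tells how many extra bytes still matched. Below is a minimal standalone sketch of that pattern (not the kernel code); it assumes a little-endian host, that the caller guarantees 8 readable bytes per step, and uses memcpy() in place of the kernel's get_unaligned().

/* Sketch only: extend an LZO-style match length 8 bytes at a time. */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t extend_match(const uint8_t *ip, const uint8_t *m_pos,
                           size_t m_len, const uint8_t *ip_end)
{
        while (ip + m_len + 8 <= ip_end) {
                uint64_t a, b;

                memcpy(&a, ip + m_len, 8);      /* unaligned 8-byte loads */
                memcpy(&b, m_pos + m_len, 8);
                if (a != b)
                        /* trailing zero bits of the XOR = matching bytes (little endian) */
                        return m_len + (size_t)__builtin_ctzll(a ^ b) / 8;
                m_len += 8;                     /* whole word matched, keep scanning */
        }
        return m_len;
}
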
/kernel/linux/linux-6.6/lib/lzo/
lzo1x_compress.c
41 size_t t, m_len, m_off; in lzo1x_1_do_compress() local
162 m_len = 4; in lzo1x_1_do_compress()
166 v = get_unaligned((const u64 *) (ip + m_len)) ^ in lzo1x_1_do_compress()
167 get_unaligned((const u64 *) (m_pos + m_len)); in lzo1x_1_do_compress()
170 m_len += 8; in lzo1x_1_do_compress()
171 v = get_unaligned((const u64 *) (ip + m_len)) ^ in lzo1x_1_do_compress()
172 get_unaligned((const u64 *) (m_pos + m_len)); in lzo1x_1_do_compress()
173 if (unlikely(ip + m_len >= ip_end)) in lzo1x_1_do_compress()
178 m_len += (unsigned) __builtin_ctzll(v) / 8; in lzo1x_1_do_compress()
180 m_len in lzo1x_1_do_compress()
[all...]
/kernel/linux/linux-5.10/fs/ext4/
readpage.c
247 map.m_len = 0; in ext4_mpage_readpages()
276 block_in_file < (map.m_lblk + map.m_len)) { in ext4_mpage_readpages()
278 unsigned last = map.m_len - map_offset; in ext4_mpage_readpages()
302 map.m_len = last_block - block_in_file; in ext4_mpage_readpages()
328 if (relative_block == map.m_len) { in ext4_mpage_readpages()
391 (relative_block == map.m_len)) || in ext4_mpage_readpages()
extents.c
3344 int allocated = map->m_len; in ext4_split_extent()
3352 if (map->m_lblk + map->m_len < ee_block + ee_len) { in ext4_split_extent()
3361 map->m_lblk + map->m_len, split_flag1, flags1); in ext4_split_extent()
3415 * of the logical span [map->m_lblk, map->m_lblk + map->m_len).
3420 * It is guaranteed to be >= map->m_len.
3435 unsigned int ee_len, depth, map_len = map->m_len; in ext4_ext_convert_to_initialized()
3604 split_map.m_len = map->m_len; in ext4_ext_convert_to_initialized()
3606 if (max_zeroout && (allocated > split_map.m_len)) { in ext4_ext_convert_to_initialized()
3611 split_map.m_len); in ext4_ext_convert_to_initialized()
[all...]
inode.c
416 if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) { in __check_block_validity()
420 map->m_pblk, map->m_len); in __check_block_validity()
470 * We don't check m_len because extent will be collpased in status in ext4_map_blocks_es_recheck()
471 * tree. So the m_len might not equal. in ext4_map_blocks_es_recheck()
479 inode->i_ino, es_map->m_lblk, es_map->m_len, in ext4_map_blocks_es_recheck()
481 map->m_len, map->m_pblk, map->m_flags, in ext4_map_blocks_es_recheck()
504 * that case, @map is returned as unmapped but we still do fill map->m_len to
523 flags, map->m_len, (unsigned long) map->m_lblk); in ext4_map_blocks()
526 * ext4_map_blocks returns an int, and m_len is an unsigned int in ext4_map_blocks()
528 if (unlikely(map->m_len > INT_MA in ext4_map_blocks()
[all...]
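
Note: in the readpage.c, extents.c and inode.c hits above, m_len is the in/out length of an ext4_map_blocks request: the caller fills m_lblk and m_len with the logical run it wants, and on return m_pblk, m_flags and m_len describe the contiguous run that was actually mapped (a positive return is the mapped block count, 0 is a hole, negative is an error). The user-space mock below only illustrates that calling convention; the struct and map_blocks() helper are stand-ins, not the kernel API.

#include <stdio.h>

struct map_blocks {                 /* mirrors the ext4_map_blocks fields seen above */
        unsigned long long m_pblk;  /* out: first physical block */
        unsigned int m_lblk;        /* in:  first logical block */
        unsigned int m_len;         /* in:  blocks wanted; out: blocks mapped */
        unsigned int m_flags;       /* out: mapped/unwritten-style flags */
};

/* Pretend logical block N maps to physical block N + 1000 and that
 * extents are never longer than 8 blocks. */
static int map_blocks(struct map_blocks *map)
{
        if (map->m_len > 8)
                map->m_len = 8;     /* clamp to the extent we "found" */
        map->m_pblk = 1000ULL + map->m_lblk;
        map->m_flags = 1;           /* "mapped" */
        return (int)map->m_len;
}

int main(void)
{
        struct map_blocks map = { .m_lblk = 42, .m_len = 32 };
        int ret = map_blocks(&map);

        printf("asked for 32 blocks at lblk 42, got %d at pblk %llu\n",
               ret, map.m_pblk);
        return 0;
}
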
fast_commit.c
833 map.m_len = new_blk_size - cur_lblk_off + 1; in ext4_fc_write_inode_data()
838 if (map.m_len == 0) { in ext4_fc_write_inode_data()
846 lrange.fc_len = cpu_to_le32(map.m_len); in ext4_fc_write_inode_data()
855 map.m_len = min(max, map.m_len); in ext4_fc_write_inode_data()
860 ex->ee_len = cpu_to_le16(map.m_len); in ext4_fc_write_inode_data()
871 cur_lblk_off += map.m_len; in ext4_fc_write_inode_data()
1668 map.m_len = remaining; in ext4_fc_replay_add_range()
1684 newex.ee_len = cpu_to_le16(map.m_len); in ext4_fc_replay_add_range()
1704 ret = ext4_ext_replay_update_ex(inode, cur, map.m_len, in ext4_fc_replay_add_range()
[all...]
indirect.c
543 trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); in ext4_ind_map_blocks()
559 while (count < map->m_len && count <= blocks_to_boundary) { in ext4_ind_map_blocks()
589 map->m_len = min_t(unsigned int, map->m_len, count); in ext4_ind_map_blocks()
628 map->m_len, blocks_to_boundary); in ext4_ind_map_blocks()
663 map->m_len = count; in ext4_ind_map_blocks()
dir.c
166 map.m_len = 1; in ext4_readdir()
169 /* m_len should never be zero but let's avoid in ext4_readdir()
171 if (map.m_len == 0) in ext4_readdir()
172 map.m_len = 1; in ext4_readdir()
173 ctx->pos += map.m_len * sb->s_blocksize; in ext4_readdir()
block_validity.c
167 map.m_len = num - i; in ext4_protect_reserved_inode()
182 map.m_pblk + map.m_len - 1); in ext4_protect_reserved_inode()
file.c
200 map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits); in ext4_overwrite_io()
201 blklen = map.m_len; in ext4_overwrite_io()
/kernel/linux/linux-6.6/fs/ext4/
readpage.c
241 map.m_len = 0; in ext4_mpage_readpages()
269 block_in_file < (map.m_lblk + map.m_len)) { in ext4_mpage_readpages()
271 unsigned last = map.m_len - map_offset; in ext4_mpage_readpages()
295 map.m_len = last_block - block_in_file; in ext4_mpage_readpages()
321 if (relative_block == map.m_len) { in ext4_mpage_readpages()
378 (relative_block == map.m_len)) || in ext4_mpage_readpages()
inode.c
385 if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) { in __check_block_validity()
389 map->m_pblk, map->m_len); in __check_block_validity()
439 * We don't check m_len because extent will be collpased in status in ext4_map_blocks_es_recheck()
440 * tree. So the m_len might not equal. in ext4_map_blocks_es_recheck()
448 inode->i_ino, es_map->m_lblk, es_map->m_len, in ext4_map_blocks_es_recheck()
450 map->m_len, map->m_pblk, map->m_flags, in ext4_map_blocks_es_recheck()
473 * that case, @map is returned as unmapped but we still do fill map->m_len to
492 flags, map->m_len, (unsigned long) map->m_lblk); in ext4_map_blocks()
495 * ext4_map_blocks returns an int, and m_len is an unsigned int in ext4_map_blocks()
497 if (unlikely(map->m_len > INT_MA in ext4_map_blocks()
[all...]
extents.c
3314 int allocated = map->m_len; in ext4_split_extent()
3322 if (map->m_lblk + map->m_len < ee_block + ee_len) { in ext4_split_extent()
3331 map->m_lblk + map->m_len, split_flag1, flags1); in ext4_split_extent()
3384 * of the logical span [map->m_lblk, map->m_lblk + map->m_len).
3389 * It is guaranteed to be >= map->m_len.
3404 unsigned int ee_len, depth, map_len = map->m_len; in ext4_ext_convert_to_initialized()
3573 split_map.m_len = map->m_len; in ext4_ext_convert_to_initialized()
3575 if (max_zeroout && (allocated > split_map.m_len)) { in ext4_ext_convert_to_initialized()
3580 split_map.m_len); in ext4_ext_convert_to_initialized()
[all...]
fast_commit.c
915 map.m_len = new_blk_size - cur_lblk_off + 1; in ext4_fc_write_inode_data()
920 if (map.m_len == 0) { in ext4_fc_write_inode_data()
928 lrange.fc_len = cpu_to_le32(map.m_len); in ext4_fc_write_inode_data()
937 map.m_len = min(max, map.m_len); in ext4_fc_write_inode_data()
942 ex->ee_len = cpu_to_le16(map.m_len); in ext4_fc_write_inode_data()
953 cur_lblk_off += map.m_len; in ext4_fc_write_inode_data()
1760 map.m_len = remaining; in ext4_fc_replay_add_range()
1776 newex.ee_len = cpu_to_le16(map.m_len); in ext4_fc_replay_add_range()
1795 ret = ext4_ext_replay_update_ex(inode, cur, map.m_len, in ext4_fc_replay_add_range()
[all...]
dir.c
180 map.m_len = 1; in ext4_readdir()
183 /* m_len should never be zero but let's avoid in ext4_readdir()
185 if (map.m_len == 0) in ext4_readdir()
186 map.m_len = 1; in ext4_readdir()
187 ctx->pos += map.m_len * sb->s_blocksize; in ext4_readdir()
block_validity.c
167 map.m_len = num - i; in ext4_protect_reserved_inode()
182 map.m_pblk + map.m_len - 1); in ext4_protect_reserved_inode()
indirect.c
545 trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); in ext4_ind_map_blocks()
561 while (count < map->m_len && count <= blocks_to_boundary) { in ext4_ind_map_blocks()
591 map->m_len = min_t(unsigned int, map->m_len, count); in ext4_ind_map_blocks()
630 map->m_len, blocks_to_boundary); in ext4_ind_map_blocks()
665 map->m_len = count; in ext4_ind_map_blocks()
/kernel/linux/linux-6.6/fs/f2fs/
data.c
1551 unsigned int maxblocks = map->m_len; in f2fs_map_blocks_cached()
1559 map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgoff); in f2fs_map_blocks_cached()
1562 *map->m_next_extent = pgoff + map->m_len; in f2fs_map_blocks_cached()
1567 map->m_pblk, map->m_len); in f2fs_map_blocks_cached()
1575 map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk); in f2fs_map_blocks_cached()
1589 unsigned int maxblocks = map->m_len; in f2fs_map_blocks()
1612 map->m_len = 0; in f2fs_map_blocks()
1720 if (map->m_len == 0) { in f2fs_map_blocks()
1727 map->m_len in f2fs_map_blocks()
[all...]
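
Note: the f2fs_map_blocks_cached() lines above clamp the requested m_len twice, first to what the cached extent actually covers (ei.fofs + ei.len - pgoff), then to the end of the device the physical run lands on (dev->end_blk + 1 - m_pblk). The helper below is a hypothetical, self-contained restatement of those two clamps; the parameter names only mirror the snippet and are not f2fs structures.

/* Sketch: limit a mapped run to the cached extent and to the device boundary. */
static unsigned int clamp_mapped_len(unsigned int maxblocks,
                                     unsigned int pgoff,          /* logical start of request */
                                     unsigned int ext_fofs,       /* cached extent: logical start */
                                     unsigned int ext_len,        /* cached extent: length */
                                     unsigned long long pblk,     /* mapped physical start */
                                     unsigned long long dev_end_blk)
{
        unsigned int len = maxblocks;

        if (ext_fofs + ext_len - pgoff < len)
                len = ext_fofs + ext_len - pgoff;               /* stay inside the extent */
        if (pblk + len > dev_end_blk + 1)
                len = (unsigned int)(dev_end_blk + 1 - pblk);   /* stay inside the device */
        return len;
}
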
file.c
1705 map.m_len = pg_end - pg_start; in f2fs_expand_inode_data()
1707 map.m_len++; in f2fs_expand_inode_data()
1709 if (!map.m_len) in f2fs_expand_inode_data()
1714 block_t sec_len = roundup(map.m_len, sec_blks); in f2fs_expand_inode_data()
1716 map.m_len = sec_blks; in f2fs_expand_inode_data()
1739 expanded += map.m_len; in f2fs_expand_inode_data()
1740 sec_len -= map.m_len; in f2fs_expand_inode_data()
1741 map.m_lblk += map.m_len; in f2fs_expand_inode_data()
1745 map.m_len = expanded; in f2fs_expand_inode_data()
1748 expanded = map.m_len; in f2fs_expand_inode_data()
[all...]
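
Note: in the f2fs_expand_inode_data() hits above, m_len drives a loop: for pinned files the range is pre-allocated one section at a time, with m_lblk advanced by the m_len that each mapping call actually delivered and the running total kept in expanded, which is written back into m_len at the end. The sketch below mirrors that walk with a hypothetical alloc_section() standing in for the real allocation call.

#include <stdio.h>

struct map {
        unsigned int m_lblk;    /* next logical block to allocate */
        unsigned int m_len;     /* in: blocks to allocate this round; out: blocks obtained */
};

static int alloc_section(struct map *map)
{
        /* pretend every non-empty allocation succeeds in full */
        return map->m_len ? 0 : -1;
}

/* Allocate nr_blocks starting at start, at most sec_blks per round. */
static unsigned int expand_range(unsigned int start, unsigned int nr_blocks,
                                 unsigned int sec_blks)
{
        struct map map = { .m_lblk = start };
        unsigned int remaining = nr_blocks, expanded = 0;

        while (remaining) {
                map.m_len = remaining < sec_blks ? remaining : sec_blks;
                if (alloc_section(&map))
                        break;                  /* on error, return what we already got */
                expanded   += map.m_len;
                remaining  -= map.m_len;
                map.m_lblk += map.m_len;        /* advance past the blocks just allocated */
        }
        return expanded;
}

int main(void)
{
        printf("expanded %u blocks\n", expand_range(0, 1000, 512));
        return 0;
}
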
/kernel/linux/linux-5.10/fs/f2fs/
data.c
1444 map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from)); in f2fs_preallocate_blocks()
1445 if (map.m_len > map.m_lblk) in f2fs_preallocate_blocks()
1446 map.m_len -= map.m_lblk; in f2fs_preallocate_blocks()
1448 map.m_len = 0; in f2fs_preallocate_blocks()
1474 if (map.m_len > 0 && err == -ENOSPC) { in f2fs_preallocate_blocks()
1505 unsigned int maxblocks = map->m_len; in f2fs_map_blocks()
1524 map->m_len = 0; in f2fs_map_blocks()
1537 map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs); in f2fs_map_blocks()
1540 *map->m_next_extent = pgofs + map->m_len; in f2fs_map_blocks()
1545 map->m_pblk, map->m_len); in f2fs_map_blocks()
[all...]
file.c
1645 map.m_len = pg_end - pg_start; in expand_inode_data()
1647 map.m_len++; in expand_inode_data()
1649 if (!map.m_len) in expand_inode_data()
1654 block_t sec_len = roundup(map.m_len, sec_blks); in expand_inode_data()
1656 map.m_len = sec_blks; in expand_inode_data()
1677 expanded += map.m_len; in expand_inode_data()
1678 sec_len -= map.m_len; in expand_inode_data()
1679 map.m_lblk += map.m_len; in expand_inode_data()
1683 map.m_len = expanded; in expand_inode_data()
1686 expanded = map.m_len; in expand_inode_data()
[all...]
/kernel/linux/linux-5.10/drivers/net/fddi/skfp/h/
mbuf.h
23 #define sm_len m_len
/kernel/linux/linux-6.6/drivers/net/fddi/skfp/h/
mbuf.h
23 #define sm_len m_len
/kernel/linux/linux-5.10/include/trace/events/
ext4.h
523 __entry->len = map->m_len;
1605 __field( unsigned, m_len )
1615 __entry->m_len = map->m_len;
1621 TP_printk("dev %d,%d ino %lu m_lblk %u m_len %u u_lblk %u u_len %u "
1625 __entry->m_lblk, __entry->m_len,
1643 __field( unsigned, m_len )
1656 __entry->m_len = map->m_len;
1665 TP_printk("dev %d,%d ino %lu m_lblk %u m_len
[all...]
/kernel/linux/linux-6.6/include/trace/events/
ext4.h
532 __entry->len = map->m_len;
1549 __field( unsigned, m_len )
1559 __entry->m_len = map->m_len;
1565 TP_printk("dev %d,%d ino %lu m_lblk %u m_len %u u_lblk %u u_len %u "
1569 __entry->m_lblk, __entry->m_len,
1587 __field( unsigned, m_len )
1600 __entry->m_len = map->m_len;
1609 TP_printk("dev %d,%d ino %lu m_lblk %u m_len
[all...]
