Lines matching defs:map in fs/ext4/inode.c
246 * status. Erase i_data so that it becomes a valid empty block map.
379 struct ext4_map_blocks *map)
385 if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
386 ext4_error_inode(inode, func, line, map->m_pblk,
388 "(length %d)", (unsigned long) map->m_lblk,
389 map->m_pblk, map->m_len);
410 #define check_block_validity(inode, map) \
411 __check_block_validity((inode), __func__, __LINE__, (map))
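Lines 410-411 use a common kernel idiom: the wrapper macro snapshots __func__ and __LINE__ so that __check_block_validity() (and, through it, ext4_error_inode() at line 386) can report the call site of the failed check. A minimal standalone sketch of the idiom, using hypothetical widget names rather than ext4 code:

#include <stdio.h>

struct widget { int valid; };	/* hypothetical type for illustration */

static int __check_widget(struct widget *w, const char *func,
			  unsigned int line)
{
	if (!w->valid) {
		/* the captured call site makes the report point at the caller */
		fprintf(stderr, "invalid widget at %s:%u\n", func, line);
		return -1;
	}
	return 0;
}

#define check_widget(w) __check_widget((w), __func__, __LINE__)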
417 struct ext4_map_blocks *map,
422 map->m_flags = 0;
432 retval = ext4_ext_map_blocks(handle, inode, map, 0);
434 retval = ext4_ind_map_blocks(handle, inode, map, 0);
442 if (es_map->m_lblk != map->m_lblk ||
443 es_map->m_flags != map->m_flags ||
444 es_map->m_pblk != map->m_pblk) {
449 es_map->m_pblk, es_map->m_flags, map->m_lblk,
450 map->m_len, map->m_pblk, map->m_flags,
469 * create==0 and the blocks are pre-allocated and unwritten, the resulting @map
470 * is marked as unwritten. If create == 1, it marks @map as mapped.
473 * that case, @map is returned as unmapped but we still fill in map->m_len to
474 * indicate the length of a hole starting at map->m_lblk.
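The comment at lines 469-474 states the contract every caller in this listing relies on: a positive return is the number of blocks mapped, 0 reports a hole whose length is written back into map->m_len, and a negative value is an errno. A hedged caller sketch (use_extent() and skip_hole() are hypothetical; the ext4_map_blocks() signature and EXT4_MAP_MAPPED flag are as used throughout this file):

struct ext4_map_blocks map = {
	.m_lblk = lblk,		/* first logical block wanted */
	.m_len	= len,		/* number of blocks requested */
};
int ret = ext4_map_blocks(handle, inode, &map, 0);	/* flags == 0: lookup only */

if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
	use_extent(map.m_pblk, ret);		/* ret blocks mapped at m_pblk */
else if (ret == 0)
	skip_hole(map.m_lblk, map.m_len);	/* hole at least m_len blocks long */
else
	return ret;				/* negative errno */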
479 struct ext4_map_blocks *map, int flags)
487 memcpy(&orig_map, map, sizeof(*map));
490 map->m_flags = 0;
492 flags, map->m_len, (unsigned long) map->m_lblk);
497 if (unlikely(map->m_len > INT_MAX))
498 map->m_len = INT_MAX;
501 if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
506 ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
508 map->m_pblk = ext4_es_pblock(&es) +
509 map->m_lblk - es.es_lblk;
510 map->m_flags |= ext4_es_is_written(&es) ?
512 retval = es.es_len - (map->m_lblk - es.es_lblk);
513 if (retval > map->m_len)
514 retval = map->m_len;
515 map->m_len = retval;
517 map->m_pblk = 0;
518 retval = es.es_len - (map->m_lblk - es.es_lblk);
519 if (retval > map->m_len)
520 retval = map->m_len;
521 map->m_len = retval;
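Lines 506-521 trim the request to what the cached extent-status entry actually covers; the same clamp appears in both the written/unwritten branch and the delayed branch. A standalone plain-C illustration of the arithmetic (not ext4 code):

#include <stdio.h>

/* Usable length = what remains of the cached extent past the requested
 * start, capped at the requested length. */
static unsigned int trim_to_cached(unsigned int es_lblk, unsigned int es_len,
				   unsigned int m_lblk, unsigned int m_len)
{
	unsigned int avail = es_len - (m_lblk - es_lblk);

	return avail > m_len ? m_len : avail;
}

int main(void)
{
	/* cached extent covers blocks [100, 108); a 16-block request at
	 * block 103 is trimmed to 8 - (103 - 100) = 5 blocks */
	printf("%u\n", trim_to_cached(100, 8, 103, 16));
	return 0;
}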
530 ext4_map_blocks_es_recheck(handle, inode, map,
548 retval = ext4_ext_map_blocks(handle, inode, map, 0);
550 retval = ext4_ind_map_blocks(handle, inode, map, 0);
555 if (unlikely(retval != map->m_len)) {
558 "%lu: retval %d != map->m_len %d",
559 inode->i_ino, retval, map->m_len);
563 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
567 ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
568 map->m_lblk + map->m_len - 1))
570 ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
571 map->m_pblk, status);
576 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
577 ret = check_block_validity(inode, map);
593 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
606 map->m_flags &= ~EXT4_MAP_FLAGS;
621 retval = ext4_ext_map_blocks(handle, inode, map, flags);
623 retval = ext4_ind_map_blocks(handle, inode, map, flags);
625 if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
638 if (unlikely(retval != map->m_len)) {
641 "%lu: retval %d != map->m_len %d",
642 inode->i_ino, retval, map->m_len);
654 map->m_flags & EXT4_MAP_MAPPED &&
655 map->m_flags & EXT4_MAP_NEW) {
656 ret = ext4_issue_zeroout(inode, map->m_lblk,
657 map->m_pblk, map->m_len);
669 ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
673 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
677 ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
678 map->m_lblk + map->m_len - 1))
680 ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
681 map->m_pblk, status);
686 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
687 ret = check_block_validity(inode, map);
696 if (map->m_flags & EXT4_MAP_NEW &&
697 !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
702 (loff_t)map->m_lblk << inode->i_blkbits;
703 loff_t length = (loff_t)map->m_len << inode->i_blkbits;
715 if (retval > 0 && (map->m_flags & EXT4_MAP_UNWRITTEN ||
716 map->m_flags & EXT4_MAP_MAPPED))
717 ext4_fc_track_range(handle, inode, map->m_lblk,
718 map->m_lblk + map->m_len - 1);
754 struct ext4_map_blocks map;
760 map.m_lblk = iblock;
761 map.m_len = bh->b_size >> inode->i_blkbits;
763 ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
766 map_bh(bh, inode->i_sb, map.m_pblk);
767 ext4_update_bh_state(bh, map.m_flags);
768 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
772 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
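Lines 754-772 show the translation that the get_block-style helpers in this file perform: build an ext4_map_blocks from the buffer_head request, call ext4_map_blocks(), and fold the result back with map_bh()/ext4_update_bh_state(). Condensed to its shape (error handling trimmed; a sketch, not a drop-in replacement):

static int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh, int flags)
{
	struct ext4_map_blocks map = {
		.m_lblk = iblock,
		.m_len	= bh->b_size >> inode->i_blkbits,
	};
	int ret = ext4_map_blocks(ext4_journal_current_handle(), inode,
				  &map, flags);

	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);	/* bh now points at m_pblk */
		ext4_update_bh_state(bh, map.m_flags);	/* propagate map flags */
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	return ret;
}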
810 /* Maximum number of blocks we map for direct IO at once. */
819 struct ext4_map_blocks map;
829 map.m_lblk = block;
830 map.m_len = 1;
831 err = ext4_map_blocks(handle, inode, &map, map_flags);
839 return sb_find_get_block(inode->i_sb, map.m_pblk);
841 bh = sb_getblk(inode->i_sb, map.m_pblk);
844 if (map.m_flags & EXT4_MAP_NEW) {
1526 unsigned int can_map:1; /* Can writepages call map blocks? */
1533 * Extent to map - this can be after first_page because that can be
1537 struct ext4_map_blocks map;
1687 struct ext4_map_blocks *map,
1696 memcpy(&orig_map, map, sizeof(*map));
1702 map->m_flags = 0;
1703 ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
1704 (unsigned long) map->m_lblk);
1725 map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
1727 if (retval > map->m_len)
1728 retval = map->m_len;
1729 map->m_len = retval;
1731 map->m_flags |= EXT4_MAP_MAPPED;
1733 map->m_flags |= EXT4_MAP_UNWRITTEN;
1738 ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1751 retval = ext4_ext_map_blocks(NULL, inode, map, 0);
1753 retval = ext4_ind_map_blocks(NULL, inode, map, 0);
1764 ret = ext4_insert_delayed_block(inode, map->m_lblk);
1776 if (unlikely(retval != map->m_len)) {
1779 "%lu: retval %d != map->m_len %d",
1780 inode->i_ino, retval, map->m_len);
1784 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
1786 ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1787 map->m_pblk, status);
1811 struct ext4_map_blocks map;
1817 map.m_lblk = iblock;
1818 map.m_len = 1;
1825 ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1829 map_bh(bh, inode->i_sb, map.m_pblk);
1830 ext4_update_bh_state(bh, map.m_flags);
1894 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
1902 * extent of buffers to map yet, the function returns 'true' immediately - the
1910 struct ext4_map_blocks *map = &mpd->map;
1915 /* So far no extent to map => we write the buffer right away */
1916 if (map->m_len == 0)
1922 if (map->m_len == 0) {
1923 /* We cannot map unless handle is started... */
1926 map->m_lblk = lblk;
1927 map->m_len = 1;
1928 map->m_flags = bh->b_state & BH_FLAGS;
1933 if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
1937 if (lblk == map->m_lblk + map->m_len &&
1938 (bh->b_state & BH_FLAGS) == map->m_flags) {
1939 map->m_len++;
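Lines 1915-1939 accumulate the writeback extent: start a new one when none is pending, otherwise grow it only while buffers stay logically contiguous and share the same BH_FLAGS state. The growth test, isolated as a predicate (a sketch; BH_FLAGS is the local mask used at lines 1928 and 1938):

/* A buffer extends the pending extent iff it is the next logical
 * block and carries the same Delay/Unwritten state. */
static bool bh_extends_extent(const struct ext4_map_blocks *map,
			      ext4_lblk_t lblk, unsigned long bh_state)
{
	return lblk == map->m_lblk + map->m_len &&
	       (bh_state & BH_FLAGS) == map->m_flags;
}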
1955 * accumulated extent of buffers to map or add buffers in the page to the
1956 * extent of buffers to map. The function returns 1 if the caller can continue
1958 * extent to map because we cannot extend it anymore. It can also return value
1978 /* Found extent to map? */
1979 if (mpd->map.m_len)
1989 if (mpd->map.m_len == 0) {
2005 * @mpd: description of extent to map, on return next extent to map
2014 * We map delalloc buffers to their physical location, clear unwritten bits.
2033 if (lblk < mpd->map.m_lblk)
2035 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2038 * Find next buffer in the folio to map.
2040 mpd->map.m_len = 0;
2041 mpd->map.m_flags = 0;
2047 if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
2053 io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits;
2078 * @mpd - description of extent to map, on return next extent to map
2082 * We map delalloc buffers to their physical location, clear unwritten bits,
2085 * mapped, we update @map to the next extent in the last page that needs
2100 start = mpd->map.m_lblk >> bpp_bits;
2101 end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2103 pblock = mpd->map.m_pblk;
2131 mpd->map.m_len = 0;
2132 mpd->map.m_flags = 0;
2142 struct ext4_map_blocks *map = &mpd->map;
2146 trace_ext4_da_write_pages_extent(inode, map);
2168 if (map->m_flags & BIT(BH_Delay))
2171 err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2174 if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
2183 BUG_ON(map->m_len == 0);
2188 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2192 * @mpd - extent to map
2200 * extent. Note that we need not map all the described range since allocation
2202 * cannot map more because we are limited by reserved transaction credits. On
2212 struct ext4_map_blocks *map = &mpd->map;
2222 io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
2246 (unsigned long long)map->m_lblk,
2247 (unsigned)map->m_len, -err);
2260 * extent to map
2265 } while (map->m_len);
2298 * iteration. This is called from ext4_writepages(). We map an extent of
2300 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2357 * IO immediately. If we cannot map blocks, we submit just already mapped
2358 * buffers in the page for IO and keep page dirty. When we can map blocks and
2390 mpd->map.m_len = 0;
2418 mpd->map.m_len >> (PAGE_SHIFT - blkbits))
2422 if (mpd->map.m_len > 0 && mpd->next_page != folio->index)
2467 if (mpd->map.m_len == 0)
2660 * We have two constraints: We find one extent to map and we
2686 if (!ret && mpd->map.m_len)
3076 * which will map the blocks, and start the I/O, but not
3224 struct ext4_map_blocks *map, loff_t offset,
3239 if (map->m_flags & EXT4_MAP_NEW)
3246 iomap->offset = (u64) map->m_lblk << blkbits;
3247 iomap->length = (u64) map->m_len << blkbits;
3249 if ((map->m_flags & EXT4_MAP_MAPPED) &&
3262 if (map->m_flags & EXT4_MAP_UNWRITTEN) {
3264 iomap->addr = (u64) map->m_pblk << blkbits;
3267 } else if (map->m_flags & EXT4_MAP_MAPPED) {
3269 iomap->addr = (u64) map->m_pblk << blkbits;
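Lines 3246-3269 translate the ext4 map into an iomap extent: the iomap type mirrors EXT4_MAP_UNWRITTEN/EXT4_MAP_MAPPED and the disk address is the physical block scaled to bytes. The core translation reduced to its three cases (a sketch; the real ext4_set_iomap() additionally handles the delalloc report path):

iomap->offset = (u64)map->m_lblk << blkbits;
iomap->length = (u64)map->m_len << blkbits;

if (map->m_flags & EXT4_MAP_UNWRITTEN) {
	iomap->type = IOMAP_UNWRITTEN;		/* allocated but not yet written */
	iomap->addr = (u64)map->m_pblk << blkbits;
} else if (map->m_flags & EXT4_MAP_MAPPED) {
	iomap->type = IOMAP_MAPPED;
	iomap->addr = (u64)map->m_pblk << blkbits;
} else {
	iomap->type = IOMAP_HOLE;		/* nothing allocated in this range */
	iomap->addr = IOMAP_NULL_ADDR;
}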
3278 static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
3286 * Trim the mapping request to the maximum value that we can map at
3289 if (map->m_len > DIO_MAX_BLOCKS)
3290 map->m_len = DIO_MAX_BLOCKS;
3291 dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
3317 else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode))
3322 ret = ext4_map_blocks(handle, inode, map, m_flags);
3344 struct ext4_map_blocks map;
3356 map.m_lblk = offset >> blkbits;
3357 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3358 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
3368 ret = ext4_map_blocks(NULL, inode, &map, 0);
3369 if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
3372 ret = ext4_iomap_alloc(inode, &map, flags);
3374 ret = ext4_map_blocks(NULL, inode, &map, 0);
3385 map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
3387 ext4_set_iomap(inode, iomap, &map, offset, length, flags);
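The min_t expression at lines 3356-3358 converts the byte range [offset, offset + length) into a logical block range and caps it at EXT4_MAX_LOGICAL_BLOCK. A standalone worked example of the conversion, assuming 4 KiB blocks (blkbits == 12):

#include <stdio.h>

int main(void)
{
	unsigned int blkbits = 12;		/* 4 KiB blocks (assumption) */
	long long offset = 6144, length = 10000;

	long long m_lblk = offset >> blkbits;
	long long m_len = ((offset + length - 1) >> blkbits) - m_lblk + 1;

	/* bytes 6144..16143 span blocks 1..3, so m_lblk=1, m_len=3 */
	printf("m_lblk=%lld m_len=%lld\n", m_lblk, m_len);
	return 0;
}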
3435 struct ext4_map_blocks *map)
3438 ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1;
3441 map->m_lblk, end, &es);
3446 if (es.es_lblk > map->m_lblk) {
3447 map->m_len = es.es_lblk - map->m_lblk;
3451 offset = map->m_lblk - es.es_lblk;
3452 map->m_len = es.es_len - offset;
3463 struct ext4_map_blocks map;
3481 map.m_lblk = offset >> blkbits;
3482 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3483 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
3495 map.m_flags = 0;
3500 ret = ext4_map_blocks(NULL, inode, &map, 0);
3504 delalloc = ext4_iomap_is_delalloc(inode, &map);
3507 ext4_set_iomap(inode, iomap, &map, offset, length, flags);
5652 * How many index blocks do we need to touch to map @lblocks logical blocks
5706 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
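Lines 5652 and 5706 belong to the transaction-credit estimation behind these mapping paths; the direct-IO path at lines 3289-3291 sizes its handle the same way. A typical reserve-then-map sequence, sketched with the real ext4_chunk_trans_blocks()/ext4_journal_start() helpers (error handling abbreviated):

/* Reserve enough journal credits to map map.m_len blocks, then map them. */
handle_t *handle;
int credits = ext4_chunk_trans_blocks(inode, map.m_len);

handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
if (IS_ERR(handle))
	return PTR_ERR(handle);

ret = ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_CREATE);
ext4_journal_stop(handle);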