Lines Matching defs:map

277 	 * status. Erase i_data so that it becomes a valid empty block map.
410 struct ext4_map_blocks *map)
416 if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
417 ext4_error_inode(inode, func, line, map->m_pblk,
419 "(length %d)", (unsigned long) map->m_lblk,
420 map->m_pblk, map->m_len);
441 #define check_block_validity(inode, map) \
442 __check_block_validity((inode), __func__, __LINE__, (map))
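
The __func__/__LINE__ forwarding in this macro is a common kernel idiom: the
public macro captures the call site so the error report points at the caller,
not at the helper. A minimal userspace sketch of the same pattern (all names
here are hypothetical):

#include <stdio.h>

/* Double-underscore helper does the work; the macro records the call site. */
static int __check_range(const char *func, unsigned int line,
                         unsigned long start, unsigned long len)
{
        if (len == 0) {
                fprintf(stderr, "%s:%u: empty range at block %lu\n",
                        func, line, start);
                return -1;
        }
        return 0;
}

#define check_range(start, len) \
        __check_range(__func__, __LINE__, (start), (len))

int main(void)
{
        return check_range(100UL, 0UL) ? 1 : 0; /* report names this call site */
}
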
448 struct ext4_map_blocks *map,
453 map->m_flags = 0;
463 retval = ext4_ext_map_blocks(handle, inode, map, 0);
465 retval = ext4_ind_map_blocks(handle, inode, map, 0);
473 if (es_map->m_lblk != map->m_lblk ||
474 es_map->m_flags != map->m_flags ||
475 es_map->m_pblk != map->m_pblk) {
480 es_map->m_pblk, es_map->m_flags, map->m_lblk,
481 map->m_len, map->m_pblk, map->m_flags,
500 * create==0 and the blocks are pre-allocated and unwritten, the resulting @map
501 * is marked as unwritten. If create == 1, it will mark @map as mapped.
504 * that case, @map is returned as unmapped but we still fill map->m_len to
505 * indicate the length of a hole starting at map->m_lblk.
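
A hedged sketch of a typical caller, following the semantics described above
(kernel-internal types and variables such as handle, inode, lblk and len are
assumed; this will not compile standalone):

struct ext4_map_blocks map = {
        .m_lblk = lblk,         /* first logical block to resolve */
        .m_len  = len,          /* number of blocks requested */
};
int ret = ext4_map_blocks(handle, inode, &map, 0);      /* create == 0 */

if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED)) {
        /* ret blocks are on disk starting at map.m_pblk */
} else if (ret > 0 && (map.m_flags & EXT4_MAP_UNWRITTEN)) {
        /* pre-allocated but unwritten: reads must return zeroes */
} else if (ret == 0) {
        /* hole: map.m_len now holds the hole length at map.m_lblk */
}
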
510 struct ext4_map_blocks *map, int flags)
518 memcpy(&orig_map, map, sizeof(*map));
521 map->m_flags = 0;
523 flags, map->m_len, (unsigned long) map->m_lblk);
528 if (unlikely(map->m_len > INT_MAX))
529 map->m_len = INT_MAX;
532 if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
537 ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
539 map->m_pblk = ext4_es_pblock(&es) +
540 map->m_lblk - es.es_lblk;
541 map->m_flags |= ext4_es_is_written(&es) ?
543 retval = es.es_len - (map->m_lblk - es.es_lblk);
544 if (retval > map->m_len)
545 retval = map->m_len;
546 map->m_len = retval;
548 map->m_pblk = 0;
549 retval = es.es_len - (map->m_lblk - es.es_lblk);
550 if (retval > map->m_len)
551 retval = map->m_len;
552 map->m_len = retval;
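
The arithmetic in both branches above answers the same question: how much of
the request is covered by the cached extent. A self-contained userspace
mirror of that computation (struct names hypothetical):

#include <stdio.h>

struct cached_es { unsigned int es_lblk, es_len; };
struct blk_req   { unsigned int m_lblk, m_len; };

/* The cached extent covers [es_lblk, es_lblk + es_len) and the request
 * starts inside it, so the usable length is the remainder of the extent,
 * clamped to the requested length. */
static unsigned int usable_len(const struct cached_es *es, struct blk_req *map)
{
        unsigned int retval = es->es_len - (map->m_lblk - es->es_lblk);

        if (retval > map->m_len)
                retval = map->m_len;
        map->m_len = retval;
        return retval;
}

int main(void)
{
        struct cached_es es = { .es_lblk = 100, .es_len = 50 };
        struct blk_req map = { .m_lblk = 120, .m_len = 100 };

        printf("%u\n", usable_len(&es, &map));  /* 30: blocks 120..149 */
        return 0;
}
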
558 ext4_map_blocks_es_recheck(handle, inode, map,
570 retval = ext4_ext_map_blocks(handle, inode, map, 0);
572 retval = ext4_ind_map_blocks(handle, inode, map, 0);
577 if (unlikely(retval != map->m_len)) {
580 "%lu: retval %d != map->m_len %d",
581 inode->i_ino, retval, map->m_len);
585 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
589 ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
590 map->m_lblk + map->m_len - 1))
592 ret = ext4_es_insert_extent(inode, map->m_lblk,
593 map->m_len, map->m_pblk, status);
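
A hedged reconstruction of the surrounding logic (lines not mentioning "map",
such as the guard conditions, do not appear in this listing, so this is
approximate):

status = map->m_flags & EXT4_MAP_UNWRITTEN ?
                EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
                       map->m_lblk + map->m_len - 1))
        status |= EXTENT_STATUS_DELAYED;        /* assumption: keep delayed bit */
ret = ext4_es_insert_extent(inode, map->m_lblk,
                            map->m_len, map->m_pblk, status);
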
600 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
601 ret = check_block_validity(inode, map);
617 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
630 map->m_flags &= ~EXT4_MAP_FLAGS;
645 retval = ext4_ext_map_blocks(handle, inode, map, flags);
647 retval = ext4_ind_map_blocks(handle, inode, map, flags);
649 if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
662 if (unlikely(retval != map->m_len)) {
665 "%lu: retval %d != map->m_len %d",
666 inode->i_ino, retval, map->m_len);
678 map->m_flags & EXT4_MAP_MAPPED &&
679 map->m_flags & EXT4_MAP_NEW) {
680 ret = ext4_issue_zeroout(inode, map->m_lblk,
681 map->m_pblk, map->m_len);
693 ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
697 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
701 ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
702 map->m_lblk + map->m_len - 1))
704 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
705 map->m_pblk, status);
714 if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
715 ret = check_block_validity(inode, map);
724 if (map->m_flags & EXT4_MAP_NEW &&
725 !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
730 (loff_t)map->m_lblk << inode->i_blkbits;
731 loff_t length = (loff_t)map->m_len << inode->i_blkbits;
743 if (retval > 0 && (map->m_flags & EXT4_MAP_UNWRITTEN ||
744 map->m_flags & EXT4_MAP_MAPPED))
745 ext4_fc_track_range(handle, inode, map->m_lblk,
746 map->m_lblk + map->m_len - 1);
783 struct ext4_map_blocks map;
789 map.m_lblk = iblock;
790 map.m_len = bh->b_size >> inode->i_blkbits;
792 ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
795 map_bh(bh, inode->i_sb, map.m_pblk);
796 ext4_update_bh_state(bh, map.m_flags);
797 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
801 bh->b_size = inode->i_sb->s_blocksize * map.m_len;
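
The fragments above follow the standard get_block_t contract: on entry
bh->b_size caps the request, on exit it reports how much was actually mapped.
A hedged kernel-internal sketch (error paths trimmed, not compilable
standalone):

static int sketch_get_block(struct inode *inode, sector_t iblock,
                            struct buffer_head *bh, int create)
{
        struct ext4_map_blocks map = {
                .m_lblk = iblock,
                .m_len  = bh->b_size >> inode->i_blkbits,
        };
        int ret = ext4_map_blocks(NULL, inode, &map, 0);        /* lookup only */

        if (ret > 0) {
                map_bh(bh, inode->i_sb, map.m_pblk);    /* pblk + mapped bit */
                bh->b_size = inode->i_sb->s_blocksize * map.m_len;
                ret = 0;
        }
        return ret;
}
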
827 /* Maximum number of blocks we map for direct IO at once. */
836 struct ext4_map_blocks map;
844 map.m_lblk = block;
845 map.m_len = 1;
846 err = ext4_map_blocks(handle, inode, &map, map_flags);
853 bh = sb_getblk(inode->i_sb, map.m_pblk);
856 if (map.m_flags & EXT4_MAP_NEW) {
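
Sketch of the single-block pattern visible above (kernel-internal, most error
handling elided). EXT4_MAP_NEW signals a freshly allocated block that still
needs to be initialized before it is exposed:

struct ext4_map_blocks map = { .m_lblk = block, .m_len = 1 };
struct buffer_head *bh;
int err = ext4_map_blocks(handle, inode, &map, map_flags);

if (err <= 0)
        return err < 0 ? ERR_PTR(err) : NULL;   /* error, or a hole */
bh = sb_getblk(inode->i_sb, map.m_pblk);        /* buffer for that pblk */
if (unlikely(!bh))
        return ERR_PTR(-ENOMEM);
if (map.m_flags & EXT4_MAP_NEW) {
        /* new allocation: zero or journal the block before use */
}
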
1537 * Extent to map - this can be after first_page because that can be
1541 struct ext4_map_blocks map;
1697 struct ext4_map_blocks *map,
1706 memcpy(&orig_map, map, sizeof(*map));
1712 map->m_flags = 0;
1713 ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
1714 (unsigned long) map->m_lblk);
1735 map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
1737 if (retval > map->m_len)
1738 retval = map->m_len;
1739 map->m_len = retval;
1741 map->m_flags |= EXT4_MAP_MAPPED;
1743 map->m_flags |= EXT4_MAP_UNWRITTEN;
1748 ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1761 retval = ext4_ext_map_blocks(NULL, inode, map, 0);
1763 retval = ext4_ind_map_blocks(NULL, inode, map, 0);
1774 ret = ext4_insert_delayed_block(inode, map->m_lblk);
1787 if (unlikely(retval != map->m_len)) {
1790 "%lu: retval %d != map->m_len %d",
1791 inode->i_ino, retval, map->m_len);
1795 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
1797 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1798 map->m_pblk, status);
1824 struct ext4_map_blocks map;
1830 map.m_lblk = iblock;
1831 map.m_len = 1;
1838 ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1842 map_bh(bh, inode->i_sb, map.m_pblk);
1843 ext4_update_bh_state(bh, map.m_flags);
2151 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
2159 * extent of buffers to map yet, the function returns 'true' immediately - the
2167 struct ext4_map_blocks *map = &mpd->map;
2172 /* So far no extent to map => we write the buffer right away */
2173 if (map->m_len == 0)
2179 if (map->m_len == 0) {
2180 /* We cannot map unless handle is started... */
2183 map->m_lblk = lblk;
2184 map->m_len = 1;
2185 map->m_flags = bh->b_state & BH_FLAGS;
2190 if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
2194 if (lblk == map->m_lblk + map->m_len &&
2195 (bh->b_state & BH_FLAGS) == map->m_flags) {
2196 map->m_len++;
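
The accumulation rule above is: start a new extent when empty, extend while
blocks stay logically contiguous and share the same buffer state bits, and
stop once the extent is full or the run breaks. A self-contained userspace
mirror (names hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct wmap { unsigned long m_lblk; unsigned int m_len, m_flags; };

static bool add_block(struct wmap *map, unsigned long lblk,
                      unsigned int flags, unsigned int max_len)
{
        if (map->m_len == 0) {                  /* no extent yet: start one */
                map->m_lblk = lblk;
                map->m_len = 1;
                map->m_flags = flags;
                return true;
        }
        if (map->m_len >= max_len)
                return false;                   /* extent full: map it first */
        if (lblk == map->m_lblk + map->m_len && flags == map->m_flags) {
                map->m_len++;                   /* contiguous, same state */
                return true;
        }
        return false;                           /* run broken: stop here */
}

int main(void)
{
        struct wmap map = { 0 };

        add_block(&map, 10, 0x1, 2048);
        add_block(&map, 11, 0x1, 2048);
        printf("extent: lblk=%lu len=%u\n", map.m_lblk, map.m_len); /* 10, 2 */
        return 0;
}
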
2212 * accumulated extent of buffers to map or add buffers in the page to the
2213 * extent of buffers to map. The function returns 1 if the caller can continue
2215 * extent to map because we cannot extend it anymore. It can also return value
2235 /* Found extent to map? */
2236 if (mpd->map.m_len)
2246 if (mpd->map.m_len == 0) {
2262 * @mpd - description of extent to map, on return next extent to map
2269 * We map delalloc buffers to their physical location, clear unwritten bits.
2270 * If the given page is not fully mapped, we update @map to the next extent in
2288 if (lblk < mpd->map.m_lblk)
2290 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2293 * Find next buffer in the page to map.
2295 mpd->map.m_len = 0;
2296 mpd->map.m_flags = 0;
2303 if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
2309 io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits;
2335 * @mpd - description of extent to map, on return next extent to map
2339 * We map delalloc buffers to their physical location, clear unwritten bits,
2342 * mapped, we update @map to the next extent in the last page that needs
2357 start = mpd->map.m_lblk >> bpp_bits;
2358 end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2360 pblock = mpd->map.m_pblk;
2388 mpd->map.m_len = 0;
2389 mpd->map.m_flags = 0;
2399 struct ext4_map_blocks *map = &mpd->map;
2403 trace_ext4_da_write_pages_extent(inode, map);
2425 if (map->m_flags & BIT(BH_Delay))
2428 err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2431 if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
2440 BUG_ON(map->m_len == 0);
2445 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2449 * @mpd - extent to map
2457 * extent. Note that we need not map all the described range since allocation
2459 * cannot map more because we are limited by reserved transaction credits. On
2469 struct ext4_map_blocks *map = &mpd->map;
2479 io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
2504 (unsigned long long)map->m_lblk,
2505 (unsigned)map->m_len, -err);
2518 * extent to map
2523 } while (map->m_len);
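
A hedged reconstruction of the loop shape around line 2523 (the two function
names are assumptions based on mainline ext4; error and ENOSPC handling
elided):

do {
        err = mpage_map_one_extent(handle, mpd);
        if (err < 0)
                break;          /* reported against map->m_lblk / m_len above */
        err = mpage_map_and_submit_buffers(mpd);
        if (err < 0)
                break;
} while (map->m_len);           /* until the whole extent is consumed */
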
2556 * iteration. This is called from ext4_writepages(). We map an extent of
2558 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2571 * and underlying extent to map
2607 mpd->map.m_len = 0;
2630 if (mpd->map.m_len > 0 && mpd->next_page != page->index)
2674 if (mpd->map.m_len == 0)
2828 * We have two constraints: We find one extent to map and we
2854 if (!ret && mpd.map.m_len)
3160 * which will map the blocks, and start the I/O, but not
3339 struct ext4_map_blocks *map, loff_t offset,
3354 if (map->m_flags & EXT4_MAP_NEW)
3359 iomap->offset = (u64) map->m_lblk << blkbits;
3360 iomap->length = (u64) map->m_len << blkbits;
3362 if ((map->m_flags & EXT4_MAP_MAPPED) &&
3375 if (map->m_flags & EXT4_MAP_UNWRITTEN) {
3377 iomap->addr = (u64) map->m_pblk << blkbits;
3378 } else if (map->m_flags & EXT4_MAP_MAPPED) {
3380 iomap->addr = (u64) map->m_pblk << blkbits;
3387 static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
3395 * Trim the mapping request to the maximum value that we can map at
3398 if (map->m_len > DIO_MAX_BLOCKS)
3399 map->m_len = DIO_MAX_BLOCKS;
3400 dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
3426 else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode))
3431 ret = ext4_map_blocks(handle, inode, map, m_flags);
3453 struct ext4_map_blocks map;
3465 map.m_lblk = offset >> blkbits;
3466 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3467 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
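
The two lines above convert a byte range into a block range: the first block
containing offset, then a count running to the last block that contains
offset + length - 1, clamped to the maximum logical block. A runnable
userspace mirror (block size assumed to be 4 KiB):

#include <stdio.h>

#define BLKBITS  12             /* assumption: 4 KiB blocks */
#define MAX_LBLK 0xffffffffU    /* stand-in for EXT4_MAX_LOGICAL_BLOCK */

int main(void)
{
        long long offset = 5000, length = 10000;

        unsigned int lblk = offset >> BLKBITS;          /* first block */
        long long last = (offset + length - 1) >> BLKBITS;
        if (last > MAX_LBLK)
                last = MAX_LBLK;                        /* clamp */
        unsigned int len = last - lblk + 1;

        printf("m_lblk=%u m_len=%u\n", lblk, len);      /* m_lblk=1 m_len=3 */
        return 0;
}
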
3477 ret = ext4_map_blocks(NULL, inode, &map, 0);
3478 if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
3481 ret = ext4_iomap_alloc(inode, &map, flags);
3483 ret = ext4_map_blocks(NULL, inode, &map, 0);
3489 ext4_set_iomap(inode, iomap, &map, offset, length);
3537 struct ext4_map_blocks *map)
3540 ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1;
3543 map->m_lblk, end, &es);
3548 if (es.es_lblk > map->m_lblk) {
3549 map->m_len = es.es_lblk - map->m_lblk;
3553 offset = map->m_lblk - es.es_lblk;
3554 map->m_len = es.es_len - offset;
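
The trimming above distinguishes two cases: if the first delayed extent found
starts beyond the request, the head of the range is not delalloc and m_len is
cut down to that gap; otherwise the request starts inside the extent and
m_len becomes the remainder. A self-contained userspace mirror (types
hypothetical):

#include <stdio.h>

struct dext { unsigned int lblk, len; };

static int trim_to_delalloc(const struct dext *es,
                            unsigned int m_lblk, unsigned int *m_len)
{
        if (es->lblk > m_lblk) {
                *m_len = es->lblk - m_lblk;     /* head of range not delayed */
                return 0;
        }
        *m_len = es->len - (m_lblk - es->lblk); /* rest of delayed extent */
        return 1;
}

int main(void)
{
        struct dext es = { .lblk = 8, .len = 4 };
        unsigned int len = 16;

        /* Request starts at block 4; blocks 4..7 are not delalloc. */
        printf("delalloc=%d m_len=%u\n", trim_to_delalloc(&es, 4, &len), len);
        return 0;
}
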
3565 struct ext4_map_blocks map;
3583 map.m_lblk = offset >> blkbits;
3584 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3585 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
3597 map.m_flags = 0;
3602 ret = ext4_map_blocks(NULL, inode, &map, 0);
3606 delalloc = ext4_iomap_is_delalloc(inode, &map);
3609 ext4_set_iomap(inode, iomap, &map, offset, length);
5713 * How many index blocks we need to touch to map @lblocks logical blocks
5767 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.