Lines matching refs: lba (drivers/scsi/scsi_debug.c)

877 			    unsigned long long lba)
881 lba = do_div(lba, sdebug_store_sectors);
886 return lsip->storep + lba * sdebug_sector_size;
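
The match at line 886 is the address computation in the scsi_debug fake store: line 881 first wraps the LBA into the store with do_div(), which divides in place and hands back the remainder. A minimal userspace model, with plain % standing in for do_div() and every name outside the quoted lines illustrative:

    #include <stdint.h>

    /* Map an LBA to its byte address inside a wrapped backing store
     * of store_sectors sectors (cf. lines 881 and 886). */
    static unsigned char *fake_store_addr(unsigned char *storep,
                                          uint64_t store_sectors,
                                          uint32_t sector_size,
                                          uint64_t lba)
    {
        lba %= store_sectors;               /* the do_div() wrap */
        return storep + lba * sector_size;  /* line 886's arithmetic */
    }

The wrap is what lets a small allocation emulate a much larger virtual capacity: distinct LBAs alias the same backing sectors.
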
2719 unsigned long long lba)
2721 u32 zno = lba >> devip->zsize_shift;
2734 if (lba >= zsp->z_start + zsp->z_size)
2736 WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
2843 unsigned long long lba, unsigned int num)
2845 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2859 if (lba != zsp->z_wp)
2862 end = lba + num;
2864 n = zend - lba;
2876 lba += n;
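
Lines 2719-2736 locate the zone that contains an LBA, and lines 2843-2876 advance a zone's write pointer after a write that may straddle zone boundaries (line 2864 clips the count to the blocks left in the current zone). A sketch assuming power-of-two zone sizes, with hypothetical struct fields mirroring the z_start/z_size/z_wp seen above:

    #include <stdint.h>

    struct zone {
        uint64_t z_start;   /* first LBA of the zone */
        uint64_t z_size;    /* zone length in blocks */
        uint64_t z_wp;      /* write pointer */
    };

    /* Zone lookup (cf. line 2721): with power-of-two zones, the zone
     * number is the LBA shifted right by log2(zone size in blocks). */
    static struct zone *zone_of(struct zone *zones, unsigned int zsize_shift,
                                uint64_t lba)
    {
        return &zones[lba >> zsize_shift];
    }

    /* Write-pointer advance (cf. lines 2859-2876): each pass consumes
     * at most the blocks remaining in the current zone, then steps to
     * the next one. */
    static void inc_wp(struct zone *zones, unsigned int zsize_shift,
                       uint64_t lba, uint32_t num)
    {
        while (num) {
            struct zone *zsp = zone_of(zones, zsize_shift, lba);
            uint64_t zend = zsp->z_start + zsp->z_size;
            uint32_t n = lba + num > zend ? (uint32_t)(zend - lba) : num;

            zsp->z_wp = lba + n;
            lba += n;
            num -= n;
        }
    }

Line 2859 additionally compares the write's LBA against the write pointer before the advance; the sketch leaves that bookkeeping out.
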
2885 unsigned long long lba, unsigned int num, bool write)
2889 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2890 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2938 if (lba != zsp->z_wp) {
2962 (struct scsi_cmnd *scp, unsigned long long lba,
2968 if (lba + num > sdebug_capacity) {
2983 return check_zbc_access_params(scp, lba, num, write);
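
check_device_access_params() (lines 2962-2983) is the shared gatekeeper for the data-path commands below: a request is rejected when lba + num runs past sdebug_capacity (line 2968), and zoned devices get the extra zone checks via line 2983. A minimal model of the bounds test:

    #include <stdbool.h>
    #include <stdint.h>

    /* Reject any transfer that does not fit inside the device (cf.
     * line 2968); a false return corresponds to the driver setting
     * LBA OUT OF RANGE sense data. 64-bit arithmetic keeps lba + num
     * from wrapping for any realistic capacity. */
    static bool access_ok(uint64_t capacity, uint64_t lba, uint32_t num)
    {
        return lba + num <= capacity;
    }
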
3006 u32 sg_skip, u64 lba, u32 num, bool do_write)
3027 block = do_div(lba, sdebug_store_sectors);
3060 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3061 * arr into sip->storep+lba and return true. If comparison fails then
3063 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3072 block = do_div(lba, store_blks);
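
The comment at lines 3060-3063 specifies comp_write_worker(): arr holds the expected data followed by the replacement data; the store is compared against the first half and, on a match, overwritten from the second. Judging from the call sites at lines 4089 and 4450, the final flag looks like a compare-only switch shared between COMPARE AND WRITE and VERIFY; that reading is an assumption. A userspace model (line 3072's do_div() wrap into the store is omitted):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* Compare store[lba, lba+num) against the first num blocks of
     * arr; on a match, unless compare_only is set, copy the next num
     * blocks of arr over the range. false means miscompare. */
    static bool comp_write(unsigned char *store, uint32_t sector_size,
                           uint64_t lba, uint32_t num,
                           const unsigned char *arr, bool compare_only)
    {
        unsigned char *dst = store + lba * sector_size;
        size_t nbytes = (size_t)num * sector_size;

        if (memcmp(dst, arr, nbytes) != 0)
            return false;
        if (!compare_only)
            memcpy(dst, arr + nbytes, nbytes);
        return true;
    }
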
3288 u64 lba;
3295 lba = get_unaligned_be64(cmd + 2);
3301 lba = get_unaligned_be32(cmd + 2);
3307 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3314 lba = get_unaligned_be32(cmd + 2);
3320 lba = get_unaligned_be32(cmd + 2);
3325 lba = get_unaligned_be64(cmd + 12);
3349 ret = check_device_access_params(scp, lba, num, false);
3353 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3354 ((lba + num) > sdebug_medium_error_start))) {
3360 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3361 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3372 switch (prot_verify_read(scp, lba, num, ei_lba)) {
3398 ret = do_device_access(sip, scp, 0, lba, num, false);
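
Lines 3288-3325 are the READ entry point pulling the LBA out of the CDB, one branch per opcode family: a big-endian 64-bit LBA at byte 2 for READ(16), 32 bits at byte 2 for READ(10)/READ(12), 21 bits packed into bytes 1-3 for READ(6), and 64 bits at byte 12 for the 32-byte variable-length CDB (line 3325). Lines 3353-3361 then test the request against the injected medium-error window. A sketch with hand-rolled big-endian loads standing in for get_unaligned_be32/64():

    #include <stdint.h>

    static uint32_t be32(const uint8_t *p)
    {
        return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
               (uint32_t)p[2] << 8 | (uint32_t)p[3];
    }

    static uint64_t be64(const uint8_t *p)
    {
        return (uint64_t)be32(p) << 32 | be32(p + 4);
    }

    /* READ(6)'s 21-bit LBA, as at line 3307 plus the non-matching
     * continuation line grep elides: bits 16-20 sit in the low bits
     * of CDB byte 1 (per the SBC READ(6) format). */
    static uint32_t cdb6_lba(const uint8_t *cmd)
    {
        return (uint32_t)cmd[3] | (uint32_t)cmd[2] << 8 |
               (uint32_t)(cmd[1] & 0x1f) << 16;
    }

    /* Injected-error overlap test modeled on lines 3353-3354: does
     * [lba, lba+num) intersect [err_start, err_start+err_count)? */
    static int in_error_window(uint64_t lba, uint32_t num,
                               uint64_t err_start, uint32_t err_count)
    {
        return lba <= err_start + err_count - 1 && lba + num > err_start;
    }
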
3497 static unsigned long lba_to_map_index(sector_t lba)
3500 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3501 sector_div(lba, sdebug_unmap_granularity);
3502 return lba;
3507 sector_t lba = index * sdebug_unmap_granularity;
3510 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3511 return lba;
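
lba_to_map_index() and map_index_to_lba() (lines 3497-3511) convert between LBAs and indexes into the thin-provisioning map, one index per unmap granule, offset by the unmap alignment. Grep only shows the lines containing lba, so the zero-alignment guards below are an inference about the elided context:

    #include <stdint.h>

    /* LBA -> map index (cf. lines 3500-3502); the kernel's
     * sector_div() divides in place, / stands in for it here. */
    static unsigned long lba_to_map_index(uint64_t lba, uint32_t granularity,
                                          uint32_t alignment)
    {
        if (alignment)
            lba += granularity - alignment;
        return (unsigned long)(lba / granularity);
    }

    /* Map index -> first LBA it covers (cf. lines 3507-3511). */
    static uint64_t map_index_to_lba(unsigned long index, uint32_t granularity,
                                     uint32_t alignment)
    {
        uint64_t lba = (uint64_t)index * granularity;

        if (alignment)
            lba -= granularity - alignment;
        return lba;
    }

Round-tripping an arbitrary LBA through the pair lands on the start of its granule, which is what the region loops below rely on when they step with map_index_to_lba(index + 1).
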
3514 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3522 index = lba_to_map_index(lba);
3531 *num = end - lba;
3535 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3538 sector_t end = lba + len;
3540 while (lba < end) {
3541 unsigned long index = lba_to_map_index(lba);
3546 lba = map_index_to_lba(index + 1);
3550 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3553 sector_t end = lba + len;
3556 while (lba < end) {
3557 unsigned long index = lba_to_map_index(lba);
3559 if (lba == map_index_to_lba(index) &&
3560 lba + sdebug_unmap_granularity <= end &&
3564 memset(fsp + lba * sdebug_sector_size,
3570 memset(sip->dif_storep + lba, 0xff,
3575 lba = map_index_to_lba(index + 1);
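
map_state(), map_region() and unmap_region() (lines 3514-3575) are the consumers of that mapping: map_state() reports how many consecutive blocks from lba share one provisioning state (line 3531), map_region() marks granules allocated, and unmap_region() clears a granule only when it lies entirely inside the range, zeroing its data (line 3564) and, with protection information enabled, resetting the PI bytes to 0xff (line 3570). A toy model using one byte per granule instead of the driver's bitmap, reusing the conversion helpers from the previous sketch:

    #include <stdint.h>
    #include <string.h>

    static void map_region(uint8_t *map, uint64_t lba, uint32_t len,
                           uint32_t gran, uint32_t align)
    {
        uint64_t end = lba + len;

        while (lba < end) {                       /* cf. lines 3540-3546 */
            unsigned long idx = lba_to_map_index(lba, gran, align);

            map[idx] = 1;
            lba = map_index_to_lba(idx + 1, gran, align);
        }
    }

    static void unmap_region(uint8_t *map, uint8_t *store,
                             uint32_t sector_size, uint64_t lba, uint32_t len,
                             uint32_t gran, uint32_t align)
    {
        uint64_t end = lba + len;

        while (lba < end) {                       /* cf. lines 3556-3575 */
            unsigned long idx = lba_to_map_index(lba, gran, align);

            /* Deallocate only granules wholly inside the range
             * (cf. lines 3559-3564). */
            if (lba == map_index_to_lba(idx, gran, align) &&
                lba + gran <= end && map[idx]) {
                map[idx] = 0;
                memset(store + lba * sector_size, 0,
                       (size_t)gran * sector_size);
            }
            lba = map_index_to_lba(idx + 1, gran, align);
        }
    }
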
3585 u64 lba;
3592 lba = get_unaligned_be64(cmd + 2);
3598 lba = get_unaligned_be32(cmd + 2);
3604 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3611 lba = get_unaligned_be32(cmd + 2);
3617 lba = get_unaligned_be32(cmd + 2);
3622 lba = get_unaligned_be64(cmd + 12);
3642 ret = check_device_access_params(scp, lba, num, true);
3650 switch (prot_verify_write(scp, lba, num, ei_lba)) {
3676 ret = do_device_access(sip, scp, 0, lba, num, true);
3678 map_region(sip, lba, num);
3681 zbc_inc_wp(devip, lba, num);
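
resp_write_dt0() (lines 3585-3681) mirrors the read path's CDB decoding, then orders its side effects: copy the data (line 3676), mark the blocks mapped (line 3678), and advance the zone write pointer (line 3681). A sketch of that ordering, built on struct zone, inc_wp() and map_region() from the sketches above:

    #include <stdint.h>
    #include <string.h>

    static int write_blocks(uint8_t *store, uint8_t *map, struct zone *zones,
                            unsigned int zsize_shift, uint32_t sector_size,
                            uint32_t gran, uint32_t align, const uint8_t *data,
                            uint64_t lba, uint32_t num)
    {
        /* Data first (cf. line 3676)... */
        memcpy(store + lba * sector_size, data, (size_t)num * sector_size);
        /* ...then provisioning-map and write-pointer bookkeeping
         * (cf. lines 3678 and 3681). */
        map_region(map, lba, num, gran, align);
        inc_wp(zones, zsize_shift, lba, num);
        return 0;
    }

The scattered-write handler at lines 3727-3837 repeats the same check/write/account sequence once per LBA-range descriptor (lines 3802-3837).
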
3727 u64 lba;
3794 lba = get_unaligned_be64(up + 0);
3799 my_name, __func__, k, lba, num, sg_off);
3802 ret = check_device_access_params(scp, lba, num, true);
3821 int prot_ret = prot_verify_write(scp, lba, num,
3832 ret = do_device_access(sip, scp, sg_off, lba, num, true);
3835 zbc_inc_wp(devip, lba, num);
3837 map_region(sip, lba, num);
3877 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3893 ret = check_device_access_params(scp, lba, num, true);
3900 unmap_region(sip, lba, num);
3903 lbaa = lba;
3924 lbaa = lba + i;
3929 map_region(sip, lba, num);
3932 zbc_inc_wp(devip, lba, num);
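
resp_write_same() (lines 3877-3932) either deallocates the range when the UNMAP bit is set (line 3900) or writes the pattern once at lba (line 3903) and replicates that first block across the rest (line 3924), then does the same map/write-pointer accounting as a plain write. The replication loop, modeled standalone:

    #include <stdint.h>
    #include <string.h>

    /* Seed block 0 of the range from the payload, then copy it to
     * blocks 1..num-1 (cf. lines 3903 and 3924). Names illustrative. */
    static void write_same(uint8_t *store, uint32_t sector_size,
                           const uint8_t *pattern, uint64_t lba, uint32_t num)
    {
        uint8_t *first = store + lba * sector_size;
        uint32_t i;

        memcpy(first, pattern, sector_size);
        for (i = 1; i < num; i++)
            memcpy(store + (lba + i) * sector_size, first, sector_size);
    }

The WRITE SAME(10) and WRITE SAME(16) wrappers at lines 3943-3989 differ only in where the LBA sits in the CDB (32 bits versus 64) and in the 16-byte form's NDOB handling.
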
3943 u32 lba;
3955 lba = get_unaligned_be32(cmd + 2);
3961 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3968 u64 lba;
3983 lba = get_unaligned_be64(cmd + 2);
3989 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4047 u64 lba;
4054 lba = get_unaligned_be64(cmd + 2);
4068 ret = check_device_access_params(scp, lba, num, false);
4089 if (!comp_write_worker(sip, lba, num, arr, false)) {
4095 map_region(sip, lba, num);
4103 __be64 lba;
4144 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4147 ret = check_device_access_params(scp, lba, num, true);
4151 unmap_region(sip, lba, num);
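
resp_unmap() walks the UNMAP parameter list (lines 4103-4151): each 16-byte block descriptor carries a big-endian 64-bit LBA (the __be64 field at line 4103) and a 32-bit block count, and each range is bounds-checked (line 4147) before being deallocated (line 4151). A model of the walk, using be32()/be64() and unmap_region() from the sketches above:

    #include <stdint.h>

    static int do_unmap_descs(uint8_t *map, uint8_t *store,
                              uint32_t sector_size, uint64_t capacity,
                              uint32_t gran, uint32_t align,
                              const uint8_t *desc, unsigned int ndescs)
    {
        unsigned int i;

        for (i = 0; i < ndescs; i++, desc += 16) {
            uint64_t lba = be64(desc + 0);   /* cf. line 4144 */
            uint32_t num = be32(desc + 8);

            if (lba + num > capacity)
                return -1;                   /* LBA OUT OF RANGE */
            unmap_region(map, store, sector_size, lba, num, gran, align);
        }
        return 0;
    }
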
4169 u64 lba;
4174 lba = get_unaligned_be64(cmd + 2);
4180 ret = check_device_access_params(scp, lba, 1, false);
4187 mapped = map_state(sip, lba, &num);
4192 if (sdebug_capacity - lba <= 0xffffffff)
4193 num = sdebug_capacity - lba;
4200 put_unaligned_be64(lba, arr + 8); /* LBA */
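
resp_get_lba_status() (lines 4169-4200) reports, for the LBA in the CDB, how long the run of blocks sharing its provisioning state is: with a store map it asks map_state() (line 4187); without one it treats everything as mapped, clamping the count to a 32-bit field (lines 4192-4193). The descriptor layout below follows SBC's GET LBA STATUS format and should be read as an assumption rather than a quote of the driver:

    #include <stdint.h>

    static void put_be32(uint8_t *p, uint32_t v)
    {
        p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
    }

    static void put_be64(uint8_t *p, uint64_t v)
    {
        put_be32(p, (uint32_t)(v >> 32));
        put_be32(p + 4, (uint32_t)v);
    }

    /* One-descriptor GET LBA STATUS payload: parameter data length,
     * then the start LBA (cf. line 4200), run length, and a
     * provisioning status byte (0 mapped, 1 deallocated per SBC). */
    static void fill_lba_status(uint8_t *arr, uint64_t lba, uint32_t num,
                                int mapped)
    {
        put_be32(arr + 0, 20);          /* 4 + one 16-byte descriptor */
        put_be64(arr + 8, lba);
        put_be32(arr + 16, num);
        arr[20] = mapped ? 0 : 1;
    }
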
4211 u64 lba;
4216 lba = get_unaligned_be32(cmd + 2);
4219 lba = get_unaligned_be64(cmd + 2);
4222 if (lba + num_blocks > sdebug_capacity) {
4244 u64 lba;
4252 lba = get_unaligned_be32(cmd + 2);
4255 lba = get_unaligned_be64(cmd + 2);
4258 if (lba + nblks > sdebug_capacity) {
4265 block = do_div(lba, sdebug_store_sectors);
4392 u64 lba;
4408 lba = get_unaligned_be64(cmd + 2);
4412 lba = get_unaligned_be32(cmd + 2);
4423 ret = check_device_access_params(scp, lba, a_num, false);
4450 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4471 u64 lba, zs_lba;
4505 for (lba = zs_lba; lba < sdebug_capacity;
4506 lba = zsp->z_start + zsp->z_size) {
4507 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4509 zsp = zbc_zone(devip, lba);
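
resp_report_zones() (lines 4471-4509) iterates zones starting from the one holding the requested zone-start LBA, stepping by each zone's extent (lines 4505-4506); the WARN_ONCE at line 4507 guards against a lookup that fails to advance, which would otherwise spin forever. The walk, modeled on struct zone and zone_of() from the earlier sketch:

    #include <stdint.h>

    static unsigned int count_zones_from(struct zone *zones,
                                         unsigned int zsize_shift,
                                         uint64_t zs_lba, uint64_t capacity)
    {
        unsigned int nz = 0;
        uint64_t lba;
        struct zone *zsp;

        /* cf. lines 4505-4506: advance by zone extents, not fixed
         * strides, so variable-size zone layouts still terminate. */
        for (lba = zs_lba; lba < capacity;
             lba = zsp->z_start + zsp->z_size) {
            zsp = zone_of(zones, zsize_shift, lba);
            nz++;
        }
        return nz;
    }
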
5805 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5832 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");