Lines Matching defs:rmd — occurrences of the rmd parameter (struct pqi_scsi_dev_raid_map_data) in the Linux smartpqi SCSI driver; the leading number on each line is its line number in the source file.
87 struct pqi_scsi_dev_raid_map_data *rmd);
91 struct pqi_scsi_dev_raid_map_data *rmd);
2647 struct pqi_scsi_dev_raid_map_data *rmd)
2651 switch (rmd->raid_level) {
2655 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2656 rmd->data_length > ctrl_info->max_write_raid_1_10_2drive))
2660 if (rmd->is_write && (!ctrl_info->enable_r1_writes ||
2661 rmd->data_length > ctrl_info->max_write_raid_1_10_3drive))
2665 if (rmd->is_write && (!ctrl_info->enable_r5_writes ||
2666 rmd->data_length > ctrl_info->max_write_raid_5_6))
2670 if (rmd->is_write && (!ctrl_info->enable_r6_writes ||
2671 rmd->data_length > ctrl_info->max_write_raid_5_6))
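The switch above gates AIO bypass by RAID level: reads always qualify, while writes must be enabled per level and must fit under a per-level transfer cap. A minimal userspace sketch of that gating, with stand-in types whose field names mirror the listing (the real limits live in struct pqi_ctrl_info):

#include <stdbool.h>
#include <stdint.h>

enum raid_level { RAID_1, RAID_TRIPLE, RAID_5, RAID_6 };

struct ctrl_limits {                    /* stand-in for struct pqi_ctrl_info */
        bool enable_r1_writes;
        bool enable_r5_writes;
        bool enable_r6_writes;
        uint32_t max_write_raid_1_10_2drive;
        uint32_t max_write_raid_1_10_3drive;
        uint32_t max_write_raid_5_6;
};

/* Reads always qualify for bypass; writes are gated per RAID level. */
bool write_bypass_allowed(const struct ctrl_limits *c,
                          enum raid_level level, uint32_t data_length)
{
        switch (level) {
        case RAID_1:
                return c->enable_r1_writes &&
                        data_length <= c->max_write_raid_1_10_2drive;
        case RAID_TRIPLE:
                return c->enable_r1_writes &&
                        data_length <= c->max_write_raid_1_10_3drive;
        case RAID_5:
                return c->enable_r5_writes &&
                        data_length <= c->max_write_raid_5_6;
        case RAID_6:
                return c->enable_r6_writes &&
                        data_length <= c->max_write_raid_5_6;
        }
        return false;
}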
2685 struct pqi_scsi_dev_raid_map_data *rmd)
2690 rmd->is_write = true;
2693 rmd->first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2695 rmd->block_cnt = (u32)scmd->cmnd[4];
2696 if (rmd->block_cnt == 0)
2697 rmd->block_cnt = 256;
2700 rmd->is_write = true;
2703 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2704 rmd->block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2707 rmd->is_write = true;
2710 rmd->first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2711 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2714 rmd->is_write = true;
2717 rmd->first_block = get_unaligned_be64(&scmd->cmnd[2]);
2718 rmd->block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2725 put_unaligned_le32(scsi_bufflen(scmd), &rmd->data_length);
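pqi_get_aio_lba_and_block_count() (lines 2685-2725) pulls the starting LBA and block count out of the incoming CDB, with a different field layout per opcode and the READ(6)/WRITE(6) quirk that a count of 0 means 256 blocks. A self-contained sketch of the same decode for two of the opcodes, assembling the big-endian fields by hand in place of the kernel's get_unaligned_be*() helpers:

#include <stdint.h>

static uint32_t be32(const uint8_t *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | p[3];
}

static uint16_t be16(const uint8_t *p)
{
        return (uint16_t)((p[0] << 8) | p[1]);
}

/* READ(6)/WRITE(6): 21-bit LBA in bytes 1-3, 1-byte count, 0 means 256. */
void decode_rw6(const uint8_t *cdb, uint64_t *lba, uint32_t *cnt)
{
        *lba = ((uint64_t)(cdb[1] & 0x1f) << 16) | be16(&cdb[2]);
        *cnt = cdb[4] ? cdb[4] : 256;
}

/* READ(10)/WRITE(10): 32-bit LBA in bytes 2-5, 16-bit count in bytes 7-8. */
void decode_rw10(const uint8_t *cdb, uint64_t *lba, uint32_t *cnt)
{
        *lba = be32(&cdb[2]);
        *cnt = be16(&cdb[7]);
}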
2731 struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
2737 rmd->last_block = rmd->first_block + rmd->block_cnt - 1;
2740 if (rmd->last_block >=
2742 rmd->last_block < rmd->first_block)
2745 rmd->data_disks_per_row =
2747 rmd->strip_size = get_unaligned_le16(&raid_map->strip_size);
2748 rmd->layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2751 rmd->blocks_per_row = rmd->data_disks_per_row * rmd->strip_size;
2752 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2755 tmpdiv = rmd->first_block;
2756 do_div(tmpdiv, rmd->blocks_per_row);
2757 rmd->first_row = tmpdiv;
2758 tmpdiv = rmd->last_block;
2759 do_div(tmpdiv, rmd->blocks_per_row);
2760 rmd->last_row = tmpdiv;
2761 rmd->first_row_offset = (u32)(rmd->first_block - (rmd->first_row * rmd->blocks_per_row));
2762 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row * rmd->blocks_per_row));
2763 tmpdiv = rmd->first_row_offset;
2764 do_div(tmpdiv, rmd->strip_size);
2765 rmd->first_column = tmpdiv;
2766 tmpdiv = rmd->last_row_offset;
2767 do_div(tmpdiv, rmd->strip_size);
2768 rmd->last_column = tmpdiv;
2770 rmd->first_row = rmd->first_block / rmd->blocks_per_row;
2771 rmd->last_row = rmd->last_block / rmd->blocks_per_row;
2772 rmd->first_row_offset = (u32)(rmd->first_block -
2773 (rmd->first_row * rmd->blocks_per_row));
2774 rmd->last_row_offset = (u32)(rmd->last_block - (rmd->last_row *
2775 rmd->blocks_per_row));
2776 rmd->first_column = rmd->first_row_offset / rmd->strip_size;
2777 rmd->last_column = rmd->last_row_offset / rmd->strip_size;
2781 if (rmd->first_row != rmd->last_row ||
2782 rmd->first_column != rmd->last_column)
2786 rmd->total_disks_per_row = rmd->data_disks_per_row +
2788 rmd->map_row = ((u32)(rmd->first_row >>
2791 rmd->map_index = (rmd->map_row * rmd->total_disks_per_row) +
2792 rmd->first_column;
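pci_get_aio_common_raid_map_values() (lines 2731-2792) reduces the request's LBA range to a single (row, column) cell in the stripe layout, declaring the request bypass-ineligible if it crosses a row or column boundary. The math appears twice in the listing because one variant uses do_div() for 32-bit builds and the other uses plain 64-bit division. A worked sketch of the plain-division path, with assumed example geometry:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t first_block = 100000;  /* example LBA */
        uint32_t strip_size = 128;      /* blocks per strip (example) */
        uint32_t data_disks_per_row = 4;

        uint32_t blocks_per_row = data_disks_per_row * strip_size;    /* 512 */
        uint64_t first_row = first_block / blocks_per_row;            /* 195 */
        uint32_t first_row_offset =
                (uint32_t)(first_block - first_row * blocks_per_row); /* 160 */
        uint32_t first_column = first_row_offset / strip_size;        /* 1 */

        printf("row %llu, column %u, offset %u\n",
               (unsigned long long)first_row, first_column, first_row_offset);
        return 0;
}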
2797 static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
2804 if (rmd->blocks_per_row == 0) /* Used as a divisor in many calculations */
2809 rmd->stripesize = rmd->blocks_per_row * rmd->layout_map_count;
2811 tmpdiv = rmd->first_block;
2812 rmd->first_group = do_div(tmpdiv, rmd->stripesize);
2813 tmpdiv = rmd->first_group;
2814 do_div(tmpdiv, rmd->blocks_per_row);
2815 rmd->first_group = tmpdiv;
2816 tmpdiv = rmd->last_block;
2817 rmd->last_group = do_div(tmpdiv, rmd->stripesize);
2818 tmpdiv = rmd->last_group;
2819 do_div(tmpdiv, rmd->blocks_per_row);
2820 rmd->last_group = tmpdiv;
2822 rmd->first_group = (rmd->first_block % rmd->stripesize) / rmd->blocks_per_row;
2823 rmd->last_group = (rmd->last_block % rmd->stripesize) / rmd->blocks_per_row;
2825 if (rmd->first_group != rmd->last_group)
2830 tmpdiv = rmd->first_block;
2831 do_div(tmpdiv, rmd->stripesize);
2832 rmd->first_row = tmpdiv;
2833 rmd->r5or6_first_row = tmpdiv;
2834 tmpdiv = rmd->last_block;
2835 do_div(tmpdiv, rmd->stripesize);
2836 rmd->r5or6_last_row = tmpdiv;
2838 rmd->first_row = rmd->r5or6_first_row =
2839 rmd->first_block / rmd->stripesize;
2840 rmd->r5or6_last_row = rmd->last_block / rmd->stripesize;
2842 if (rmd->r5or6_first_row != rmd->r5or6_last_row)
2847 tmpdiv = rmd->first_block;
2848 rmd->first_row_offset = do_div(tmpdiv, rmd->stripesize);
2849 tmpdiv = rmd->first_row_offset;
2850 rmd->first_row_offset = (u32)do_div(tmpdiv, rmd->blocks_per_row);
2851 rmd->r5or6_first_row_offset = rmd->first_row_offset;
2852 tmpdiv = rmd->last_block;
2853 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->stripesize);
2854 tmpdiv = rmd->r5or6_last_row_offset;
2855 rmd->r5or6_last_row_offset = do_div(tmpdiv, rmd->blocks_per_row);
2856 tmpdiv = rmd->r5or6_first_row_offset;
2857 do_div(tmpdiv, rmd->strip_size);
2858 rmd->first_column = rmd->r5or6_first_column = tmpdiv;
2859 tmpdiv = rmd->r5or6_last_row_offset;
2860 do_div(tmpdiv, rmd->strip_size);
2861 rmd->r5or6_last_column = tmpdiv;
2863 rmd->first_row_offset = rmd->r5or6_first_row_offset =
2864 (u32)((rmd->first_block % rmd->stripesize) %
2865 rmd->blocks_per_row);
2867 rmd->r5or6_last_row_offset =
2868 (u32)((rmd->last_block % rmd->stripesize) %
2869 rmd->blocks_per_row);
2871 rmd->first_column =
2872 rmd->r5or6_first_row_offset / rmd->strip_size;
2873 rmd->r5or6_first_column = rmd->first_column;
2874 rmd->r5or6_last_column = rmd->r5or6_last_row_offset / rmd->strip_size;
2876 if (rmd->r5or6_first_column != rmd->r5or6_last_column)
2880 rmd->map_row =
2881 ((u32)(rmd->first_row >> raid_map->parity_rotation_shift)) %
2884 rmd->map_index = (rmd->first_group *
2886 rmd->total_disks_per_row)) +
2887 (rmd->map_row * rmd->total_disks_per_row) + rmd->first_column;
2889 if (rmd->is_write) {
2901 index = DIV_ROUND_UP(rmd->map_index + 1, rmd->total_disks_per_row);
2902 index *= rmd->total_disks_per_row;
2905 rmd->p_parity_it_nexus = raid_map->disk_data[index].aio_handle;
2906 if (rmd->raid_level == SA_RAID_6) {
2907 rmd->q_parity_it_nexus = raid_map->disk_data[index + 1].aio_handle;
2908 rmd->xor_mult = raid_map->disk_data[rmd->map_index].xor_mult[1];
2911 tmpdiv = rmd->first_block;
2912 do_div(tmpdiv, rmd->blocks_per_row);
2913 rmd->row = tmpdiv;
2915 rmd->row = rmd->first_block / rmd->blocks_per_row;
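pqi_calc_aio_r5_or_r6() repeats the same pattern one level up: stripesize = blocks_per_row * layout_map_count, and the request must stay inside one group, one row, and one column to remain bypass-eligible. The alternating tmpdiv assignments exist because the kernel's do_div() divides a u64 in place and returns the 32-bit remainder. A stand-in with the same contract, assuming nothing beyond what the listing shows:

#include <stdint.h>

/* Same contract as the kernel macro: divide *n in place, return remainder. */
static uint32_t do_div_sketch(uint64_t *n, uint32_t base)
{
        uint32_t rem = (uint32_t)(*n % base);

        *n /= base;
        return rem;
}

/* first_group = (first_block % stripesize) / blocks_per_row, spelled the
 * way the 32-bit path in the listing spells it (lines 2811-2815). */
uint32_t first_group_of(uint64_t first_block, uint32_t stripesize,
                        uint32_t blocks_per_row)
{
        uint64_t tmpdiv = first_block;
        uint64_t group = do_div_sketch(&tmpdiv, stripesize);

        tmpdiv = group;
        do_div_sketch(&tmpdiv, blocks_per_row);
        return (uint32_t)tmpdiv;
}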
2922 static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
2925 if (rmd->disk_block > 0xffffffff) {
2926 rmd->cdb[0] = rmd->is_write ? WRITE_16 : READ_16;
2927 rmd->cdb[1] = 0;
2928 put_unaligned_be64(rmd->disk_block, &rmd->cdb[2]);
2929 put_unaligned_be32(rmd->disk_block_cnt, &rmd->cdb[10]);
2930 rmd->cdb[14] = 0;
2931 rmd->cdb[15] = 0;
2932 rmd->cdb_length = 16;
2934 rmd->cdb[0] = rmd->is_write ? WRITE_10 : READ_10;
2935 rmd->cdb[1] = 0;
2936 put_unaligned_be32((u32)rmd->disk_block, &rmd->cdb[2]);
2937 rmd->cdb[6] = 0;
2938 put_unaligned_be16((u16)rmd->disk_block_cnt, &rmd->cdb[7]);
2939 rmd->cdb[9] = 0;
2940 rmd->cdb_length = 10;
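pqi_set_aio_cdb() rebuilds the CDB against the physical disk: a 10-byte CDB carries only a 32-bit LBA and a 16-bit count, so any disk_block above 0xffffffff forces the 16-byte form. A sketch with byte-store helpers in place of put_unaligned_be*(); the caller must keep the count within 16 bits, which the bypass path in the listing enforces at line 3029:

#include <stdint.h>
#include <string.h>

#define READ_10  0x28
#define WRITE_10 0x2a
#define READ_16  0x88
#define WRITE_16 0x8a

static void put_be32(uint32_t v, uint8_t *p)
{
        p[0] = v >> 24;
        p[1] = v >> 16;
        p[2] = v >> 8;
        p[3] = v;
}

/* Returns the CDB length, mirroring rmd->cdb_length in the listing. */
int build_rw_cdb(uint8_t cdb[16], int is_write, uint64_t lba, uint32_t cnt)
{
        memset(cdb, 0, 16);
        if (lba > 0xffffffffULL) {
                cdb[0] = is_write ? WRITE_16 : READ_16;
                put_be32((uint32_t)(lba >> 32), &cdb[2]);
                put_be32((uint32_t)lba, &cdb[6]);
                put_be32(cnt, &cdb[10]);
                return 16;
        }
        cdb[0] = is_write ? WRITE_10 : READ_10;
        put_be32((uint32_t)lba, &cdb[2]);
        cdb[7] = (uint8_t)(cnt >> 8);
        cdb[8] = (uint8_t)cnt;
        return 10;
}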
2945 struct pqi_scsi_dev_raid_map_data *rmd)
2950 group = rmd->map_index / rmd->data_disks_per_row;
2952 index = rmd->map_index - (group * rmd->data_disks_per_row);
2953 rmd->it_nexus[0] = raid_map->disk_data[index].aio_handle;
2954 index += rmd->data_disks_per_row;
2955 rmd->it_nexus[1] = raid_map->disk_data[index].aio_handle;
2956 if (rmd->layout_map_count > 2) {
2957 index += rmd->data_disks_per_row;
2958 rmd->it_nexus[2] = raid_map->disk_data[index].aio_handle;
2961 rmd->num_it_nexus_entries = rmd->layout_map_count;
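pqi_calc_aio_r1_nexus() relies on the RAID 1/Triple map layout: each mirror copy of a data column sits data_disks_per_row entries further into disk_data[], so the second and third IT nexus are found by striding the base column index, and num_it_nexus_entries equals layout_map_count (2 or 3). A minimal sketch over a flat handle array standing in for raid_map->disk_data[]:

#include <stdint.h>

/*
 * aio_handles[] stands in for raid_map->disk_data[i].aio_handle.
 * copy is 0 for the data column itself, 1 or 2 for its mirrors.
 */
uint32_t mirror_nexus(const uint32_t *aio_handles, uint32_t map_index,
                      uint32_t data_disks_per_row, uint32_t copy)
{
        uint32_t group = map_index / data_disks_per_row;
        uint32_t index = map_index - group * data_disks_per_row;

        return aio_handles[index + copy * data_disks_per_row];
}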
2974 struct pqi_scsi_dev_raid_map_data rmd = { 0 };
2976 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
2980 rmd.raid_level = device->raid_level;
2982 if (!pqi_aio_raid_level_supported(ctrl_info, &rmd))
2985 if (unlikely(rmd.block_cnt == 0))
2990 rc = pci_get_aio_common_raid_map_values(ctrl_info, &rmd, raid_map);
2996 if (rmd.is_write) {
2997 pqi_calc_aio_r1_nexus(raid_map, &rmd);
2999 group = device->next_bypass_group[rmd.map_index];
3001 if (next_bypass_group >= rmd.layout_map_count)
3003 device->next_bypass_group[rmd.map_index] = next_bypass_group;
3004 rmd.map_index += group * rmd.data_disks_per_row;
3008 (rmd.layout_map_count > 1 || rmd.is_write)) {
3009 rc = pqi_calc_aio_r5_or_r6(&rmd, raid_map);
3014 if (unlikely(rmd.map_index >= RAID_MAP_MAX_ENTRIES))
3017 rmd.aio_handle = raid_map->disk_data[rmd.map_index].aio_handle;
3018 rmd.disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
3019 rmd.first_row * rmd.strip_size +
3020 (rmd.first_row_offset - rmd.first_column * rmd.strip_size);
3021 rmd.disk_block_cnt = rmd.block_cnt;
3025 rmd.disk_block <<= raid_map->phys_blk_shift;
3026 rmd.disk_block_cnt <<= raid_map->phys_blk_shift;
3029 if (unlikely(rmd.disk_block_cnt > 0xffff))
3032 pqi_set_aio_cdb(&rmd);
3035 if (rmd.data_length > device->max_transfer_encrypted)
3037 pqi_set_encryption_info(&encryption_info, raid_map, rmd.first_block);
3043 if (rmd.is_write) {
3048 encryption_info_ptr, device, &rmd);
3052 encryption_info_ptr, device, &rmd);
3056 return pqi_aio_submit_io(ctrl_info, scmd, rmd.aio_handle,
3057 rmd.cdb, rmd.cdb_length, queue_group,
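The tail of pqi_raid_bypass_submit_scsi_cmd() (lines 3017-3026) converts the (row, column) cell into the target disk's LBA: whole rows contribute first_row * strip_size blocks, and the row offset minus the strips to the left of the column gives the position inside the strip; lines 3025-3026 then scale the result by phys_blk_shift where map blocks and physical blocks differ in size. Continuing the example geometry from the earlier sketch (row 195, column 1, offset 160, strip 128), with an assumed map base of 0:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t disk_starting_blk = 0; /* assumed map base */
        uint64_t first_row = 195;
        uint32_t strip_size = 128;
        uint32_t first_row_offset = 160;
        uint32_t first_column = 1;

        /* Whole rows above the cell, plus the offset inside this strip. */
        uint64_t disk_block = disk_starting_blk +
                first_row * strip_size +
                (first_row_offset - first_column * strip_size);

        printf("disk_block = %llu\n", (unsigned long long)disk_block); /* 24992 */
        return 0;
}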
5758 struct pqi_scsi_dev_raid_map_data *rmd)
5777 r1_request->num_drives = rmd->num_it_nexus_entries;
5778 put_unaligned_le32(rmd->it_nexus[0], &r1_request->it_nexus_1);
5779 put_unaligned_le32(rmd->it_nexus[1], &r1_request->it_nexus_2);
5780 if (rmd->num_it_nexus_entries == 3)
5781 put_unaligned_le32(rmd->it_nexus[2], &r1_request->it_nexus_3);
5787 if (rmd->cdb_length > sizeof(r1_request->cdb))
5788 rmd->cdb_length = sizeof(r1_request->cdb);
5789 r1_request->cdb_length = rmd->cdb_length;
5790 memcpy(r1_request->cdb, rmd->cdb, rmd->cdb_length);
5819 struct pqi_scsi_dev_raid_map_data *rmd)
5841 put_unaligned_le32(rmd->aio_handle, &r56_request->data_it_nexus);
5842 put_unaligned_le32(rmd->p_parity_it_nexus, &r56_request->p_parity_it_nexus);
5843 if (rmd->raid_level == SA_RAID_6) {
5844 put_unaligned_le32(rmd->q_parity_it_nexus, &r56_request->q_parity_it_nexus);
5845 r56_request->xor_multiplier = rmd->xor_mult;
5849 put_unaligned_le64(rmd->row, &r56_request->row);
5854 if (rmd->cdb_length > sizeof(r56_request->cdb))
5855 rmd->cdb_length = sizeof(r56_request->cdb);
5856 r56_request->cdb_length = rmd->cdb_length;
5857 memcpy(r56_request->cdb, rmd->cdb, rmd->cdb_length);
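Both write-request builders above (RAID 1 at lines 5758-5790, RAID 5/6 at 5819-5857) end with the same defensive copy: cdb_length is clamped to the request's fixed CDB field before the memcpy so an oversized CDB can never overrun it. The pattern, with a stand-in request struct:

#include <stdint.h>
#include <string.h>

struct fw_write_request {               /* stand-in for the AIO request */
        uint8_t cdb_length;
        uint8_t cdb[16];
};

void copy_cdb(struct fw_write_request *req,
              const uint8_t *cdb, uint8_t cdb_length)
{
        if (cdb_length > sizeof(req->cdb))
                cdb_length = sizeof(req->cdb);
        req->cdb_length = cdb_length;
        memcpy(req->cdb, cdb, cdb_length);
}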
5937 struct pqi_scsi_dev_raid_map_data rmd;
5942 rc = pqi_get_aio_lba_and_block_count(scmd, &rmd);
5947 if (!rmd.is_write)
5973 rmd.first_block >= pqi_stream_data->next_lba) &&
5974 rmd.first_block <= pqi_stream_data->next_lba +
5975 rmd.block_cnt) {
5976 pqi_stream_data->next_lba = rmd.first_block +
5977 rmd.block_cnt;
5998 pqi_stream_data->next_lba = rmd.first_block + rmd.block_cnt;
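The stream-detection fragment (lines 5937-5998) decides whether a write extends a recently seen sequential stream: it hits when first_block falls between the slot's predicted next_lba and next_lba + block_cnt, and on a hit the prediction advances past the request. A condensed sketch, assuming (as the dangling parenthesis at line 5973 suggests) that a slot with next_lba == 0 is treated as unused:

#include <stdbool.h>
#include <stdint.h>

struct stream_slot {            /* stand-in for the per-device stream state */
        uint64_t next_lba;      /* 0 = slot unused (assumption) */
};

bool is_stream_hit(struct stream_slot *s, uint64_t first_block,
                   uint32_t block_cnt)
{
        if (s->next_lba && first_block >= s->next_lba &&
            first_block <= s->next_lba + block_cnt) {
                s->next_lba = first_block + block_cnt;
                return true;
        }
        return false;
}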