/kernel/linux/linux-6.6/drivers/infiniband/hw/irdma/

hmc.c
      42  * @pd_idx: pointer to return page descriptor index
      50  u32 idx, u32 cnt, u32 *pd_idx,  in irdma_find_pd_index_limit()
      58  *pd_idx = (u32)(fpm_adr / IRDMA_HMC_PAGED_BP_SIZE);  in irdma_find_pd_index_limit()
     102  * @pd_idx: page descriptor index
     105  u32 pd_idx)  in irdma_invalidate_pf_hmc_pd()
     109  FIELD_PREP(IRDMA_PFHMC_PDINV_PMPDIDX, pd_idx);  in irdma_invalidate_pf_hmc_pd()
     225  u32 pd_idx = 0, pd_lmt = 0;  in irdma_sc_create_hmc_obj() local
     252  info->start_idx, info->count, &pd_idx,  in irdma_sc_create_hmc_obj()
     266  pd_idx1 = max(pd_idx, (j * IRDMA_HMC_MAX_BP_COUNT));  in irdma_sc_create_hmc_obj()
     300  pd_idx1 = max(pd_idx, (  in irdma_sc_create_hmc_obj()
      49  irdma_find_pd_index_limit(struct irdma_hmc_info *hmc_info, u32 type, u32 idx, u32 cnt, u32 *pd_idx, u32 *pd_limit)  irdma_find_pd_index_limit() argument
     104  irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_idx, u32 pd_idx)  irdma_invalidate_pf_hmc_pd() argument
     375  u32 pd_idx, pd_lmt, rel_pd_idx;  irdma_sc_del_hmc_obj() local
     [all...]
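
The max()/min() pairing at hmc.c lines 266 and 300 is the core of irdma_sc_create_hmc_obj(): the object's page-descriptor range [pd_idx, pd_lmt) is walked one segment descriptor (SD) at a time, clamped to the PDs each SD actually covers. A minimal userspace sketch of that clamping, assuming IRDMA_HMC_MAX_BP_COUNT is 512 (an assumption; check the driver headers):

    /* Walk an HMC object's PD range one SD at a time, as the create and
     * delete paths above do. HMC_MAX_BP_COUNT is an assumed stand-in for
     * IRDMA_HMC_MAX_BP_COUNT. */
    #include <stdint.h>
    #include <stdio.h>

    #define HMC_MAX_BP_COUNT 512u

    static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }
    static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

    int main(void)
    {
        uint32_t pd_idx = 700, pd_lmt = 1300;  /* example PD range */
        uint32_t sd_first = pd_idx / HMC_MAX_BP_COUNT;
        uint32_t sd_last = (pd_lmt - 1) / HMC_MAX_BP_COUNT;

        for (uint32_t j = sd_first; j <= sd_last; j++) {
            uint32_t pd_idx1 = max_u32(pd_idx, j * HMC_MAX_BP_COUNT);
            uint32_t pd_lmt1 = min_u32(pd_lmt, (j + 1) * HMC_MAX_BP_COUNT);

            printf("SD %u: PDs [%u, %u)\n", j, pd_idx1, pd_lmt1);
        }
        return 0;
    }

For the example range this prints SD 1 covering PDs [700, 1024) and SD 2 covering PDs [1024, 1300), which is exactly the pd_idx1/pd_lmt1 clamping the create and delete paths perform per SD.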

pble.c
      75  idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);  in get_sd_pd_idx()
      76  idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);  in get_sd_pd_idx()
     148  u32 pd_idx = info->idx.pd_idx;  in add_bp_pages() local
     168  pd_idx++, &mem);  in add_bp_pages()
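
The two get_sd_pd_idx() lines above carry the whole HMC address-to-index mapping: a function-private-memory (FPM) offset divided by the backing-page size yields the absolute pd_idx, and the remainder modulo the PDs per SD yields rel_pd_idx. A self-contained sketch, assuming the conventional 4 KB backing page (IRDMA_HMC_PAGED_BP_SIZE) and 512 PDs per 2 MB SD (IRDMA_HMC_PD_CNT_IN_SD); both constants are assumptions here:

    /* FPM address -> SD/PD/relative-PD indices, mirroring get_sd_pd_idx(). */
    #include <stdint.h>
    #include <stdio.h>

    #define HMC_PAGED_BP_SIZE  4096u  /* assumed 4 KB backing page */
    #define HMC_PD_CNT_IN_SD    512u  /* assumed PDs per SD */
    #define HMC_DIRECT_BP_SIZE (HMC_PAGED_BP_SIZE * HMC_PD_CNT_IN_SD)  /* 2 MB */

    struct sd_pd_idx {
        uint32_t sd_idx;     /* which segment descriptor */
        uint32_t pd_idx;     /* absolute page-descriptor index */
        uint32_t rel_pd_idx; /* PD index relative to its SD */
    };

    static void get_sd_pd_idx(uint64_t fpm_addr, struct sd_pd_idx *idx)
    {
        idx->sd_idx = (uint32_t)(fpm_addr / HMC_DIRECT_BP_SIZE);
        idx->pd_idx = (uint32_t)(fpm_addr / HMC_PAGED_BP_SIZE);
        idx->rel_pd_idx = idx->pd_idx % HMC_PD_CNT_IN_SD;
    }

    int main(void)
    {
        struct sd_pd_idx idx;

        get_sd_pd_idx(0x300000, &idx);  /* 3 MB into the FPM */
        printf("sd=%u pd=%u rel_pd=%u\n", idx.sd_idx, idx.pd_idx, idx.rel_pd_idx);
        /* prints: sd=1 pd=768 rel_pd=256 */
        return 0;
    }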

uda.c
      31  qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) |  in irdma_sc_access_ah()
      38  FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXHI, info->pd_idx >> 16);  in irdma_sc_access_ah()
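
irdma_sc_access_ah() shows why pd_idx is a full u32 in uda.h: the PD index is wider than a single descriptor field, so its low 16 bits go into PDINDEXLO and the bits above into PDINDEXHI. A sketch of the split and its round trip; the bit positions below are invented for illustration (the real ones are the IRDMA_UDA_CQPSQ_MAV_PDINDEXLO/HI masks used with FIELD_PREP()):

    #include <stdint.h>
    #include <stdio.h>

    #define PDINDEXLO_SHIFT 16  /* assumed position of the low field in qw1 */
    #define PDINDEXHI_SHIFT 20  /* assumed position of the high field in qw2 */

    int main(void)
    {
        uint32_t pd_idx = 0x2abcd;  /* an 18-bit PD index */
        uint64_t qw1 = (uint64_t)(pd_idx & 0xffff) << PDINDEXLO_SHIFT;
        uint64_t qw2 = (uint64_t)(pd_idx >> 16) << PDINDEXHI_SHIFT;

        /* reassemble both halves to verify the round trip */
        uint32_t lo = (uint32_t)(qw1 >> PDINDEXLO_SHIFT) & 0xffff;
        uint32_t hi = (uint32_t)(qw2 >> PDINDEXHI_SHIFT);

        printf("pd_idx=%#x round-trip=%#x\n", pd_idx, (hi << 16) | lo);
        return 0;
    }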

uda.h
      14  u32 pd_idx;  member

pble.h
      57  u32 pd_idx;  member

verbs.c
    4312  ah_info->pd_idx = pd->sc_pd.pd_id;  in irdma_setup_ah()

/kernel/linux/linux-6.6/drivers/md/

raid5.c
     148  if (idx == sh->pd_idx)  in raid6_idx_to_slot()
     283  WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags));
     922  /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */  in stripe_add_to_batch_list()
     952  while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)  in stripe_add_to_batch_list()
    1645  if (i == sh->qd_idx || i == sh->pd_idx ||  in set_syndrome_sources()
    1876  int count = 0, pd_idx = sh->pd_idx, i;  in ops_run_prexor5() local
    1880  unsigned int off_dest = off_srcs[count] = sh->dev[pd_idx].offset;  in ops_run_prexor5()
    1881  struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;  in ops_run_prexor5()
    2009  int pd_idx  in ops_complete_reconstruct() local
    2060  int count, pd_idx = sh->pd_idx, i;  ops_run_reconstruct5() local
    2228  int pd_idx = sh->pd_idx;  ops_run_check_p() local
    3007  int pd_idx, qd_idx;  raid5_compute_sector() local
    3382  int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks;  schedule_reconstruction() local
    4448  int pd_idx = sh->pd_idx;  handle_parity_checks6() local
    [all...]
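
raid5_compute_sector() (line 3007 above) is where pd_idx is derived from the stripe number, rotating parity across members so no single disk absorbs all parity writes. A simplified sketch for the default left-symmetric RAID5 layout; the real function works on sector_t with sector_div(), supports several other layouts, and also computes qd_idx for RAID6:

    #include <stdint.h>
    #include <stdio.h>

    /* ALGORITHM_LEFT_SYMMETRIC: parity moves back one slot per stripe and
     * the data disks follow it round-robin. */
    static void compute_pd_idx(uint64_t stripe, int raid_disks,
                               int *pd_idx, int *dd0_slot)
    {
        int data_disks = raid_disks - 1;

        *pd_idx = data_disks - (int)(stripe % raid_disks);
        *dd0_slot = (*pd_idx + 1) % raid_disks;  /* slot holding data disk 0 */
    }

    int main(void)
    {
        int pd, dd0;

        for (uint64_t stripe = 0; stripe < 4; stripe++) {
            compute_pd_idx(stripe, 4, &pd, &dd0);
            printf("stripe %llu: pd_idx=%d first data slot=%d\n",
                   (unsigned long long)stripe, pd, dd0);
        }
        return 0;
    }

Once computed, the index is cached in the stripe_head, so the later users in this listing (ops_run_prexor5(), ops_run_check_p(), schedule_reconstruction(), and so on) simply read sh->pd_idx rather than recomputing it.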

raid5-ppl.c
     163  int count = 0, pd_idx = sh->pd_idx, i;  in ops_run_partial_parity() local
     180  srcs[count++] = sh->dev[pd_idx].page;  in ops_run_partial_parity()
     302  if (i != sh->pd_idx && test_bit(R5_Wantwrite, &dev->flags)) {  in ppl_log_stripe()
     339  e->parity_disk = cpu_to_le32(sh->pd_idx);  in ppl_log_stripe()
     368  !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||  in ppl_write_stripe()
     369  !test_bit(R5_Insync, &sh->dev[sh->pd_idx].flags)) {  in ppl_write_stripe()
     374  log = &ppl_conf->child_logs[sh->pd_idx];  in ppl_write_stripe()
     937  BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk));  in ppl_recover_entry()
     941  conf->disks[sh.pd_idx]  in ppl_recover_entry()
     [all...]
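
ops_run_partial_parity() (line 163) builds the PPL partial-parity page as an XOR over a set of stripe pages collected into srcs[]; which pages enter the set depends on whether the write is a read-modify-write or a reconstruct-write, a selection elided here. A generic page-XOR sketch in plain C, with PAGE_SIZE assumed to be 4096; the driver performs the same XOR through the async_tx machinery rather than a byte loop:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* dest = XOR of all source pages */
    static void xor_pages(uint8_t *dest, uint8_t *const srcs[], int count)
    {
        memset(dest, 0, PAGE_SIZE);
        for (int i = 0; i < count; i++)
            for (size_t b = 0; b < PAGE_SIZE; b++)
                dest[b] ^= srcs[i][b];
    }

    int main(void)
    {
        static uint8_t d0[PAGE_SIZE], d1[PAGE_SIZE], pp[PAGE_SIZE];
        uint8_t *srcs[] = { d0, d1 };

        memset(d0, 0xAA, sizeof(d0));
        memset(d1, 0x0F, sizeof(d1));
        xor_pages(pp, srcs, 2);
        printf("partial parity byte 0 = %#x\n", pp[0]);  /* 0xaa ^ 0x0f = 0xa5 */
        return 0;
    }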

raid5-cache.c
     499  * Set R5_InJournal for parity dev[pd_idx]. This means  in r5c_finish_cache_stripe()
     504  set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);  in r5c_finish_cache_stripe()
     509  set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);  in r5c_finish_cache_stripe()
     928  if (i == sh->pd_idx || i == sh->qd_idx)  in r5l_log_stripe()
     947  sh->sector, sh->dev[sh->pd_idx].log_checksum,  in r5l_log_stripe()
     949  r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);  in r5l_log_stripe()
     953  sh->sector, sh->dev[sh->pd_idx].log_checksum,  in r5l_log_stripe()
     955  r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);  in r5l_log_stripe()
    1004  if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||  in r5l_write_stripe()
    1835  r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx]  in r5l_recovery_load_parity()
    [all...]
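
The log_checksum fields appended by r5l_log_stripe() (lines 947 and 953) are crc32c checksums of the payload pages, seeded from the journal's UUID checksum. A plain bitwise CRC32C (Castagnoli polynomial) for illustration only; the seeding and inversion conventions of the kernel's crc32c() helper may differ from this standalone version:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t crc32c_update(uint32_t crc, const uint8_t *data, size_t len)
    {
        while (len--) {
            crc ^= *data++;
            for (int k = 0; k < 8; k++)
                crc = (crc >> 1) ^ ((crc & 1u) ? 0x82F63B78u : 0u);
        }
        return crc;
    }

    int main(void)
    {
        static uint8_t parity_page[4096];  /* stands in for sh->dev[pd_idx].page */
        uint32_t seed = 0xFFFFFFFFu;       /* stands in for log->uuid_checksum */

        printf("log_checksum = %#010x\n",
               crc32c_update(seed, parity_page, sizeof(parity_page)));
        return 0;
    }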

raid5.h
     208  short pd_idx; /* parity disk index */  member
     338  * if R5_InJournal is set for parity pd_idx, all the

/kernel/linux/linux-5.10/drivers/infiniband/hw/i40iw/

i40iw_hmc.h
      68  * @pd_idx: page descriptor index
      70  #define I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
      74  ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
      80  * @pd_idx: page descriptor index
      83  #define I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
      86  (pd_idx << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
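
Both invalidate macros compose a single register write: the SD index and the PD index are packed into one I40E_PFHMC_PDINV value so the hardware drops its cached copy of that page descriptor. A userspace sketch of the value being composed; the shift positions are assumptions (SD index in the low bits, PD index at bit 16), and wr32() is replaced by printf:

    #include <stdint.h>
    #include <stdio.h>

    #define PDINV_PMSDIDX_SHIFT  0   /* assumed I40E_PFHMC_PDINV_PMSDIDX_SHIFT */
    #define PDINV_PMPDIDX_SHIFT 16   /* assumed I40E_PFHMC_PDINV_PMPDIDX_SHIFT */

    static void invalidate_pf_hmc_pd(uint32_t sd_idx, uint32_t pd_idx)
    {
        uint32_t val = (sd_idx << PDINV_PMSDIDX_SHIFT) |
                       (pd_idx << PDINV_PMPDIDX_SHIFT);

        /* stands in for: wr32(hw, I40E_PFHMC_PDINV, val); */
        printf("PFHMC_PDINV <- %#010x (sd=%u, pd=%u)\n", val, sd_idx, pd_idx);
    }

    int main(void)
    {
        invalidate_pf_hmc_pd(1, 256);
        return 0;
    }

The PMPDIDX field is narrow, so the pd_idx written here is presumably the SD-relative index (the rel_pd_idx computed alongside it in the .c files); treat that as an assumption and verify against the register definitions.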

i40iw_hmc.c
      92  u32 *pd_idx,  in i40iw_find_pd_index_limit()
     100  *(pd_idx) = (u32)(fpm_adr / I40IW_HMC_PAGED_BP_SIZE);  in i40iw_find_pd_index_limit()
     300  u32 pd_idx = 0, pd_lmt = 0;  in i40iw_sc_create_hmc_obj() local
     329  info->start_idx, info->count, &pd_idx, &pd_lmt);  in i40iw_sc_create_hmc_obj()
     343  pd_idx1 = max(pd_idx, (j * I40IW_HMC_MAX_BP_COUNT));  in i40iw_sc_create_hmc_obj()
     377  pd_idx1 = max(pd_idx,  in i40iw_sc_create_hmc_obj()
     453  u32 pd_idx, pd_lmt, rel_pd_idx;  in i40iw_sc_del_hmc_obj()
     482  info->start_idx, info->count, &pd_idx, &pd_lmt);  in i40iw_sc_del_hmc_obj()
     484  for (j = pd_idx; j < pd_lmt; j++) {  in i40iw_sc_del_hmc_obj()
      88  i40iw_find_pd_index_limit(struct i40iw_hmc_info *hmc_info, u32 type, u32 idx, u32 cnt, u32 *pd_idx, u32 *pd_limit)  i40iw_find_pd_index_limit() argument

i40iw_pble.c
     122  idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_PAGED_BP_SIZE;  in get_sd_pd_idx()
     123  idx->rel_pd_idx = (idx->pd_idx % I40IW_HMC_PD_CNT_IN_SD);  in get_sd_pd_idx()
     263  u32 pd_idx = info->idx.pd_idx;  in add_bp_pages() local
     291  status = i40iw_add_pd_table_entry(dev->hw, hmc_info, pd_idx++, &mem);  in add_bp_pages()

i40iw_pble.h
      78  u32 pd_idx;  member

/kernel/linux/linux-5.10/drivers/md/

raid5.c
     141  if (idx == sh->pd_idx)  in raid6_idx_to_slot()
     274  WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags));  in do_release_stripe()
     824  /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */  in stripe_add_to_batch_list()
     870  while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx)  in stripe_add_to_batch_list()
    1567  if (i == sh->qd_idx || i == sh->pd_idx ||  in set_syndrome_sources()
    1798  int count = 0, pd_idx = sh->pd_idx, i;  in ops_run_prexor5() local
    1802  unsigned int off_dest = off_srcs[count] = sh->dev[pd_idx].offset;  in ops_run_prexor5()
    1803  struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;  in ops_run_prexor5()
    1931  int pd_idx  in ops_complete_reconstruct() local
    1982  int count, pd_idx = sh->pd_idx, i;  ops_run_reconstruct5() local
    2150  int pd_idx = sh->pd_idx;  ops_run_check_p() local
    2919  int pd_idx, qd_idx;  raid5_compute_sector() local
    3294  int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks;  schedule_reconstruction() local
    4336  int pd_idx = sh->pd_idx;  handle_parity_checks6() local
    [all...]

raid5-ppl.c
     163  int count = 0, pd_idx = sh->pd_idx, i;  in ops_run_partial_parity() local
     180  srcs[count++] = sh->dev[pd_idx].page;  in ops_run_partial_parity()
     301  if (i != sh->pd_idx && test_bit(R5_Wantwrite, &dev->flags)) {  in ppl_log_stripe()
     338  e->parity_disk = cpu_to_le32(sh->pd_idx);  in ppl_log_stripe()
     367  !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||  in ppl_write_stripe()
     368  !test_bit(R5_Insync, &sh->dev[sh->pd_idx].flags)) {  in ppl_write_stripe()
     373  log = &ppl_conf->child_logs[sh->pd_idx];  in ppl_write_stripe()
     945  BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk));  in ppl_recover_entry()
     946  parity_rdev = conf->disks[sh.pd_idx]  in ppl_recover_entry()
     [all...]

raid5-cache.c
     499  * Set R5_InJournal for parity dev[pd_idx]. This means  in r5c_finish_cache_stripe()
     504  set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);  in r5c_finish_cache_stripe()
     509  set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);  in r5c_finish_cache_stripe()
     929  if (i == sh->pd_idx || i == sh->qd_idx)  in r5l_log_stripe()
     948  sh->sector, sh->dev[sh->pd_idx].log_checksum,  in r5l_log_stripe()
     950  r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);  in r5l_log_stripe()
     954  sh->sector, sh->dev[sh->pd_idx].log_checksum,  in r5l_log_stripe()
     956  r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);  in r5l_log_stripe()
    1005  if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||  in r5l_write_stripe()
    1842  r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx]  in r5l_recovery_load_parity()
    [all...]

raid5.h
     207  short pd_idx; /* parity disk index */  member
     337  * if R5_InJournal is set for parity pd_idx, all the

/kernel/linux/linux-5.10/drivers/net/ethernet/intel/i40e/

i40e_lan_hmc.c
     285  u32 pd_idx = 0, pd_lmt = 0;  in i40e_create_lan_hmc_object() local
     332  info->start_idx, info->count, &pd_idx,  in i40e_create_lan_hmc_object()
     360  /* find pd_idx and pd_lmt in this sd */  in i40e_create_lan_hmc_object()
     361  pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));  in i40e_create_lan_hmc_object()
     409  pd_idx1 = max(pd_idx,  in i40e_create_lan_hmc_object()
     528  u32 pd_idx, pd_lmt, rel_pd_idx;  in i40e_delete_lan_hmc_object() local
     575  info->start_idx, info->count, &pd_idx,  in i40e_delete_lan_hmc_object()
     578  for (j = pd_idx; j < pd_lmt; j++) {  in i40e_delete_lan_hmc_object()
     984  u32 pd_idx, pd_lmt, rel_pd_idx;  in i40e_hmc_get_object_va() local
    1025  &pd_idx,  in i40e_hmc_get_object_va()
    [all...]

i40e_hmc.h
     136  * @pd_idx: page descriptor index
     138  #define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
     141  ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))

/kernel/linux/linux-6.6/drivers/net/ethernet/intel/i40e/

i40e_lan_hmc.c
     284  u32 pd_idx = 0, pd_lmt = 0;  in i40e_create_lan_hmc_object() local
     332  info->start_idx, info->count, &pd_idx,  in i40e_create_lan_hmc_object()
     360  /* find pd_idx and pd_lmt in this sd */  in i40e_create_lan_hmc_object()
     361  pd_idx1 = max(pd_idx, (j * I40E_HMC_MAX_BP_COUNT));  in i40e_create_lan_hmc_object()
     409  pd_idx1 = max(pd_idx,  in i40e_create_lan_hmc_object()
     527  u32 pd_idx, pd_lmt, rel_pd_idx;  in i40e_delete_lan_hmc_object() local
     575  info->start_idx, info->count, &pd_idx,  in i40e_delete_lan_hmc_object()
     578  for (j = pd_idx; j < pd_lmt; j++) {  in i40e_delete_lan_hmc_object()
     984  u32 pd_idx, pd_lmt, rel_pd_idx;  in i40e_hmc_get_object_va() local
    1025  &pd_idx,  in i40e_hmc_get_object_va()
    [all...]

i40e_hmc.h
     136  * @pd_idx: page descriptor index
     138  #define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \
     141  ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))

/kernel/linux/linux-5.10/drivers/infiniband/hw/ocrdma/

ocrdma_verbs.c
     315  u16 pd_idx = 0;  in ocrdma_get_pd_num() local
     322  pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);  in ocrdma_get_pd_num()
     323  pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;  in ocrdma_get_pd_num()
     324  pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;  in ocrdma_get_pd_num()
     327  pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);  in ocrdma_get_pd_num()
     328  pd->id = dev->pd_mgr->pd_norm_start + pd_idx;  in ocrdma_get_pd_num()
     335  pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);  in ocrdma_get_pd_num()
     336  pd->id = dev->pd_mgr->pd_norm_start + pd_idx;  in ocrdma_get_pd_num()
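
ocrdma_get_pd_num() allocates PD numbers from per-pool bitmaps: _ocrdma_pd_mgr_get_bitmap() hands back a free slot, and the hardware PD id is that slot plus the pool's base (pd_dpp_start for DPP PDs, pd_norm_start otherwise). A toy single-pool version of the same idea; the pool size, struct, and function names below are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    #define POOL_SIZE 64  /* hypothetical pool size */

    struct pd_pool {
        uint64_t bitmap;    /* one bit per PD slot, 1 = in use */
        uint16_t pd_start;  /* first hardware PD id of this pool */
    };

    /* return a free slot index and mark it used, or -1 if exhausted */
    static int pool_get_bitmap(struct pd_pool *pool)
    {
        for (int i = 0; i < POOL_SIZE; i++) {
            if (!(pool->bitmap & (1ULL << i))) {
                pool->bitmap |= 1ULL << i;
                return i;
            }
        }
        return -1;
    }

    int main(void)
    {
        struct pd_pool norm = { .bitmap = 0, .pd_start = 128 };
        int pd_idx = pool_get_bitmap(&norm);

        if (pd_idx >= 0)
            printf("pd->id = %d\n", norm.pd_start + pd_idx);  /* 128 */
        return 0;
    }

The norm-pool arithmetic appears twice in the listing (lines 327-328 and 335-336) because it covers both the plain-PD case and, apparently, the fallback taken when a DPP PD cannot be granted.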

/kernel/linux/linux-6.6/drivers/infiniband/hw/ocrdma/

ocrdma_verbs.c
     317  u16 pd_idx = 0;  in ocrdma_get_pd_num() local
     324  pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);  in ocrdma_get_pd_num()
     325  pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;  in ocrdma_get_pd_num()
     326  pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;  in ocrdma_get_pd_num()
     329  pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);  in ocrdma_get_pd_num()
     330  pd->id = dev->pd_mgr->pd_norm_start + pd_idx;  in ocrdma_get_pd_num()
     337  pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);  in ocrdma_get_pd_num()
     338  pd->id = dev->pd_mgr->pd_norm_start + pd_idx;  in ocrdma_get_pd_num()