Lines matching refs:ind_bufp
760 struct ibmvnic_ind_xmit_queue *ind_bufp;
778 ind_bufp = &rx_scrq->ind_buf;
782 * To account for them, start the loop at ind_bufp->index rather
783 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
786 for (i = ind_bufp->index; i < count; ++i) {
822 sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
841 if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
845 (u64)ind_bufp->indir_dma,
846 (u64)ind_bufp->index);
849 buffers_added += ind_bufp->index;
850 adapter->replenish_add_buff_success += ind_bufp->index;
851 ind_bufp->index = 0;
860 for (i = ind_bufp->index - 1; i >= 0; --i) {
865 sub_crq = &ind_bufp->indir_arr[i];
873 adapter->replenish_add_buff_failure += ind_bufp->index;
875 ind_bufp->index = 0;
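
The matches above (760-875) look like cross-reference hits in drivers/net/ethernet/ibm/ibmvnic.c, in the receive-buffer replenish path: each refilled buffer gets one descriptor staged in ind_bufp->indir_arr, and once IBMVNIC_MAX_IND_DESCS entries are queued (or the loop reaches its last buffer) the whole batch is handed to the hypervisor in a single indirect sub-CRQ call and ind_bufp->index is reset; if that call fails, the staged entries are walked back from index - 1 down to 0. Below is a minimal user-space sketch of that batching pattern. All names here (IND_MAX, struct desc, struct ind_queue, send_indirect, unwind_batch, queue_desc) are stand-ins invented for the sketch and do not exist in the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IND_MAX 16                      /* stands in for IBMVNIC_MAX_IND_DESCS */

struct desc { uint64_t words[4]; };     /* stands in for ibmvnic_generic_scrq */

struct ind_queue {
	struct desc indir_arr[IND_MAX]; /* staging area; DMA-mapped in the driver */
	uint64_t    indir_dma;          /* bus address the hypervisor reads from */
	int         index;              /* number of descriptors staged so far */
};

/* Hypothetical stand-in for the driver's indirect sub-CRQ hcall. */
static int send_indirect(uint64_t dma, uint64_t n_desc)
{
	printf("flush %llu descriptors at 0x%llx\n",
	       (unsigned long long)n_desc, (unsigned long long)dma);
	return 0;
}

/* Failure path: walk the staged entries back from index - 1 down to 0 and
 * drop them, as the loop at lines 860-875 does with the refilled rx buffers. */
static void unwind_batch(struct ind_queue *q)
{
	int i;

	for (i = q->index - 1; i >= 0; i--)
		memset(&q->indir_arr[i], 0, sizeof(q->indir_arr[i]));
	q->index = 0;
}

/* Stage one descriptor; push the batch when it is full or 'last' is set. */
static int queue_desc(struct ind_queue *q, const struct desc *d, int last)
{
	int rc = 0;

	q->indir_arr[q->index++] = *d;
	if (q->index == IND_MAX || last) {
		rc = send_indirect(q->indir_dma, (uint64_t)q->index);
		if (rc)
			unwind_batch(q);
		else
			q->index = 0;   /* batch accepted, start a new one */
	}
	return rc;
}

int main(void)
{
	struct ind_queue q = { .indir_dma = 0x1000 };
	struct desc d = { { 0 } };
	int i;

	for (i = 0; i < 40; i++)        /* refill 40 buffers -> 3 flushes */
		queue_desc(&q, &d, i == 39);
	return 0;
}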
2297 struct ibmvnic_ind_xmit_queue *ind_bufp;
2306 ind_bufp = &tx_scrq->ind_buf;
2307 entries = (u64)ind_bufp->index;
2311 tx_scrq_entry = ind_bufp->indir_arr[i];
2336 ind_bufp->index = 0;
2356 struct ibmvnic_ind_xmit_queue *ind_bufp;
2362 ind_bufp = &tx_scrq->ind_buf;
2363 dma_addr = (u64)ind_bufp->indir_dma;
2364 entries = (u64)ind_bufp->index;
2373 ind_bufp->index = 0;
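
The matches at 2297-2336 and 2356-2373 appear to be the transmit-side counterparts: one helper walks the still-staged entries in indir_arr to release their tx buffers when a batch cannot be delivered, the other reads indir_dma and the current index count, sends everything in one indirect call, and zeroes index. A rough sketch of that flush step, reusing the struct ind_queue, send_indirect() and unwind_batch() stand-ins from the sketch above (tx_flush_batch is likewise an invented name):

/* Flush whatever is currently staged for a transmit queue. */
static int tx_flush_batch(struct ind_queue *q)
{
	uint64_t dma = q->indir_dma;
	uint64_t entries = (uint64_t)q->index;
	int rc = 0;

	if (entries) {
		rc = send_indirect(dma, entries);
		if (rc)
			unwind_batch(q); /* release staged tx buffers, as at 2307-2336 */
	}
	q->index = 0;                    /* the batch is gone either way */
	return rc;
}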
2383 struct ibmvnic_ind_xmit_queue *ind_bufp;
2421 ind_bufp = &tx_scrq->ind_buf;
2535 if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
2542 memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
2544 ind_bufp->index += num_entries;
2547 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
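
The remaining matches (2383-2547) sit in the transmit path itself: before staging a packet's descriptors the driver checks whether they still fit on top of ind_bufp->index, copies them into indir_arr, bumps index, and flushes immediately unless the stack has signalled that more packets are coming and the batch still has room. The sketch below models that decision with the same stand-ins as above; xmit_stage is an invented name, the 'more' flag plays the role of netdev_xmit_more(), and the overflow branch simply flushes first, which is a simplification (the listing only shows the overflow test at 2535, not how that branch is actually handled).

/* Transmit-side staging as suggested by the matches at 2535-2547. */
static int xmit_stage(struct ind_queue *q, const struct desc *descs,
		      int num_entries, int more)
{
	if (num_entries > IND_MAX)
		return -1;              /* can never fit; the caller must drop */

	/* If this packet's descriptors do not fit on top of what is already
	 * staged, push the pending batch out first. */
	if (num_entries + q->index > IND_MAX)
		tx_flush_batch(q);

	memcpy(&q->indir_arr[q->index], descs,
	       num_entries * sizeof(struct desc));
	q->index += num_entries;

	/* Flush now unless more packets are coming and there is still room. */
	if (!(more && q->index < IND_MAX))
		return tx_flush_batch(q);
	return 0;
}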