Lines matching refs: mtt (Memory Translation Table, the HCA-side page-translation entries the mlx4 driver populates)
95 struct mlx4_mtt *mtt,
134 err = mlx4_write_mtt(dev->dev, mtt, *start_index,
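
The call at 134 is the flush step of a batching pattern: MTT entries are staged into a page-sized scratch buffer and handed to mlx4_write_mtt() one chunk at a time, which is why it dereferences *start_index through a pointer. A minimal standalone sketch of that pattern; flush_mtt() is a hypothetical stand-in for mlx4_write_mtt(), and CHUNK_ENTRIES models PAGE_SIZE / sizeof(u64):

    #include <stdint.h>
    #include <stdio.h>

    #define CHUNK_ENTRIES 512            /* models PAGE_SIZE / sizeof(u64) */

    /* Hypothetical stand-in for mlx4_write_mtt(): consume one chunk. */
    static int flush_mtt(int start_index, int npages, const uint64_t *pages)
    {
        printf("write %d entries at index %d (first 0x%llx)\n",
               npages, start_index, (unsigned long long)pages[0]);
        return 0;
    }

    /* Stage mtt_entries addresses, flushing whenever the scratch buffer
     * fills, mirroring the loop around line 134. */
    static int write_block(uint64_t start, uint64_t mtt_size, int mtt_entries,
                           uint64_t *pages, int *start_index, int *npages)
    {
        for (int k = 0; k < mtt_entries; ++k) {
            pages[(*npages)++] = start + mtt_size * k;
            if (*npages == CHUNK_ENTRIES) {
                int err = flush_mtt(*start_index, *npages, pages);
                if (err)
                    return err;
                *start_index += *npages;
                *npages = 0;
            }
        }
        return 0;
    }

    int main(void)
    {
        static uint64_t pages[CHUNK_ENTRIES];
        int start_index = 0, npages = 0;

        write_block(0x100000, 4096, 1024, pages, &start_index, &npages);
        /* Any partial chunk still staged is flushed by the caller. */
        if (npages)
            flush_mtt(start_index, npages, pages);
        return 0;
    }
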
163 * mtt size accordingly. Here we take the last right bit which
175 * reduce the mtt size accordingly.
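
The "last right bit" in the comments at 163 and 175 is the lowest set bit of the new block's start address: its position is the largest page shift that still aligns the block, so a bigger candidate shift must be reduced to it. A standalone sketch of that reduction; alignment_of() here is an illustrative helper modeled on the driver's (hypothetical values):

    #include <stdint.h>
    #include <stdio.h>

    /* Position of the lowest set bit of ptr: the largest shift s such
     * that ptr is a multiple of 1 << s (ptr must be nonzero). */
    static uint64_t alignment_of(uint64_t ptr)
    {
        return (uint64_t)__builtin_ctzll(ptr);
    }

    int main(void)
    {
        /* A block starting at 0x203000 is only 4 KiB aligned (bit 12 is
         * its lowest set bit), so a 64 KiB candidate must be reduced. */
        uint64_t next_block_start = 0x203000;
        uint64_t block_shift = 16;                 /* candidate: 64 KiB */

        if (next_block_start & ((1ULL << block_shift) - 1))
            block_shift = alignment_of(next_block_start);

        printf("block_shift reduced to %llu\n",
               (unsigned long long)block_shift);
        return 0;
    }
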
182 int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
200 mtt_shift = mtt->page_shift;
211 * If len is misaligned, write an extra mtt entry to cover the
214 err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
229 * If len is misaligned, write an extra mtt entry to cover
232 err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size,
241 err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages);
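
Around these lines, mlx4_ib_umem_write_mtt() walks the umem scatterlist and coalesces physically contiguous entries into one block before handing it to the block writer at 214/232; the mlx4_write_mtt() at 241 flushes whatever is still staged at the end. A simplified standalone model of the coalescing walk, with flat dma_addr/dma_len arrays standing in for sg_dma_address()/sg_dma_len() (values are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the scatterlist accessors. */
    static const uint64_t dma_addr[] = { 0x10000, 0x11000, 0x12000, 0x40000 };
    static const uint64_t dma_len[]  = {  0x1000,  0x1000,  0x1000,  0x2000 };

    static void write_block(uint64_t start, uint64_t len)
    {
        printf("block: start 0x%llx len 0x%llx\n",
               (unsigned long long)start, (unsigned long long)len);
    }

    int main(void)
    {
        uint64_t cur_start = 0, len = 0;

        for (unsigned i = 0; i < 4; ++i) {
            if (cur_start + len == dma_addr[i]) {
                /* Still the same physically contiguous block: extend. */
                len += dma_len[i];
                continue;
            }
            /* A new block starts; emit the finished one (the real code
             * also rounds a misaligned tail up to a whole mtt entry
             * here, per the comments at 211 and 229). */
            if (len)
                write_block(cur_start, len);
            cur_start = dma_addr[i];
            len = dma_len[i];
        }
        if (len)
            write_block(cur_start, len);       /* final block */
        return 0;
    }
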
249 * Calculate optimal mtt size based on contiguous pages.
254 * middle already handled as part of mtt shift calculation for both their start
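
The comment at 249 describes choosing the largest MTT page that still fits every contiguous run: the page shift can be no bigger than the alignment of each interior block boundary, so it is the minimum lowest-set-bit position over those boundaries. A standalone sketch under that reading (boundary values are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t alignment_of(uint64_t v)
    {
        return (uint64_t)__builtin_ctzll(v);   /* v must be nonzero */
    }

    int main(void)
    {
        /* Start and end addresses of each contiguous run in the middle
         * of the region; the first start and last end get the special
         * handling the comment describes. */
        const uint64_t boundaries[] = { 0x10000, 0x13000, 0x40000, 0x42000 };
        uint64_t shift = 31;               /* start from a large candidate */

        for (unsigned i = 0; i < 4; ++i) {
            uint64_t a = alignment_of(boundaries[i]);
            if (a < shift)
                shift = a;                 /* reduce mtt size accordingly */
        }
        printf("optimal mtt shift: %llu (page size 0x%llx)\n",
               (unsigned long long)shift,
               (unsigned long long)(1ULL << shift));
        return 0;
    }
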
433 err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
526 err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
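
Both call sites at 433 and 526 sit in the memory-registration paths, where writing the MTT is the middle step between allocating the MR and enabling it in hardware (mlx4_mr_alloc() and mlx4_mr_enable() in the mlx4 core). A standalone outline of that ordering with its goto-based unwind; mr_alloc/write_mtt/mr_enable/mr_free are hypothetical stubs, not the driver's names:

    #include <stdio.h>

    /* Stubs standing in for the mlx4 core calls around these lines. */
    static int mr_alloc(void)  { puts("alloc MR + MTT range"); return 0; }
    static int write_mtt(void) { puts("write umem pages into MTT"); return 0; }
    static int mr_enable(void) { puts("enable MR in HW"); return 0; }
    static void mr_free(void)  { puts("free MR"); }

    /* Registration order: allocate, populate the translation table,
     * then hand the MR to hardware, unwinding on failure. */
    int main(void)
    {
        if (mr_alloc())
            return 1;
        if (write_mtt())
            goto err_mr;
        if (mr_enable())
            goto err_mr;
        return 0;

    err_mr:
        mr_free();
        return 1;
    }
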