Lines matching refs:ic (every hit references ic, a struct rds_ib_connection pointer, in the RDS InfiniBand receive path)

47 void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
52 for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
64 sge->addr = ic->i_recv_hdrs_dma[i];
66 sge->lkey = ic->i_pd->local_dma_lkey;
71 sge->lkey = ic->i_pd->local_dma_lkey;
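
The hits at lines 47-71 are rds_ib_recv_init_ring(): each slot in the receive ring gets a work request with a two-element scatter/gather list, element 0 pointing at that slot's pre-mapped RDS header (i_recv_hdrs_dma[i]) and element 1 reserved for the data fragment, both using the PD's local_dma_lkey. A minimal sketch of that wiring follows; the r_wr/r_sge field names and RDS_IB_RECV_SGE are assumptions taken from the surrounding driver, not from this listing.

    /* Sketch: how each receive slot's work request is wired up.
     * Struct layouts are abbreviated; only i_recvs, i_recv_ring.w_nr,
     * i_recv_hdrs_dma and i_pd are taken from the listing, the rest
     * is assumed. */
    static void sketch_recv_init_ring(struct rds_ib_connection *ic)
    {
            struct rds_ib_recv_work *recv;
            u32 i;

            for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
                    struct ib_sge *sge;

                    recv->r_ibinc = NULL;
                    recv->r_frag = NULL;

                    recv->r_wr.next = NULL;
                    recv->r_wr.wr_id = i;
                    recv->r_wr.sg_list = recv->r_sge;
                    recv->r_wr.num_sge = RDS_IB_RECV_SGE;   /* header + data */

                    /* sge 0: the per-slot RDS header, DMA-mapped at setup time */
                    sge = &recv->r_sge[0];
                    sge->addr = ic->i_recv_hdrs_dma[i];
                    sge->length = sizeof(struct rds_header);
                    sge->lkey = ic->i_pd->local_dma_lkey;

                    /* sge 1: the data fragment, filled in later by the refill path */
                    sge = &recv->r_sge[1];
                    sge->addr = 0;
                    sge->length = RDS_FRAG_SIZE;
                    sge->lkey = ic->i_pd->local_dma_lkey;
            }
    }
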
121 int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp)
125 ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp);
127 ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp);
129 free_percpu(ic->i_cache_incs.percpu);
155 void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
163 rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
164 rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
165 free_percpu(ic->i_cache_incs.percpu);
174 rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
175 rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
176 free_percpu(ic->i_cache_frags.percpu);
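
Lines 121-176 cover the per-CPU receive caches: one cache of incoming-message descriptors (i_cache_incs) and one of page fragments (i_cache_frags), allocated at connection setup and drained then freed at teardown. A sketch of the allocate-then-unwind pattern implied by line 129, assuming rds_ib_recv_alloc_cache() takes (&cache, gfp) and returns 0 on success as the listing implies.

    /* Sketch of the two-cache allocation with unwind on partial failure. */
    static int sketch_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp)
    {
            int ret;

            ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp);
            if (ret)
                    return ret;

            ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp);
            if (ret)
                    /* second cache failed: undo the first allocation */
                    free_percpu(ic->i_cache_incs.percpu);

            return ret;
    }
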
192 static void rds_ib_frag_free(struct rds_ib_connection *ic,
197 rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
198 atomic_add(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
208 struct rds_ib_connection *ic = inc->i_conn->c_transport_data;
215 rds_ib_frag_free(ic, frag);
220 rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
223 static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
231 ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
232 rds_ib_frag_free(ic, recv->r_frag);
237 void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
241 for (i = 0; i < ic->i_recv_ring.w_nr; i++)
242 rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
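
Lines 192-242 are the release path: rds_ib_frag_free() pushes a fragment back onto the per-CPU cache and bumps i_cache_allocs (counted in 1 KiB units), and rds_ib_recv_clear_ring() walks every slot, unmapping the fragment from the device before handing it back. A sketch of clearing one slot; the r_ibinc/r_frag fields and rds_inc_put() come from the wider RDS code and are assumptions here.

    /* Sketch: release one receive slot. The frag is unmapped from the
     * device and pushed back onto the connection's per-CPU frag cache;
     * i_cache_allocs counts cached bytes in 1 KiB units. */
    static void sketch_recv_clear_one(struct rds_ib_connection *ic,
                                      struct rds_ib_recv_work *recv)
    {
            if (recv->r_ibinc) {
                    rds_inc_put(&recv->r_ibinc->ii_inc);
                    recv->r_ibinc = NULL;
            }
            if (recv->r_frag) {
                    ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1,
                                    DMA_FROM_DEVICE);
                    rds_ib_frag_free(ic, recv->r_frag); /* cache_put + accounting */
                    recv->r_frag = NULL;
            }
    }
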
245 static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
252 cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
270 rds_inc_init(&ibinc->ii_inc, ic->conn, &ic->conn->c_faddr);
275 static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
282 cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
285 atomic_sub(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
310 struct rds_ib_connection *ic = conn->c_transport_data;
321 if (!ic->i_cache_incs.ready)
322 rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
323 if (!ic->i_cache_frags.ready)
324 rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
331 recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
337 recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
341 ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
346 sge->addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs];
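
Lines 245-346 rebuild a slot before it is reposted: the incoming descriptor and the fragment come from the per-CPU caches when possible (line 285 debits i_cache_allocs), otherwise from fresh allocations; the fragment is then DMA-mapped and sge 0 is re-pointed at that slot's header. A condensed sketch with the allocation fallbacks and most error handling trimmed.

    /* Sketch of refilling one slot before it is reposted. Cache hits come
     * back via rds_ib_recv_cache_get() inside the refill_one helpers;
     * misses fall back to fresh allocations (omitted here). */
    static int sketch_recv_refill_one(struct rds_ib_connection *ic,
                                      struct rds_ib_recv_work *recv,
                                      gfp_t slab_mask, gfp_t page_mask)
    {
            struct ib_sge *sge;
            int ret;

            if (!recv->r_ibinc) {
                    recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
                    if (!recv->r_ibinc)
                            return -ENOMEM;
            }

            if (!recv->r_frag) {
                    recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
                    if (!recv->r_frag)
                            return -ENOMEM;
            }

            /* map the data fragment for the device; 0 mapped entries = failure */
            ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
                                1, DMA_FROM_DEVICE);
            if (ret == 0)
                    return -ENOMEM;

            /* sge 0 always points back at this slot's header buffer */
            sge = &recv->r_sge[0];
            sge->addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs];

            return 0;
    }
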
384 struct rds_ib_connection *ic = conn->c_transport_data;
400 rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
401 if (pos >= ic->i_recv_ring.w_nr) {
407 recv = &ic->i_recvs[pos];
419 ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, NULL);
437 if (ic->i_flowctl && posted)
441 rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
457 (can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
458 rds_ib_ring_empty(&ic->i_recv_ring))) {
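
Lines 384-458 are the posting loop, rds_ib_recv_refill(): reserve a ring slot with rds_ib_ring_alloc(), fill it, post it with ib_post_recv(), give the reservation back with rds_ib_ring_unalloc() on failure, advertise credits if flow control is on, and rearm the refill worker while the ring stays low or empty. A sketch of that loop shape; rds_ib_advertise_credits() is the credit helper from the RDS IB send path and is assumed here rather than shown in the listing.

    /* Sketch of the posting loop. Slot reservation and the QP post follow
     * the listing; the worker-rescheduling tail is summarized in a comment. */
    static void sketch_recv_refill(struct rds_connection *conn, gfp_t gfp)
    {
            struct rds_ib_connection *ic = conn->c_transport_data;
            unsigned int posted = 0;
            u32 pos;
            int ret = 0;

            while (!ret && rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
                    struct rds_ib_recv_work *recv = &ic->i_recvs[pos];

                    ret = sketch_recv_refill_one(ic, recv, gfp, gfp);
                    if (ret) {
                            rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
                            break;
                    }

                    ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, NULL);
                    if (ret) {
                            rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
                            break;
                    }
                    posted++;
            }

            /* With flow control enabled, tell the peer about the new credits. */
            if (ic->i_flowctl && posted)
                    rds_ib_advertise_credits(conn, posted);

            /* If the ring is still low or empty, the real code rearms the
             * refill worker so posting is retried soon (lines 457-458). */
    }
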
573 /* ic starts out kzalloc()ed */
574 void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
576 struct ib_send_wr *wr = &ic->i_ack_wr;
577 struct ib_sge *sge = &ic->i_ack_sge;
579 sge->addr = ic->i_ack_dma;
581 sge->lkey = ic->i_pd->local_dma_lkey;
613 void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
617 spin_lock_irqsave(&ic->i_ack_lock, flags);
618 ic->i_ack_next = seq;
620 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
621 spin_unlock_irqrestore(&ic->i_ack_lock, flags);
624 static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
629 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
631 spin_lock_irqsave(&ic->i_ack_lock, flags);
632 seq = ic->i_ack_next;
633 spin_unlock_irqrestore(&ic->i_ack_lock, flags);
638 void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
640 atomic64_set(&ic->i_ack_next, seq);
643 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
647 static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
649 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
652 return atomic64_read(&ic->i_ack_next);
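
Lines 574-652 are the ACK bookkeeping: rds_ib_recv_init_ack() builds a one-SGE send WR over the pre-mapped ack header, and rds_ib_set_ack()/rds_ib_get_ack() exist in two builds, a spinlock-protected u64 where 64-bit atomics are not usable and an atomic64_t elsewhere; both publish the new sequence and then set IB_ACK_REQUESTED. A sketch of the two setters side by side; the config guard that selects between them is not shown in the listing and is left out here.

    /* Sketch: the two interchangeable ACK-sequence stores implied by the
     * listing. Only one pair is built into any given kernel; i_ack_next
     * is a plain u64 in variant A and an atomic64_t in variant B. The
     * real code also orders the sequence store before the flag set. */

    /* Variant A: spinlock-protected sequence (no usable 64-bit atomics). */
    static void sketch_set_ack_locked(struct rds_ib_connection *ic, u64 seq,
                                      int ack_required)
    {
            unsigned long flags;

            spin_lock_irqsave(&ic->i_ack_lock, flags);
            ic->i_ack_next = seq;
            if (ack_required)
                    set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
            spin_unlock_irqrestore(&ic->i_ack_lock, flags);
    }

    /* Variant B: atomic64_t, no lock needed to publish the sequence. */
    static void sketch_set_ack_atomic(struct rds_ib_connection *ic, u64 seq,
                                      int ack_required)
    {
            atomic64_set(&ic->i_ack_next, seq);
            if (ack_required)
                    set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
    }
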
657 static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
659 struct rds_header *hdr = ic->i_ack;
663 seq = rds_ib_get_ack(ic);
665 rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
667 ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev, ic->i_ack_dma,
673 ib_dma_sync_single_for_device(ic->rds_ibdev->dev, ic->i_ack_dma,
676 ic->i_ack_queued = jiffies;
678 ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, NULL);
683 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
684 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
688 rds_ib_conn_error(ic->conn, "sending ack failed\n");
731 void rds_ib_attempt_ack(struct rds_ib_connection *ic)
735 if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
738 if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
744 if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
746 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
750 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
751 rds_ib_send_ack(ic, adv_credits);
758 void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
760 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
761 rds_ib_attempt_ack(ic);
768 u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
770 if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
772 return rds_ib_get_ack(ic);
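
Lines 657-772 implement the ACK send path as a two-flag state machine: IB_ACK_REQUESTED means an ACK is owed to the peer, IB_ACK_IN_FLIGHT means an ACK work request is already posted. rds_ib_attempt_ack() sends only when it can win IN_FLIGHT and grab a send credit, the send-completion handler clears IN_FLIGHT and retries, and rds_ib_piggyb_ack() consumes REQUESTED when the ACK can ride along on an outgoing data message. A sketch of the attempt path with statistics counters omitted.

    /* Sketch of the ACK attempt path. REQUESTED = an ACK is owed,
     * IN_FLIGHT = an ACK WR is already on the wire. */
    static void sketch_attempt_ack(struct rds_ib_connection *ic)
    {
            unsigned int adv_credits;

            if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
                    return;                 /* nothing owed */

            if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags))
                    return;                 /* an ACK is already posted */

            /* Need one send credit; if none, back off and let a later attempt
             * (or a piggybacked ACK) take care of it. */
            if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
                    clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
                    return;
            }

            clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
            rds_ib_send_ack(ic, adv_credits);       /* posts i_ack_wr; on error it
                                                     * re-sets REQUESTED and clears
                                                     * IN_FLIGHT (lines 683-688) */
    }
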
852 struct rds_ib_connection *ic = conn->c_transport_data;
853 struct rds_ib_incoming *ibinc = ic->i_ibinc;
855 dma_addr_t dma_addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs];
859 rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
872 ihdr = ic->i_recv_hdrs[recv - ic->i_recvs];
874 ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev, dma_addr,
910 rds_ib_frag_free(ic, recv->r_frag);
924 ic->i_ibinc = ibinc;
930 ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
934 rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
935 ic->i_recv_data_rem, hdr->h_flags);
953 if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
954 ic->i_recv_data_rem -= RDS_FRAG_SIZE;
956 ic->i_recv_data_rem = 0;
957 ic->i_ibinc = NULL;
979 ib_dma_sync_single_for_device(ic->rds_ibdev->dev, dma_addr,
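
Lines 852-979 are the per-fragment data path: the slot's header is synced for CPU access and validated, the first fragment of a message installs a new incoming descriptor on ic->i_ibinc and latches the payload length into i_recv_data_rem, each fragment then subtracts RDS_FRAG_SIZE until the message completes and i_ibinc is dropped, and the header is synced back for the device. A sketch of just that length accounting; header validation and delivery to the RDS core are summarized in comments.

    /* Sketch of the fragment accounting in the receive data path. */
    static void sketch_process_recv(struct rds_ib_connection *ic,
                                    struct rds_ib_recv_work *recv,
                                    struct rds_header *ihdr)
    {
            struct rds_ib_incoming *ibinc = ic->i_ibinc;

            if (!ibinc) {
                    /* First fragment of a new message: take ownership of the
                     * ibinc prepared by the refill path and latch the total
                     * payload length from the header. */
                    ibinc = recv->r_ibinc;
                    recv->r_ibinc = NULL;
                    ic->i_ibinc = ibinc;
                    ic->i_recv_data_rem = be32_to_cpu(ihdr->h_len);
            }

            /* ... attach recv->r_frag to ibinc; complete messages are handed
             * up to the RDS core for delivery ... */

            if (ic->i_recv_data_rem > RDS_FRAG_SIZE) {
                    ic->i_recv_data_rem -= RDS_FRAG_SIZE;
            } else {
                    /* Last fragment: the message is complete. */
                    ic->i_recv_data_rem = 0;
                    ic->i_ibinc = NULL;
            }
    }
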
983 void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
987 struct rds_connection *conn = ic->conn;
996 recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
997 ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1,
1024 rds_ib_frag_free(ic, recv->r_frag);
1027 rds_ib_ring_free(&ic->i_recv_ring, 1);
1032 if (rds_ib_ring_empty(&ic->i_recv_ring))
1035 if (rds_ib_ring_low(&ic->i_recv_ring)) {
1044 struct rds_ib_connection *ic = conn->c_transport_data;
1048 rds_ib_attempt_ack(ic);
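
Lines 983-1048 are the completion side: the oldest ring entry is matched to the completion, its fragment is unmapped, the fragment is either processed or dropped back into the cache, the ring slot is freed, and the refill path is kicked when the ring runs low; the transport-level recv callback at line 1044 also takes the opportunity to attempt an ACK. A sketch of the handler's shape, assuming one fragment per work request and a simple success-vs-flushed split on wc->status; the refill call's prefill/gfp arguments are assumptions here.

    /* Sketch of the receive-completion handler's shape. rds_ib_ring_oldest()
     * locates the slot the completion belongs to; stats and connection-error
     * handling are omitted. */
    static void sketch_recv_cqe_handler(struct rds_ib_connection *ic,
                                        struct ib_wc *wc)
    {
            struct rds_connection *conn = ic->conn;
            struct rds_ib_recv_work *recv;

            recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
            ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1,
                            DMA_FROM_DEVICE);

            if (wc->status == IB_WC_SUCCESS) {
                    /* hand the header + fragment to the data path
                     * (sketch_process_recv above) */
            } else {
                    /* flush or error: just drop the fragment back in the cache */
                    rds_ib_frag_free(ic, recv->r_frag);
            }
            recv->r_frag = NULL;

            rds_ib_ring_free(&ic->i_recv_ring, 1);

            /* If posted receives have drained low, top the ring back up. */
            if (rds_ib_ring_low(&ic->i_recv_ring))
                    rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
    }
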