Lines Matching refs: ic (references to the struct rds_ib_connection *ic used throughout the RDS InfiniBand send path)

73 static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
78 ib_dma_unmap_sg(ic->i_cm_id->device,
83 static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
88 ib_dma_unmap_sg(ic->i_cm_id->device,
123 static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
129 ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
150 static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
161 rds_ib_send_unmap_data(ic, send->s_op, wc_status);
168 rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
175 rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
190 void rds_ib_send_init_ring(struct rds_ib_connection *ic)
195 for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
205 sge->addr = ic->i_send_hdrs_dma[i];
208 sge->lkey = ic->i_pd->local_dma_lkey;
210 send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
214 void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
219 for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
221 rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
229 static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
231 if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
234 BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
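Lines 229-234 maintain ic->i_signaled_sends, the number of signaled work requests still in flight on the connection; when a completion drains it to zero a waiter can be woken (the rest of the condition at line 231 is not shown in this listing), and the count must never go negative. A minimal user-space model of that accounting, using C11 atomics in place of the kernel's atomic_t and an assert in place of BUG_ON; the helper names here are mine, not the kernel's:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Models ic->i_signaled_sends: signaled WRs posted but not yet reaped. */
    static atomic_int signaled_sends;

    static void add_signaled(int nr)
    {
        atomic_fetch_add(&signaled_sends, nr);
    }

    /* Analogue of rds_ib_sub_signaled(): subtract the signaled WRs a
     * completion just retired and notice the moment nothing signaled is
     * left in flight. */
    static void sub_signaled(int nr)
    {
        int remaining = atomic_fetch_sub(&signaled_sends, nr) - nr;

        if (remaining == 0)
            printf("no signaled sends left; a ring-empty waiter could run\n");

        assert(remaining >= 0);   /* stands in for the kernel's BUG_ON() */
    }

    int main(void)
    {
        add_signaled(3);
        sub_signaled(2);
        sub_signaled(1);
        return 0;
    }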
243 void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
246 struct rds_connection *conn = ic->conn;
261 if (time_after(jiffies, ic->i_ack_queued + HZ / 2))
263 rds_ib_ack_send_complete(ic);
267 oldest = rds_ib_ring_oldest(&ic->i_send_ring);
269 completed = rds_ib_ring_completed(&ic->i_send_ring, wc->wr_id, oldest);
272 send = &ic->i_sends[oldest];
276 rm = rds_ib_send_unmap_op(ic, send, wc->status);
292 oldest = (oldest + 1) % ic->i_send_ring.w_nr;
295 rds_ib_ring_free(&ic->i_send_ring, completed);
296 rds_ib_sub_signaled(ic, nr_sig);
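Lines 243-296 are the send completion handler: it takes the oldest outstanding slot in the send ring, works out how many slots the reported wr_id retires, unmaps the operation in each slot in order, then frees that many ring entries and drops the signaled-send count. A stand-alone sketch of the wrap-around walk, with a fixed 8-slot ring and arithmetic that is only a plausible model of rds_ib_ring_oldest()/rds_ib_ring_completed(), not the kernel's implementation:

    #include <stdio.h>

    #define W_NR 8u   /* ring size; the real i_send_ring.w_nr is set up elsewhere */

    /* How many entries complete when the WR at index wr_id finishes,
     * counting forward from the oldest outstanding index, inclusive,
     * wrapping at W_NR. */
    static unsigned int ring_completed(unsigned int wr_id, unsigned int oldest)
    {
        if (oldest <= wr_id)
            return wr_id - oldest + 1;
        return W_NR - oldest + wr_id + 1;
    }

    int main(void)
    {
        unsigned int oldest = 6;   /* oldest un-reaped send slot           */
        unsigned int wr_id  = 1;   /* slot reported by the work completion */
        unsigned int completed = ring_completed(wr_id, oldest);
        unsigned int i;

        /* Walk each completed slot, as lines 272-292 do before freeing the
         * ring space; the kernel unmaps the message op at each step. */
        for (i = 0; i < completed; i++) {
            printf("reap send slot %u\n", oldest);
            oldest = (oldest + 1) % W_NR;
        }
        return 0;
    }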
355 int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
362 if (!ic->i_flowctl)
367 oldval = newval = atomic_read(&ic->i_credits);
379 struct rds_connection *conn = ic->i_cm_id->context;
401 if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
410 struct rds_ib_connection *ic = conn->c_transport_data;
417 IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
420 atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
431 struct rds_ib_connection *ic = conn->c_transport_data;
436 atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);
450 if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
451 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
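Every credit line above operates on a single atomic, ic->i_credits, which packs two counters: send credits this node may consume, and freshly posted receive buffers it still owes the peer as an advertisement (once 16 or more accumulate, lines 450-451 ask for an ACK to carry them). Assuming the 16-bit split implied by the IB_SET_*/IB_GET_* macro names (send credits in the low half, posted credits in the high half), here is a user-space sketch of taking send credits with a compare-and-swap retry loop in the spirit of rds_ib_send_grab_credits(); the real function also reserves the last credit for a credit-update message and marks the connection full when it comes up short, which this sketch omits:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed encoding, mirroring the IB_GET/SET_* macro names:
     * low 16 bits  = send credits we may consume,
     * high 16 bits = receive buffers posted but not yet advertised. */
    #define GET_SEND_CREDITS(v)  ((v) & 0xffff)
    #define GET_POST_CREDITS(v)  ((v) >> 16)
    #define SET_SEND_CREDITS(v)  ((v) & 0xffff)
    #define SET_POST_CREDITS(v)  ((v) << 16)

    static atomic_uint_fast32_t credits;

    /* Take up to 'wanted' send credits; returns how many were granted. */
    static uint32_t grab_send_credits(uint32_t wanted)
    {
        uint_fast32_t oldval, newval;
        uint32_t avail, got;

        do {
            oldval = atomic_load(&credits);
            avail  = GET_SEND_CREDITS(oldval);
            got    = avail < wanted ? avail : wanted;
            newval = oldval - SET_SEND_CREDITS(got);
        } while (!atomic_compare_exchange_weak(&credits, &oldval, newval));

        return got;
    }

    int main(void)
    {
        atomic_store(&credits, SET_SEND_CREDITS(5) | SET_POST_CREDITS(20));
        printf("granted %u of 8 wanted send credits\n", grab_send_credits(8));
        printf("post credits still to advertise: %lu\n",
               (unsigned long)GET_POST_CREDITS(atomic_load(&credits)));
        return 0;
    }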
454 static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
463 if (ic->i_unsignaled_wrs-- == 0 || notify) {
464 ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
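Lines 454-464 batch completion signaling: only every rds_ib_sysctl_max_unsig_wrs-th work request, or one the caller explicitly wants notification for, gets IB_SEND_SIGNALED, which keeps completion-queue processing cheap while still bounding how long finished sends can sit unreaped. A stand-alone model of that countdown (the constant value and the function name are stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_UNSIGNALED_WRS 16   /* stand-in for rds_ib_sysctl_max_unsig_wrs */

    static int unsignaled_wrs;      /* models ic->i_unsignaled_wrs */

    /* Returns 1 if this work request should carry a completion signal
     * (the kernel sets IB_SEND_SIGNALED on it), 0 if its completion can
     * be reaped silently alongside a later signaled one. */
    static int set_wr_signal_state(bool notify)
    {
        if (unsignaled_wrs-- == 0 || notify) {
            unsignaled_wrs = MAX_UNSIGNALED_WRS;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        int i, signaled = 0;

        for (i = 0; i < 100; i++)
            signaled += set_wr_signal_state(false);

        printf("%d of 100 work requests were signaled\n", signaled);
        return 0;
    }

The return value is what the send paths accumulate into nr_sig (lines 720, 801, 923) and later add to i_signaled_sends (lines 733, 828, 984), so the completion handler knows how many signaled WRs to account for.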
487 struct rds_ib_connection *ic = conn->c_transport_data;
488 struct ib_device *dev = ic->i_cm_id->device;
524 work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
532 if (ic->i_flowctl) {
533 credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
536 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
549 if (!ic->i_data_op) {
555 rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
558 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
569 ic->i_data_op = &rm->data;
596 rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
602 if (ic->i_flowctl) {
603 rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
619 send = &ic->i_sends[pos];
622 scat = &ic->i_data_op->op_sg[rm->data.op_dmasg];
635 send->s_sge[0].addr = ic->i_send_hdrs_dma[pos];
638 send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;
640 ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev,
641 ic->i_send_hdrs_dma[pos],
644 memcpy(ic->i_send_hdrs[pos], &rm->m_inc.i_hdr,
658 send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
669 rds_ib_set_wr_signal_state(ic, send, false);
674 if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1)) {
675 rds_ib_set_wr_signal_state(ic, send, true);
685 if (ic->i_flowctl && adv_credits) {
686 struct rds_header *hdr = ic->i_send_hdrs[pos];
694 ib_dma_sync_single_for_device(ic->rds_ibdev->dev,
695 ic->i_send_hdrs_dma[pos],
703 pos = (pos + 1) % ic->i_send_ring.w_nr;
704 send = &ic->i_sends[pos];
717 prev->s_op = ic->i_data_op;
720 nr_sig += rds_ib_set_wr_signal_state(ic, prev, true);
721 ic->i_data_op = NULL;
726 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
729 if (ic->i_flowctl && i < credit_alloc)
733 atomic_add(nr_sig, &ic->i_signaled_sends);
737 ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
738 rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
744 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
745 rds_ib_sub_signaled(ic, nr_sig);
747 ic->i_data_op = prev->s_op;
751 rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
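Lines 487-751 are the data transmit path: it sizes a ring allocation for the message's fragments, trims that allocation to whatever send credits are available when flow control is on, maps the payload, and then fills one work request per fragment with the RDS header in s_sge[0] and the data in s_sge[1], wrapping pos around the ring as it goes. A small model of the allocate-then-trim step at lines 524-536, assuming the message is carved into RDS_FRAG_SIZE-sized fragments with one work request each; the ring here is just a free-descriptor counter, not the kernel's real work ring:

    #include <stdio.h>

    #define FRAG_SIZE 4096u   /* stand-in for RDS_FRAG_SIZE */

    /* Toy send ring: ring_alloc() hands out up to 'want' free descriptors,
     * ring_unalloc() returns unused ones, loosely following
     * rds_ib_ring_alloc()/rds_ib_ring_unalloc(). */
    static unsigned int ring_avail = 32;

    static unsigned int ring_alloc(unsigned int want)
    {
        unsigned int got = want < ring_avail ? want : ring_avail;

        ring_avail -= got;
        return got;
    }

    static void ring_unalloc(unsigned int n)
    {
        ring_avail += n;
    }

    int main(void)
    {
        unsigned int msg_len      = 20000;   /* message bytes (made up)      */
        unsigned int send_credits = 3;       /* credits the peer has granted */
        unsigned int frags = (msg_len + FRAG_SIZE - 1) / FRAG_SIZE;

        /* Allocate one work request per fragment, then give back whatever
         * the available credits cannot fund, the shape of lines 524-536:
         * the send proceeds flow-controlled at credit_alloc fragments. */
        unsigned int work_alloc   = ring_alloc(frags);
        unsigned int credit_alloc = send_credits < work_alloc ? send_credits
                                                              : work_alloc;

        if (credit_alloc < work_alloc) {
            ring_unalloc(work_alloc - credit_alloc);
            work_alloc = credit_alloc;
            printf("flow controlled: sending %u of %u fragments now\n",
                   work_alloc, frags);
        }
        return 0;
    }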
768 struct rds_ib_connection *ic = conn->c_transport_data;
776 work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
784 send = &ic->i_sends[pos];
801 nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
810 ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
811 rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
813 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
822 send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;
828 atomic_add(nr_sig, &ic->i_signaled_sends);
831 ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr);
832 rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
838 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
839 rds_ib_sub_signaled(ic, nr_sig);
854 struct rds_ib_connection *ic = conn->c_transport_data;
862 u32 max_sge = ic->rds_ibdev->max_sge;
878 ib_dma_map_sg(ic->i_cm_id->device, op->op_sg,
882 rdsdebug("ic %p mapping op %p: %d\n", ic, op,
902 work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
904 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
910 send = &ic->i_sends[pos];
923 nr_sig += rds_ib_set_wr_signal_state(ic, send,
947 send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;
955 rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);
968 if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
969 send = ic->i_sends;
979 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
984 atomic_add(nr_sig, &ic->i_signaled_sends);
987 ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
988 rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
994 rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
995 rds_ib_sub_signaled(ic, nr_sig);
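Lines 854-995 post an RDMA operation: the scatterlist is DMA-mapped, enough work requests are allocated up front to cover every mapped entry given the device's max_sge limit, and the entries are dealt out across those work requests, with the send slot pointer wrapping at i_send_ring.w_nr (lines 968-969). A stand-alone sketch of just that splitting arithmetic, the round-up division that sizes the ring allocation at line 902; the counts below are made up:

    #include <stdio.h>

    int main(void)
    {
        unsigned int op_count = 21;   /* DMA-mapped scatterlist entries       */
        unsigned int max_sge  = 8;    /* device limit, ic->rds_ibdev->max_sge */

        /* One work request carries at most max_sge scatter/gather entries,
         * and the whole op must fit in the WRs allocated up front. */
        unsigned int wrs  = (op_count + max_sge - 1) / max_sge;
        unsigned int left = op_count;
        unsigned int i;

        printf("need %u work requests for %u sges\n", wrs, op_count);

        for (i = 0; i < wrs; i++) {
            unsigned int this_wr = left < max_sge ? left : max_sge;

            printf("wr %u carries %u sges\n", i, this_wr);
            left -= this_wr;
        }
        return 0;
    }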
1012 struct rds_ib_connection *ic = conn->c_transport_data;
1016 rds_ib_attempt_ack(ic);