Lines Matching refs:rm (net/rds/send.c, Linux kernel RDS)
67 struct rds_message *rm, *tmp;
71 rm = cp->cp_xmit_rm;
77 rds_message_unmapped(rm);
78 rds_message_put(rm);
95 list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
96 set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
97 set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
140 struct rds_message *rm;
202 rm = cp->cp_xmit_rm;
204 if (!rm) {
219 if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
220 rm = rds_cong_update_alloc(conn);
221 if (IS_ERR(rm)) {
222 ret = PTR_ERR(rm);
225 rm->data.op_active = 1;
226 rm->m_inc.i_conn_path = cp;
227 rm->m_inc.i_conn = cp->cp_conn;
229 cp->cp_xmit_rm = rm;
239 if (!rm) {
255 rm = list_entry(cp->cp_send_queue.next,
258 rds_message_addref(rm);
264 list_move_tail(&rm->m_conn_item,
270 if (!rm)
280 if (test_bit(RDS_MSG_FLUSH, &rm->m_flags) ||
281 (rm->rdma.op_active &&
282 test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))) {
284 if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
285 list_move(&rm->m_conn_item, &to_be_dropped);
291 len = ntohl(rm->m_inc.i_hdr.h_len);
294 set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
306 cp->cp_xmit_rm = rm;
310 if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) {
311 rm->m_final_op = &rm->rdma;
315 set_bit(RDS_MSG_MAPPED, &rm->m_flags);
316 ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
318 clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
319 wake_up_interruptible(&rm->m_flush_wait);
326 if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) {
327 rm->m_final_op = &rm->atomic;
331 set_bit(RDS_MSG_MAPPED, &rm->m_flags);
332 ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
334 clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
335 wake_up_interruptible(&rm->m_flush_wait);
349 if (rm->data.op_nents == 0) {
353 ops_present = (rm->atomic.op_active || rm->rdma.op_active);
354 if (rm->atomic.op_active && !rm->atomic.op_silent)
356 if (rm->rdma.op_active && !rm->rdma.op_silent)
360 && !rm->m_rdma_cookie)
361 rm->data.op_active = 0;
364 if (rm->data.op_active && !cp->cp_xmit_data_sent) {
365 rm->m_final_op = &rm->data;
367 ret = conn->c_trans->xmit(conn, rm,
382 sg = &rm->data.op_sg[cp->cp_xmit_sg];
393 rm->data.op_nents);
398 (cp->cp_xmit_sg == rm->data.op_nents))
403 * A rm will only take multiple times through this loop
405 * none), then we're done with the rm.
407 if (!rm->data.op_active || cp->cp_xmit_data_sent) {
416 rds_message_put(rm);
428 list_for_each_entry(rm, &to_be_dropped, m_conn_item)
429 rds_message_put(rm);
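
The matches from file lines 140 through 429 appear to come from rds_send_xmit(), the send worker: a message can carry an RDMA op, an atomic op and a data op, and the connection path remembers which of them already went out (cp_xmit_rdma_sent, cp_xmit_atomic_sent, cp_xmit_data_sent, plus the cp_xmit_sg cursor), so a transmit that stops early resumes at the same message. Below is a minimal user-space sketch of that resumable ordering; struct msg, struct path_state and fake_xmit() are invented for illustration and stand in for the kernel structures and the conn->c_trans->xmit*() calls.

#include <stdbool.h>
#include <stdio.h>

/*
 * Minimal user-space sketch (not kernel code) of the resumable
 * transmit ordering above: a message may carry an RDMA op, an atomic
 * op and a data op, and the path records which ops already went out
 * so a partial transmit can pick up where it stopped.
 */
struct msg {
	bool has_rdma, has_atomic, has_data;
};

struct path_state {
	bool rdma_sent, atomic_sent, data_sent;	/* like cp_xmit_*_sent */
};

/* Pretend transport hook: returns false to simulate "try again later". */
static bool fake_xmit(const char *op, int *budget)
{
	if (*budget <= 0)
		return false;
	(*budget)--;
	printf("sent %s op\n", op);
	return true;
}

/* Returns true once every active op of the message has been sent. */
static bool xmit_one(const struct msg *m, struct path_state *ps, int *budget)
{
	if (m->has_rdma && !ps->rdma_sent) {
		if (!fake_xmit("rdma", budget))
			return false;
		ps->rdma_sent = true;
	}
	if (m->has_atomic && !ps->atomic_sent) {
		if (!fake_xmit("atomic", budget))
			return false;
		ps->atomic_sent = true;
	}
	if (m->has_data && !ps->data_sent) {
		if (!fake_xmit("data", budget))
			return false;
		ps->data_sent = true;
	}
	return true;
}

int main(void)
{
	struct msg m = { .has_rdma = true, .has_data = true };
	struct path_state ps = { 0 };
	int budget = 1;

	if (!xmit_one(&m, &ps, &budget))	/* runs out after the RDMA op */
		printf("partial send, will resume\n");

	budget = 1;
	if (xmit_one(&m, &ps, &budget))		/* resumes at the data op */
		printf("message fully sent\n");
	return 0;
}
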
473 static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
475 u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
486 static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
490 return is_acked(rm, ack);
491 return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
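
File lines 486-491 show rds_send_is_acked(): the transport's is_acked() callback decides when one is provided, otherwise a message counts as acknowledged once its header sequence number is less than or equal to the cumulative ack. A stand-alone sketch of that rule, with the rds_message argument reduced to a bare sequence number:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Transport-specific override, analogous to the is_acked callback. */
typedef bool (*is_acked_fn)(uint64_t seq, uint64_t ack);

/* Prefer the transport's callback, otherwise treat the ack as cumulative. */
static bool send_is_acked(uint64_t seq, uint64_t ack, is_acked_fn is_acked)
{
	if (is_acked)
		return is_acked(seq, ack);
	return seq <= ack;
}

int main(void)
{
	/* seq 5 is covered by ack 7, seq 9 is not */
	printf("%d %d\n", send_is_acked(5, 7, NULL), send_is_acked(9, 7, NULL));
	return 0;
}
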
500 void rds_rdma_send_complete(struct rds_message *rm, int status)
507 spin_lock_irqsave(&rm->m_rs_lock, flags);
509 ro = &rm->rdma;
510 if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
513 rs = rm->m_rs;
524 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
536 void rds_atomic_send_complete(struct rds_message *rm, int status)
543 spin_lock_irqsave(&rm->m_rs_lock, flags);
545 ao = &rm->atomic;
546 if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
549 rs = rm->m_rs;
560 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
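
File lines 500-524 and 536-560 are the RDMA and atomic completion handlers: under rm->m_rs_lock they check that the message is still on a socket and that the op asked for notification, then move the op's notifier (carrying the completion status) onto the socket's notify queue and clear op_notifier so it is delivered at most once. A simplified user-space sketch of that hand-off; struct op, struct sock_queue and complete_op() are invented stand-ins, and the kernel's m_rs_lock/rs_lock locking is ignored here.

#include <stddef.h>
#include <stdio.h>

/*
 * The notifier travels from the op to the socket's notify queue at
 * most once; op->notifier is cleared so a second completion is a no-op.
 */
struct notifier {
	int status;
	struct notifier *next;
};

struct op {
	int wants_notify;
	struct notifier *notifier;
};

struct sock_queue {
	struct notifier *head;
};

static void complete_op(struct op *op, struct sock_queue *sq, int status)
{
	struct notifier *n = op->notifier;

	if (!op->wants_notify || !n)
		return;
	n->status = status;
	n->next = sq->head;	/* push onto the socket's notify queue */
	sq->head = n;
	op->notifier = NULL;	/* deliver at most once */
}

int main(void)
{
	struct notifier n = { 0, NULL };
	struct op op = { 1, &n };
	struct sock_queue sq = { NULL };

	complete_op(&op, &sq, -1);	/* delivers the notifier */
	complete_op(&op, &sq, 0);	/* second completion is ignored */
	printf("queued status: %d\n", sq.head ? sq.head->status : 999);
	return 0;
}
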
575 __rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
580 ro = &rm->rdma;
587 ao = &rm->atomic;
609 struct rds_message *rm;
614 rm = list_entry(messages->next, struct rds_message,
616 list_del_init(&rm->m_conn_item);
624 * The message spinlock makes sure nobody clears rm->m_rs
628 spin_lock_irqsave(&rm->m_rs_lock, flags);
629 if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
632 if (rs != rm->m_rs) {
637 rs = rm->m_rs;
645 if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
646 struct rm_rdma_op *ro = &rm->rdma;
649 list_del_init(&rm->m_sock_item);
650 rds_send_sndbuf_remove(rs, rm);
659 rm->rdma.op_notifier = NULL;
666 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
667 rds_message_put(rm);
669 rds_message_put(rm);
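
File lines 609-669 look like rds_send_remove_from_sock(): under rm->m_rs_lock, only the caller that wins test_and_clear_bit(RDS_MSG_ON_SOCK) unlinks the message from the socket and drops the reference the send queue held, so concurrent cleanup paths cannot drop it twice. A rough user-space analogue of that claim-then-put pattern, using C11 atomics in place of the kernel's bitops, spinlock and refcount; struct message, remove_from_sock() and message_put() are invented for the sketch.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct message {
	atomic_bool on_sock;	/* stands in for RDS_MSG_ON_SOCK */
	atomic_int refcount;
};

static void message_put(struct message *m)
{
	if (atomic_fetch_sub(&m->refcount, 1) == 1)
		printf("message freed\n");
}

static void remove_from_sock(struct message *m)
{
	if (atomic_exchange(&m->on_sock, false))	/* claim ownership */
		message_put(m);				/* drop the queue's ref */
}

int main(void)
{
	/* one reference for the socket queue, one for the caller */
	struct message m = { true, 2 };

	remove_from_sock(&m);	/* wins the claim, drops the queue's ref */
	remove_from_sock(&m);	/* flag already clear: does nothing */
	message_put(&m);	/* caller's final put frees the message */
	return 0;
}
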
689 struct rds_message *rm, *tmp;
695 list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
696 if (!rds_send_is_acked(rm, ack, is_acked))
699 list_move(&rm->m_conn_item, &list);
700 clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
724 struct rds_message *rm, *tmp;
733 list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
735 (!ipv6_addr_equal(&dest->sin6_addr, &rm->m_daddr) ||
736 dest->sin6_port != rm->m_inc.i_hdr.h_dport))
739 list_move(&rm->m_sock_item, &list);
740 rds_send_sndbuf_remove(rs, rm);
741 clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
753 list_for_each_entry(rm, &list, m_sock_item) {
755 conn = rm->m_inc.i_conn;
757 cp = rm->m_inc.i_conn_path;
763 * Maybe someone else beat us to removing rm from the conn.
767 if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
771 list_del_init(&rm->m_conn_item);
778 spin_lock_irqsave(&rm->m_rs_lock, flags);
781 __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
784 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
786 rds_message_put(rm);
792 rm = list_entry(list.next, struct rds_message, m_sock_item);
793 list_del_init(&rm->m_sock_item);
794 rds_message_wait(rm);
801 spin_lock_irqsave(&rm->m_rs_lock, flags);
804 __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
807 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
809 rds_message_put(rm);
820 struct rds_message *rm, __be16 sport,
829 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
852 set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
854 list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
855 set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
856 rds_message_addref(rm);
858 rm->m_rs = rs;
862 rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
863 rm->m_inc.i_conn = conn;
864 rm->m_inc.i_conn_path = cp;
865 rds_message_addref(rm);
868 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++);
869 list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
870 set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
874 rm, len, rs, rs->rs_snd_bytes,
875 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
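
File lines 820-875 are the body of rds_send_queue_rm(): the message is placed on the socket's send queue and, under the path lock, on the connection path's send queue, taking one reference for each list (the two rds_message_addref() calls) and getting stamped with cp_next_tx_seq++. A toy sketch of that two-references-plus-sequence-stamp step; struct message, struct path and queue_message() are invented for the example and the real code uses list_head queues.

#include <stdint.h>
#include <stdio.h>

struct message {
	uint64_t seq;
	int refcount;
};

struct path {
	uint64_t next_tx_seq;
};

static void queue_message(struct message *m, struct path *p)
{
	m->refcount++;		/* reference held by the socket queue */
	m->refcount++;		/* reference held by the connection queue */
	m->seq = p->next_tx_seq++;
}

int main(void)
{
	struct path p = { .next_tx_seq = 1 };
	struct message a = { 0, 1 }, b = { 0, 1 };	/* 1 ref for the caller */

	queue_message(&a, &p);
	queue_message(&b, &p);
	printf("a: seq=%llu refs=%d, b: seq=%llu refs=%d\n",
	       (unsigned long long)a.seq, a.refcount,
	       (unsigned long long)b.seq, b.refcount);
	return 0;
}
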
971 static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm,
977 !rm->data.op_mmp_znotifier)
980 rm->data.op_mmp_znotifier->z_cookie = *cookie;
984 static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
999 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
1005 ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
1010 ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
1014 ret = rds_cmsg_rdma_map(rs, rm, cmsg);
1027 ret = rds_cmsg_atomic(rs, rm, cmsg);
1031 ret = rds_cmsg_zcopy(rs, rm, cmsg);
1108 struct rds_message *rm = NULL;
1264 /* size of rm including all sgs */
1269 rm = rds_message_alloc(ret, GFP_KERNEL);
1270 if (!rm) {
1275 /* Attach data to the rm */
1277 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
1278 if (IS_ERR(rm->data.op_sg)) {
1279 ret = PTR_ERR(rm->data.op_sg);
1282 ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
1286 rm->data.op_active = 1;
1288 rm->m_daddr = daddr;
1313 rm->m_conn_path = cpath;
1316 ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
1324 if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
1326 &rm->rdma, conn->c_trans->xmit_rdma);
1331 if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1333 &rm->atomic, conn->c_trans->xmit_atomic);
1351 while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port,
1361 rds_send_queue_rm(rs, conn, cpath, rm,
1394 rds_message_put(rm);
1411 rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
1413 if (rm)
1414 rds_message_put(rm);
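
File lines 1108-1414 appear to be rds_sendmsg(): the message is allocated with room for all scatterlist entries, user data is copied in, control messages set up any RDMA, atomic or zero-copy ops, and the code then loops on rds_send_queue_rm(), returning -EAGAIN for non-blocking sockets or sleeping until acknowledgments free send-buffer space. A small sketch of just the "queue if it fits, otherwise wait and retry" part; struct sock_state and sndbuf_reserve() are invented and the byte budgets are arbitrary.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sock_state {
	uint32_t snd_bytes;	/* bytes currently queued */
	uint32_t sndbuf;	/* configured limit */
};

static bool sndbuf_reserve(struct sock_state *rs, uint32_t len)
{
	if (rs->snd_bytes + len > rs->sndbuf)
		return false;	/* caller must wait for acks to drain space */
	rs->snd_bytes += len;
	return true;
}

int main(void)
{
	struct sock_state rs = { .snd_bytes = 0, .sndbuf = 4096 };
	uint32_t len = 3000;
	int attempt;

	for (attempt = 1; attempt <= 2; attempt++) {
		if (sndbuf_reserve(&rs, len)) {
			printf("attempt %d: queued %u bytes\n", attempt, len);
		} else {
			printf("attempt %d: no sndbuf space, would wait\n",
			       attempt);
			rs.snd_bytes = 0;	/* pretend acks freed the queue */
		}
	}
	return 0;
}
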
1430 struct rds_message *rm;
1434 rm = rds_message_alloc(0, GFP_ATOMIC);
1435 if (!rm) {
1440 rm->m_daddr = cp->cp_conn->c_faddr;
1441 rm->data.op_active = 1;
1450 list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
1451 set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
1452 rds_message_addref(rm);
1453 rm->m_inc.i_conn = cp->cp_conn;
1454 rm->m_inc.i_conn_path = cp;
1456 rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport,
1458 rm->m_inc.i_hdr.h_flags |= h_flags;
1466 rds_message_add_extension(&rm->m_inc.i_hdr,
1469 rds_message_add_extension(&rm->m_inc.i_hdr,
1485 rds_message_put(rm);
1489 if (rm)
1490 rds_message_put(rm);