Lines matching refs: rm (net/rds/message.c)
51 void rds_message_addref(struct rds_message *rm)
53 rdsdebug("addref rm %p ref %d\n", rm, refcount_read(&rm->m_refcount));
54 refcount_inc(&rm->m_refcount);
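
rds_message_addref() is the get side of the message refcount (the matching put is further down in this listing). A minimal sketch of the usual pattern, with a hypothetical caller queue_for_send() and all locking omitted: one reference is taken per list the message is linked onto, using the m_sock_item and m_conn_item anchors initialised at allocation time.

/*
 * Illustrative only: queue_for_send() is not a function from this file.
 * It takes one reference per queue that will point at rm.
 */
static void queue_for_send(struct list_head *sock_queue,
                           struct list_head *conn_queue,
                           struct rds_message *rm)
{
        rds_message_addref(rm);         /* ref held via m_sock_item */
        list_add_tail(&rm->m_sock_item, sock_queue);

        rds_message_addref(rm);         /* ref held via m_conn_item */
        list_add_tail(&rm->m_conn_item, conn_queue);
}
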
130 static void rds_message_purge(struct rds_message *rm)
135 if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
138 spin_lock_irqsave(&rm->m_rs_lock, flags);
139 if (rm->m_rs) {
140 struct rds_sock *rs = rm->m_rs;
142 if (rm->data.op_mmp_znotifier) {
144 rds_rm_zerocopy_callback(rs, rm->data.op_mmp_znotifier);
146 rm->data.op_mmp_znotifier = NULL;
149 rm->m_rs = NULL;
151 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
153 for (i = 0; i < rm->data.op_nents; i++) {
156 __free_page(sg_page(&rm->data.op_sg[i]));
158 put_page(sg_page(&rm->data.op_sg[i]));
160 rm->data.op_nents = 0;
162 if (rm->rdma.op_active)
163 rds_rdma_free_op(&rm->rdma);
164 if (rm->rdma.op_rdma_mr)
165 kref_put(&rm->rdma.op_rdma_mr->r_kref, __rds_put_mr_final);
167 if (rm->atomic.op_active)
168 rds_atomic_free_op(&rm->atomic);
169 if (rm->atomic.op_rdma_mr)
170 kref_put(&rm->atomic.op_rdma_mr->r_kref, __rds_put_mr_final);
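
The rds_message_purge() lines show the teardown order for the last reference: messages built over a caller-owned page vector (RDS_MSG_PAGEVEC, see the mapping helper further down) short-circuit the purge; otherwise the message is detached from its socket under m_rs_lock, running the zerocopy completion callback first if a notifier is attached, the data pages are released, and finally any RDMA and atomic sub-ops are freed and their MR references dropped. A minimal sketch of the attach side that this locking protects, using a hypothetical helper (message.c does the assignment elsewhere):

/*
 * Hypothetical helper, not from this file: bind a message to a socket.
 * rds_message_purge() clears rm->m_rs under the same m_rs_lock, so the
 * two sides never observe a half-updated socket pointer.
 */
static void bind_message_to_sock(struct rds_message *rm, struct rds_sock *rs)
{
        unsigned long flags;

        spin_lock_irqsave(&rm->m_rs_lock, flags);
        rm->m_rs = rs;
        spin_unlock_irqrestore(&rm->m_rs_lock, flags);
}
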
173 void rds_message_put(struct rds_message *rm)
175 rdsdebug("put rm %p ref %d\n", rm, refcount_read(&rm->m_refcount));
176 WARN(!refcount_read(&rm->m_refcount), "danger refcount zero on %p\n", rm);
177 if (refcount_dec_and_test(&rm->m_refcount)) {
178 BUG_ON(!list_empty(&rm->m_sock_item));
179 BUG_ON(!list_empty(&rm->m_conn_item));
180 rds_message_purge(rm);
182 kfree(rm);
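
rds_message_put() is the companion put: the WARN flags a put after the count has already reached zero, and only the final reference runs rds_message_purge() and frees the message, after asserting it is no longer linked on any socket or connection list. A sketch of a typical error path pairing one put against the initial reference from rds_message_alloc() (the surrounding function is illustrative):

/*
 * Illustrative error path: rds_message_alloc() hands back a message with
 * its refcount at 1, so a single rds_message_put() tears it down again.
 */
static int build_and_abort(gfp_t gfp)
{
        struct rds_message *rm;

        rm = rds_message_alloc(0, gfp);
        if (!rm)
                return -ENOMEM;

        /* ... something goes wrong while filling in the message ... */

        rds_message_put(rm);            /* last ref: purge + kfree */
        return -EINVAL;
}
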
286 struct rds_message *rm;
291 rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
292 if (!rm)
295 rm->m_used_sgs = 0;
296 rm->m_total_sgs = extra_len / sizeof(struct scatterlist);
298 refcount_set(&rm->m_refcount, 1);
299 INIT_LIST_HEAD(&rm->m_sock_item);
300 INIT_LIST_HEAD(&rm->m_conn_item);
301 spin_lock_init(&rm->m_rs_lock);
302 init_waitqueue_head(&rm->m_flush_wait);
305 return rm;
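
The allocation lines show that the message and its scatterlist pool share one kzalloc(): the extra_len trailing bytes are treated as m_total_sgs scatterlist slots, the refcount starts at 1, and the list heads, m_rs_lock and m_flush_wait are initialised before the message is returned. A sketch of how a caller sizes that trailing area for a given entry count (alloc_with_sgs() and num_sgs are illustrative names):

/*
 * Illustrative: reserve room behind struct rds_message for num_sgs
 * scatterlist entries, matching the extra_len / sizeof() split above.
 */
static struct rds_message *alloc_with_sgs(int num_sgs, gfp_t gfp)
{
        return rds_message_alloc(num_sgs * sizeof(struct scatterlist), gfp);
}
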
309 * RDS ops use this to grab SG entries from the rm's sg pool.
311 struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
313 struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
321 if (rm->m_used_sgs + nents > rm->m_total_sgs) {
323 rm->m_total_sgs, rm->m_used_sgs, nents);
327 sg_ret = &sg_first[rm->m_used_sgs];
329 rm->m_used_sgs += nents;
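
rds_message_alloc_sgs() carves entries out of that inline pool: the pool starts immediately after the struct (&rm[1]), a request that would run past m_total_sgs fails, and m_used_sgs advances past whatever is handed out. The IS_ERR() checks later in the listing imply an ERR_PTR-style return, so a caller looks roughly like this (attach_data_sgs() is an illustrative wrapper):

/*
 * Illustrative: take nents entries from rm's inline pool and attach them
 * to the data op, with the same IS_ERR() check used in this file.
 */
static int attach_data_sgs(struct rds_message *rm, int nents)
{
        struct scatterlist *sg;

        sg = rds_message_alloc_sgs(rm, nents);
        if (IS_ERR(sg))
                return PTR_ERR(sg);

        rm->data.op_sg = sg;
        return 0;
}
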
336 struct rds_message *rm;
341 rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
342 if (!rm)
345 set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
346 rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
347 rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
348 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
349 if (IS_ERR(rm->data.op_sg)) {
350 void *err = ERR_CAST(rm->data.op_sg);
351 rds_message_put(rm);
355 for (i = 0; i < rm->data.op_nents; ++i) {
356 sg_set_page(&rm->data.op_sg[i],
361 return rm;
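
This group builds a message directly over an existing page vector (in mainline the enclosing function is rds_message_map_pages()): RDS_MSG_PAGEVEC is set so the purge path above leaves the pages alone, the wire header length is set to total_len, and one scatterlist entry is pointed at each page instead of copying data. A sketch of a caller, assuming the mainline signature, with wrap_pages(), page_addrs and npages as illustrative names:

/*
 * Illustrative: wrap an array of page addresses in an rds_message
 * without copying; the caller keeps ownership of the pages.
 */
static struct rds_message *wrap_pages(unsigned long *page_addrs,
                                      unsigned int npages)
{
        return rds_message_map_pages(page_addrs, npages * PAGE_SIZE);
}
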
364 static int rds_message_zcopy_from_user(struct rds_message *rm, struct iov_iter *from)
371 rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from));
376 sg = rm->data.op_sg;
382 rm->data.op_mmp_znotifier = &info->znotif;
383 if (mm_account_pinned_pages(&rm->data.op_mmp_znotifier->z_mmp,
399 for (i = 0; i < rm->data.op_nents; i++)
400 put_page(sg_page(&rm->data.op_sg[i]));
401 mmp = &rm->data.op_mmp_znotifier->z_mmp;
408 rm->data.op_nents++;
415 rm->data.op_mmp_znotifier = NULL;
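
The zerocopy path pins the sender's pages instead of copying them: the wire header length is taken from the iterator, a notifier (op_mmp_znotifier) is attached so completion can be signalled from rds_rm_zerocopy_callback() in the purge path, and the pinned pages are charged to the locked-memory limit through mm_account_pinned_pages(). The error lines unwind all of that; a sketch consolidating them into one hypothetical helper (unwind_pinned_data() is not a function from this file):

/*
 * Illustrative unwind, mirroring the error path in this listing: drop the
 * page references and return the locked-memory accounting.
 */
static void unwind_pinned_data(struct rds_message *rm)
{
        unsigned int i;

        for (i = 0; i < rm->data.op_nents; i++)
                put_page(sg_page(&rm->data.op_sg[i]));

        mm_unaccount_pinned_pages(&rm->data.op_mmp_znotifier->z_mmp);
        rm->data.op_mmp_znotifier = NULL;
}
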
419 int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
427 rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from));
430 sg = rm->data.op_sg;
434 return rds_message_zcopy_from_user(rm, from);
442 rm->data.op_nents++;
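
rds_message_copy_from_user() is the ordinary send-side fill: it records the payload length in the wire header, hands off to the zerocopy routine above when zerocopy was requested, and otherwise copies the iterator into the message's pages, bumping op_nents for each scatterlist entry it starts. In mainline the per-entry step is a copy_page_from_iter() call; a simplified sketch (fill_one_sg() is illustrative and treats any short copy as a fault, which the real loop does not):

/*
 * Illustrative: copy user data into one already-populated scatterlist
 * entry. Simplification: a short copy is reported as -EFAULT here.
 */
static int fill_one_sg(struct scatterlist *sg, struct iov_iter *from)
{
        size_t copied;

        copied = copy_page_from_iter(sg_page(sg), sg->offset, sg->length,
                                     from);
        return copied == sg->length ? 0 : -EFAULT;
}
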
466 struct rds_message *rm;
474 rm = container_of(inc, struct rds_message, m_inc);
475 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
477 sg = rm->data.op_sg;
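
On the receive side, the incoming descriptor here is the m_inc embedded in a struct rds_message, which is the case when the payload was generated locally (the loopback transport hands the sender's message straight across), so container_of() recovers the message and the big-endian h_len in the wire header bounds the copy out to userspace. A small sketch of that recovery step (inc_payload_len() is an illustrative helper):

/*
 * Illustrative: recover the rds_message that embeds this rds_incoming
 * and read the payload length from its wire header.
 */
static u32 inc_payload_len(struct rds_incoming *inc)
{
        struct rds_message *rm = container_of(inc, struct rds_message, m_inc);

        return be32_to_cpu(rm->m_inc.i_hdr.h_len);  /* h_len is big-endian */
}
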
508 void rds_message_wait(struct rds_message *rm)
510 wait_event_interruptible(rm->m_flush_wait,
511 !test_bit(RDS_MSG_MAPPED, &rm->m_flags));
514 void rds_message_unmapped(struct rds_message *rm)
516 clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
517 wake_up_interruptible(&rm->m_flush_wait);
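
The last two functions pair up around the RDS_MSG_MAPPED flag: a transport keeps the flag set while the message's pages are mapped for DMA, rds_message_wait() blocks (interruptibly) until it clears, and rds_message_unmapped() clears it and wakes the waiter on m_flush_wait. A sketch of the transport-side pairing, with both functions hypothetical:

/*
 * Illustrative transport hooks, not from this file: mark the message
 * mapped before handing its pages to the hardware, and signal waiters
 * once the unmap is done.
 */
static void transport_map(struct rds_message *rm)
{
        set_bit(RDS_MSG_MAPPED, &rm->m_flags);
        /* ... dma_map_sg() on rm->data.op_sg and post the send ... */
}

static void transport_send_done(struct rds_message *rm)
{
        /* ... dma_unmap_sg() ... */
        rds_message_unmapped(rm);       /* lets rds_message_wait() return */
}
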