Lines Matching defs:new_op
23 struct orangefs_kernel_op_s *new_op;
31 new_op = op_alloc(ORANGEFS_VFS_OP_RA_FLUSH);
32 if (!new_op)
34 new_op->upcall.req.ra_cache_flush.refn = orangefs_inode->refn;
36 ret = service_operation(new_op, "orangefs_flush_racache",
42 op_release(new_op);
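The hits above (source lines 23-42) are the readahead-cache flush path: allocate an ORANGEFS_VFS_OP_RA_FLUSH op, copy the inode's object reference into the upcall, hand the op to the userspace client core with service_operation(), then release it. Below is a minimal sketch of how those fragments fit together; the function name is inferred from the "orangefs_flush_racache" string on line 36, and anything not tagged with a source line number (locals, the -ENOMEM return, the interruptible-flag argument) is an assumption rather than the verbatim source.

/*
 * Sketch of the op lifecycle behind source lines 23-42 (OrangeFS kernel
 * client context, orangefs-kernel.h assumed in scope). Only the lines
 * tagged with source line numbers come from the listing.
 */
static int orangefs_flush_racache(struct inode *inode)
{
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	struct orangefs_kernel_op_s *new_op;				/* 23 */
	int ret;

	new_op = op_alloc(ORANGEFS_VFS_OP_RA_FLUSH);			/* 31 */
	if (!new_op)							/* 32 */
		return -ENOMEM;
	new_op->upcall.req.ra_cache_flush.refn = orangefs_inode->refn;	/* 34 */

	ret = service_operation(new_op, "orangefs_flush_racache",	/* 36 */
				get_interruptible_flag(inode));	/* assumed arg */

	op_release(new_op);						/* 42 */
	return ret;
}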
56 struct orangefs_kernel_op_s *new_op = NULL;
63 new_op = op_alloc(ORANGEFS_VFS_OP_FILE_IO);
64 if (!new_op)
68 new_op->upcall.req.io.readahead_size = readahead_size;
69 new_op->upcall.req.io.io_type = type;
70 new_op->upcall.req.io.refn = orangefs_inode->refn;
86 new_op,
89 new_op->uses_shared_memory = 1;
90 new_op->upcall.req.io.buf_index = buffer_index;
91 new_op->upcall.req.io.count = total_size;
92 new_op->upcall.req.io.offset = *offset;
94 new_op->upcall.uid = from_kuid(&init_user_ns, wr->uid);
95 new_op->upcall.gid = from_kgid(&init_user_ns, wr->gid);
126 new_op->upcall.uid = 0;
128 new_op->upcall.uid = 0;
153 llu(new_op->tag));
156 ret = service_operation(new_op,
171 if (ret == -EAGAIN && op_state_purged(new_op)) {
194 switch (new_op->op_state - OP_VFS_STATE_GIVEN_UP) {
220 new_op->op_state);
227 new_op->op_state,
228 new_op);
236 if (orangefs_cancel_op_in_progress(new_op))
245 if (type == ORANGEFS_IO_READ && new_op->downcall.resp.io.amt_complete) {
252 copy_amount = new_op->downcall.resp.io.amt_complete;
267 (int)new_op->downcall.resp.io.amt_complete);
269 ret = new_op->downcall.resp.io.amt_complete;
278 op_release(new_op);
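The hits from source lines 56-278 are the synchronous file-I/O path (the wait_for_direct_io() helper in the OrangeFS client, as far as these fragments suggest): allocate an ORANGEFS_VFS_OP_FILE_IO op, describe the transfer in upcall.req.io, attach a shared-memory slot, carry the writer's credentials for page-cache writeback and fall back to UID 0 when the struct file lacks the matching f_mode (one branch per I/O direction, which is why the same uid = 0 assignment appears at both lines 126 and 128), service the op with a retry when the client core was restarted, cancel an op that was given up, and for reads copy the completed bytes back out of the shared slot. The skeleton below arranges the tagged hits in that order; the signature, labels, bufmap helpers, op-name strings, and all control flow between the hits are assumptions, and the hits at lines 86, 153, 227-228 and 267, which are arguments inside gossip_debug() trace messages, are omitted from the sketch.

/*
 * Skeleton of the I/O path behind source lines 56-278. Only the lines
 * tagged with source line numbers come from the listing; the signature,
 * labels, helpers, and control flow between the hits are assumed.
 */
static ssize_t wait_for_direct_io(enum ORANGEFS_io_type type,
		struct inode *inode, loff_t *offset, struct iov_iter *iter,
		size_t total_size, loff_t readahead_size,
		struct orangefs_write_range *wr, struct file *file)
{
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	struct orangefs_kernel_op_s *new_op = NULL;			/* 56 */
	int buffer_index = -1;
	size_t copy_amount;
	ssize_t ret;

	new_op = op_alloc(ORANGEFS_VFS_OP_FILE_IO);			/* 63 */
	if (!new_op)							/* 64 */
		return -ENOMEM;

	/* describe the transfer for the userspace client core */
	new_op->upcall.req.io.readahead_size = readahead_size;		/* 68 */
	new_op->upcall.req.io.io_type = type;				/* 69 */
	new_op->upcall.req.io.refn = orangefs_inode->refn;		/* 70 */

populate_shared_memory:
	/* attach a shared-memory slot (bufmap helper signature assumed) */
	buffer_index = orangefs_bufmap_get();
	if (buffer_index < 0) {
		ret = buffer_index;
		goto out;
	}
	new_op->uses_shared_memory = 1;					/* 89 */
	new_op->upcall.req.io.buf_index = buffer_index;			/* 90 */
	new_op->upcall.req.io.count = total_size;			/* 91 */
	new_op->upcall.req.io.offset = *offset;				/* 92 */

	/* page-cache writeback carries the original writer's identity */
	if (type == ORANGEFS_IO_WRITE && wr) {
		new_op->upcall.uid = from_kuid(&init_user_ns, wr->uid);	/* 94 */
		new_op->upcall.gid = from_kgid(&init_user_ns, wr->gid);	/* 95 */
	}
	/* fall back to UID 0 when the file lacks the matching f_mode;
	 * one branch per direction, hence uid = 0 at both 126 and 128 */
	if (type == ORANGEFS_IO_WRITE && file && !(file->f_mode & FMODE_WRITE))
		new_op->upcall.uid = 0;					/* 126 */
	else if (type == ORANGEFS_IO_READ && file && !(file->f_mode & FMODE_READ))
		new_op->upcall.uid = 0;					/* 128 */

	ret = service_operation(new_op,					/* 156 */
				type == ORANGEFS_IO_WRITE ?
					"file_write" : "file_read",	/* names assumed */
				get_interruptible_flag(inode));

	/* client core restarted: the shared slot was wiped, start over */
	if (ret == -EAGAIN && op_state_purged(new_op)) {		/* 171 */
		orangefs_bufmap_put(buffer_index);
		goto populate_shared_memory;
	}

	if (ret < 0) {
		if (ret == -EINTR) {
			/* decide partial-I/O semantics from how far the op got */
			switch (new_op->op_state - OP_VFS_STATE_GIVEN_UP) {	/* 194 */
			case OP_VFS_STATE_WAITING:
				ret = (*offset == 0) ? -EINTR : 0;
				break;
			case OP_VFS_STATE_INPROGR:
				ret = (type == ORANGEFS_IO_READ) ? -EINTR : total_size;
				break;
			default:
				gossip_err("%s: unexpected op state %d\n",
					   __func__, new_op->op_state);	/* 220 */
				ret = -EINTR;
				break;
			}
		}
		/* an op the client core gave up on must also be cancelled;
		 * result handling here is assumed */
		if (orangefs_cancel_op_in_progress(new_op))		/* 236 */
			gossip_err("%s: cancel upcall returned nonzero\n", __func__);
		goto out;
	}

	/* reads: copy the completed bytes back out of the shared slot */
	if (type == ORANGEFS_IO_READ && new_op->downcall.resp.io.amt_complete) {	/* 245 */
		copy_amount = new_op->downcall.resp.io.amt_complete;	/* 252 */
		ret = orangefs_bufmap_copy_to_iovec(iter, buffer_index,
						    copy_amount);
		if (ret < 0)
			goto out;
	}
	ret = new_op->downcall.resp.io.amt_complete;			/* 269 */
out:
	if (buffer_index >= 0)
		orangefs_bufmap_put(buffer_index);
	op_release(new_op);						/* 278 */
	return ret;
}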
464 struct orangefs_kernel_op_s *new_op = NULL;
471 new_op = op_alloc(ORANGEFS_VFS_OP_FSYNC);
472 if (!new_op)
474 new_op->upcall.req.fsync.refn = orangefs_inode->refn;
476 ret = service_operation(new_op,
484 op_release(new_op);
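The final hits (source lines 464-484) are the fsync path: the same lifecycle again, with an ORANGEFS_VFS_OP_FSYNC op that carries only the object reference. A sketch follows; everything outside the tagged lines (the fsync prototype, flushing dirty pages first, the op-name string, the interruptible flag) is assumed.

/*
 * Sketch of the fsync path behind source lines 464-484; only the tagged
 * lines come from the listing.
 */
static int orangefs_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(file_inode(file));
	struct orangefs_kernel_op_s *new_op = NULL;		/* 464 */
	int ret;

	/* flush dirty pages first (assumed; not part of the hits) */
	ret = filemap_write_and_wait_range(file_inode(file)->i_mapping,
					   start, end);
	if (ret < 0)
		return ret;

	new_op = op_alloc(ORANGEFS_VFS_OP_FSYNC);		/* 471 */
	if (!new_op)						/* 472 */
		return -ENOMEM;
	new_op->upcall.req.fsync.refn = orangefs_inode->refn;	/* 474 */

	ret = service_operation(new_op,				/* 476 */
				"orangefs_fsync",		/* name assumed */
				get_interruptible_flag(file_inode(file)));

	op_release(new_op);					/* 484 */
	return ret;
}

Taken together, all three groups follow the same client-core round trip, op_alloc() -> fill upcall.req -> service_operation() -> op_release(), which is why new_op is defined and touched at the same points in each function.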