Lines matching refs: iov

752 add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
754 struct smb2_create_req *req = iov[0].iov_base;
757 iov[num].iov_base = create_posix_buf(mode);
760 if (iov[num].iov_base == NULL)
762 iov[num].iov_len = sizeof(struct create_posix);
766 iov[num - 1].iov_len);
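
The matches at 752-766 show the recurring add_*_context shape used throughout this listing: the helper allocates a context buffer, drops it into the next free kvec slot, and bumps the caller's element count. Below is a minimal userspace sketch of that shape; struct kvec and add_context are stand-ins declared only so the fragment compiles on its own, not the kernel definitions.

#include <errno.h>
#include <stddef.h>

struct kvec {				/* stand-in; the kernel's lives in <linux/uio.h> */
	void	*iov_base;
	size_t	iov_len;
};

/* Hypothetical helper mirroring add_posix_context() and friends above:
 * append one context buffer to the request's kvec array. */
static int add_context(struct kvec *iov, unsigned int *num_iovec,
		       void *ctx_buf, size_t ctx_len)
{
	unsigned int num = *num_iovec;

	if (ctx_buf == NULL)
		return -ENOMEM;		/* mirrors the NULL checks in the matches */
	iov[num].iov_base = ctx_buf;
	iov[num].iov_len = ctx_len;
	*num_iovec = num + 1;
	return 0;
}
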
794 struct kvec iov[1];
865 iov[0].iov_base = (char *)req;
866 iov[0].iov_len = total_len;
869 rqst.rq_iov = iov;
1182 struct kvec iov[2];
1235 sess_data->iov[0].iov_base = (char *)req;
1237 sess_data->iov[0].iov_len = total_len - 1;
1250 free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base);
1259 struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
1265 req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
1268 rqst.rq_iov = sess_data->iov;
1277 cifs_small_buf_release(sess_data->iov[0].iov_base);
1278 memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
1366 sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
1367 sess_data->iov[1].iov_len = msg->secblob_len;
1373 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1444 sess_data->iov[1].iov_base = ntlmssp_blob;
1445 sess_data->iov[1].iov_len = blob_length;
1448 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1508 req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
1524 sess_data->iov[1].iov_base = ntlmssp_blob;
1525 sess_data->iov[1].iov_len = blob_length;
1531 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
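
The session-setup matches (1182-1531) show the split most variable-length requests in this listing use: iov[0] carries the fixed request structure shortened by one byte (the placeholder byte declared at the end of the structure, hence total_len - 1), and iov[1] carries the variable payload, here the security blob, whose length is written back into the request. The tree-connect matches at 1716-1781 use the same split with the UNC path in iov[1]. A sketch of that layout, reusing the struct kvec stand-in from the first sketch; struct fake_req and its field names are illustrative only.

#include <stdint.h>
#include <stddef.h>

struct fake_req {			/* illustrative only, not the on-wire layout */
	uint16_t SecurityBufferLength;
	uint8_t Buffer[1];		/* placeholder byte; the payload travels in iov[1] */
};

static void build_two_part_request(struct kvec iov[2], struct fake_req *req,
				   size_t total_len, void *blob, size_t blob_len)
{
	iov[0].iov_base = req;
	iov[0].iov_len = total_len - 1;		/* drop the trailing placeholder byte */
	iov[1].iov_base = blob;
	iov[1].iov_len = blob_len;
	req->SecurityBufferLength = (uint16_t)blob_len;	/* kernel: cpu_to_le16() */
}
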
1645 struct kvec iov[1];
1675 iov[0].iov_base = (char *)req;
1676 iov[0].iov_len = total_len;
1679 rqst.rq_iov = iov;
1716 struct kvec iov[2];
1758 iov[0].iov_base = (char *)req;
1760 iov[0].iov_len = total_len - 1;
1765 iov[1].iov_base = unc_path;
1766 iov[1].iov_len = unc_path_len;
1781 rqst.rq_iov = iov;
1859 struct kvec iov[1];
1884 iov[0].iov_base = (char *)req;
1885 iov[0].iov_len = total_len;
1888 rqst.rq_iov = iov;
2055 add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
2058 struct smb2_create_req *req = iov[0].iov_base;
2061 iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock);
2062 if (iov[num].iov_base == NULL)
2064 iov[num].iov_len = server->vals->create_lease_size;
2069 iov[num - 1].iov_len);
2147 add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
2150 struct smb2_create_req *req = iov[0].iov_base;
2153 iov[num].iov_base = create_durable_v2_buf(oparms);
2154 if (iov[num].iov_base == NULL)
2156 iov[num].iov_len = sizeof(struct create_durable_v2);
2160 iov[1].iov_len);
2167 add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
2170 struct smb2_create_req *req = iov[0].iov_base;
2176 iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid);
2177 if (iov[num].iov_base == NULL)
2179 iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
2183 iov[1].iov_len);
2191 add_durable_context(struct kvec *iov, unsigned int *num_iovec,
2194 struct smb2_create_req *req = iov[0].iov_base;
2199 return add_durable_reconnect_v2_context(iov, num_iovec,
2202 return add_durable_v2_context(iov, num_iovec, oparms);
2206 iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
2210 iov[num].iov_base = create_durable_buf();
2211 if (iov[num].iov_base == NULL)
2213 iov[num].iov_len = sizeof(struct create_durable);
2217 iov[1].iov_len);
2250 add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp)
2252 struct smb2_create_req *req = iov[0].iov_base;
2255 iov[num].iov_base = create_twarp_buf(timewarp);
2256 if (iov[num].iov_base == NULL)
2258 iov[num].iov_len = sizeof(struct crt_twarp_ctxt);
2262 iov[num - 1].iov_len);
2384 add_sd_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode, bool set_owner)
2386 struct smb2_create_req *req = iov[0].iov_base;
2390 iov[num].iov_base = create_sd_buf(mode, set_owner, &len);
2391 if (iov[num].iov_base == NULL)
2393 iov[num].iov_len = len;
2397 iov[num - 1].iov_len);
2427 add_query_id_context(struct kvec *iov, unsigned int *num_iovec)
2429 struct smb2_create_req *req = iov[0].iov_base;
2432 iov[num].iov_base = create_query_id_buf();
2433 if (iov[num].iov_base == NULL)
2435 iov[num].iov_len = sizeof(struct crt_query_id_ctxt);
2439 iov[num - 1].iov_len);
2499 struct kvec iov[3]; /* make sure at least one for each open context */
2544 iov[0].iov_base = (char *)req;
2546 iov[0].iov_len = total_len - 1;
2593 iov[1].iov_len = uni_path_len;
2594 iov[1].iov_base = utf16_path;
2599 rc = add_posix_context(iov, &n_iov, mode);
2602 pc_buf = iov[n_iov-1].iov_base;
2607 rqst.rq_iov = iov;
2654 struct kvec *iov = rqst->rq_iov;
2663 iov[0].iov_base = (char *)req;
2665 iov[0].iov_len = total_len - 1;
2718 iov[1].iov_len = uni_path_len;
2719 iov[1].iov_base = path;
2731 rc = add_lease_context(server, iov, &n_iov,
2741 (struct create_context *)iov[n_iov-1].iov_base;
2746 rc = add_durable_context(iov, &n_iov, oparms,
2755 (struct create_context *)iov[n_iov-1].iov_base;
2757 cpu_to_le32(iov[n_iov-1].iov_len);
2760 rc = add_posix_context(iov, &n_iov, oparms->mode);
2769 (struct create_context *)iov[n_iov-1].iov_base;
2771 cpu_to_le32(iov[n_iov-1].iov_len);
2774 rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time);
2799 (struct create_context *)iov[n_iov-1].iov_base;
2800 ccontext->Next = cpu_to_le32(iov[n_iov-1].iov_len);
2804 rc = add_sd_context(iov, &n_iov, oparms->mode, set_owner);
2812 (struct create_context *)iov[n_iov-1].iov_base;
2813 ccontext->Next = cpu_to_le32(iov[n_iov-1].iov_len);
2815 add_query_id_context(iov, &n_iov);
2848 struct kvec iov[SMB2_CREATE_IOV_SIZE];
2862 memset(&iov, 0, sizeof(iov));
2863 rqst.rq_iov = iov;
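
In the SMB2_open_init matches (2654-2815), each optional create context added after the first is preceded by patching the previous context's Next field with that context's own length, so the contexts form a singly linked chain inside the CREATE request (see 2741, 2755-2757, 2769-2771, 2799-2800, 2812-2813). In the kernel this step is guarded by a check that a context already exists; the guard does not appear in the matches because it does not reference the iov identifier, and it is sketched here as n_iov > 2 on the assumption that iov[0] is the request and iov[1] the path. Stand-in types again; struct create_context is reduced to the one field the sketch needs.

#include <stdint.h>

/* struct kvec as in the first sketch above. */
struct create_context {		/* stand-in; the real context header has more fields */
	uint32_t Next;		/* byte offset from this context to the next, 0 if last */
};

static void chain_previous_context(struct kvec *iov, unsigned int n_iov)
{
	struct create_context *cc;

	if (n_iov <= 2)		/* iov[0] = request, iov[1] = path, no context yet */
		return;
	cc = (struct create_context *)iov[n_iov - 1].iov_base;
	cc->Next = (uint32_t)iov[n_iov - 1].iov_len;	/* kernel: cpu_to_le32() */
}
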
2934 struct kvec *iov = rqst->rq_iov;
2960 iov[0].iov_base = (char *)req;
2975 iov[0].iov_len = total_len - 1;
2976 iov[1].iov_base = in_data_buf;
2977 iov[1].iov_len = indatalen;
2980 iov[0].iov_len = total_len;
3041 struct kvec iov[SMB2_IOCTL_IOV_SIZE];
3071 memset(&iov, 0, sizeof(iov));
3072 rqst.rq_iov = iov;
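
The SMB2_ioctl matches (2934-3072) add a twist on the two-part layout: iov[0] is shortened by the placeholder byte only when there is input data to carry in iov[1]; otherwise the full total_len is kept (2975-2980). A sketch of that branch, with the stand-in types as before; in_data_buf and indatalen follow the names visible in the matches.

#include <stddef.h>

/* struct kvec as in the first sketch above. */
static unsigned int fill_ioctl_iov(struct kvec *iov, void *req, size_t total_len,
				   void *in_data_buf, size_t indatalen)
{
	iov[0].iov_base = req;
	if (indatalen) {
		iov[0].iov_len = total_len - 1;		/* placeholder byte replaced by real data */
		iov[1].iov_base = in_data_buf;
		iov[1].iov_len = indatalen;
		return 2;				/* header + input data */
	}
	iov[0].iov_len = total_len;			/* no input data: keep the full length */
	return 1;
}
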
3175 struct kvec *iov = rqst->rq_iov;
3190 iov[0].iov_base = (char *)req;
3191 iov[0].iov_len = total_len;
3212 struct kvec iov[1];
3228 memset(&iov, 0, sizeof(iov));
3229 rqst.rq_iov = iov;
3290 struct kvec *iov, unsigned int min_buf_size)
3292 unsigned int smb_len = iov->iov_len;
3293 char *end_of_smb = smb_len + (char *)iov->iov_base;
3294 char *begin_of_buf = offset + (char *)iov->iov_base;
3325 struct kvec *iov, unsigned int minbufsize,
3328 char *begin_of_buf = offset + (char *)iov->iov_base;
3334 rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize);
3351 struct kvec *iov = rqst->rq_iov;
3379 iov[0].iov_base = (char *)req;
3381 iov[0].iov_len = len;
3400 struct kvec iov[1];
3421 memset(&iov, 0, sizeof(iov));
3422 rqst.rq_iov = iov;
3539 struct kvec *iov = rqst->rq_iov;
3559 iov[0].iov_base = (char *)req;
3560 iov[0].iov_len = total_len;
3573 struct kvec iov[1];
3587 memset(&iov, 0, sizeof(iov));
3588 rqst.rq_iov = iov;
3717 struct kvec iov[1];
3718 struct smb_rqst rqst = { .rq_iov = iov,
3737 iov[0].iov_len = total_len;
3738 iov[0].iov_base = (char *)req;
3762 struct kvec *iov = rqst->rq_iov;
3774 iov[0].iov_base = (char *)req;
3775 iov[0].iov_len = total_len;
3786 struct kvec iov[1];
3801 memset(&iov, 0, sizeof(iov));
3802 rqst.rq_iov = iov;
3934 (struct smb2_sync_hdr *)rdata->iov[0].iov_base;
3936 struct smb_rqst rqst = { .rq_iov = &rdata->iov[1],
4025 struct smb_rqst rqst = { .rq_iov = rdata->iov,
4053 rdata->iov[0].iov_base = buf;
4054 rdata->iov[0].iov_len = total_len;
4097 struct kvec iov[1];
4114 iov[0].iov_base = (char *)req;
4115 iov[0].iov_len = total_len;
4118 rqst.rq_iov = iov;
4265 struct kvec iov[1];
4337 iov[0].iov_len = total_len - 1;
4338 iov[0].iov_base = (char *)req;
4340 rqst.rq_iov = iov;
4349 iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
4394 * SMB2_write function gets iov pointer to kvec array with n_vec as a length.
4396 * elements with data to write that begins with position 1 in iov array. All
4401 unsigned int *nbytes, struct kvec *iov, int n_vec)
4449 iov[0].iov_base = (char *)req;
4451 iov[0].iov_len = total_len - 1;
4454 rqst.rq_iov = iov;
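
The comment fragments at 4394/4396 describe the SMB2_write calling convention: the caller passes a kvec array in which the data to write starts at position 1, and SMB2_write itself fills position 0 with the WRITE header (4449/4451, again total_len - 1). A short caller-side sketch under those assumptions, reusing the earlier stand-in; the function and parameter names are hypothetical.

#include <stddef.h>

/* struct kvec as in the first sketch above. */
static void prep_write_data(struct kvec *iov, unsigned int *n_vec,
			    void *data, size_t data_len)
{
	/*
	 * Slot 0 is left for SMB2_write() itself, which fills in the WRITE
	 * header there; caller data starts at slot 1 per the comment at
	 * 4394-4396.
	 */
	iov[1].iov_base = data;
	iov[1].iov_len = data_len;
	*n_vec = 1;			/* count of data elements only */
}
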
4645 struct kvec *iov = rqst->rq_iov;
4688 iov[0].iov_base = (char *)req;
4690 iov[0].iov_len = total_len - 1;
4692 iov[1].iov_base = (char *)(req->Buffer);
4693 iov[1].iov_len = len;
4786 struct kvec iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
4802 memset(&iov, 0, sizeof(iov));
4803 rqst.rq_iov = iov;
4858 struct kvec *iov = rqst->rq_iov;
4881 iov[0].iov_base = (char *)req;
4883 iov[0].iov_len = total_len - 1;
4887 iov[i].iov_base = (char *)data[i];
4888 iov[i].iov_len = size[i];
4909 struct kvec *iov;
4926 iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL);
4927 if (!iov)
4931 rqst.rq_iov = iov;
4939 kfree(iov);
4957 kfree(iov);
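
The SET_INFO matches (4858-4957) generalize the builder pattern to several payload buffers: iov[0] is the request minus the placeholder byte, slots 1..num-1 each take one extra data buffer (4887-4888), and the caller allocates the kvec array for the number of buffers and frees it afterwards (4926, 4939, 4957). A userspace sketch that folds those two steps together; calloc stands in for kmalloc_array, and the function name is hypothetical.

#include <stdlib.h>

/* struct kvec as in the first sketch above. */
static struct kvec *build_set_info_iov(void *req, size_t req_len,
				       void **data, size_t *size,
				       unsigned int num)
{
	struct kvec *iov;
	unsigned int i;

	iov = calloc(num, sizeof(*iov));	/* kernel: kmalloc_array(num, ...) */
	if (!iov)
		return NULL;

	iov[0].iov_base = req;
	iov[0].iov_len = req_len - 1;		/* drop the trailing placeholder byte */
	for (i = 1; i < num; i++) {		/* mirrors the loop at 4887-4888 */
		iov[i].iov_base = data[i];
		iov[i].iov_len = size[i];
	}
	return iov;				/* caller frees it, as at 4939/4957 */
}
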
5011 struct kvec iov[1];
5031 iov[0].iov_base = (char *)req;
5032 iov[0].iov_len = total_len;
5035 rqst.rq_iov = iov;
5082 build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
5111 iov->iov_base = (char *)req;
5112 iov->iov_len = total_len;
5116 static inline void free_qfs_info_req(struct kvec *iov)
5118 cifs_buf_release(iov->iov_base);
5127 struct kvec iov;
5136 rc = build_qfs_info_req(&iov, tcon, server,
5147 rqst.rq_iov = &iov;
5152 free_qfs_info_req(&iov);
5178 struct kvec iov;
5187 rc = build_qfs_info_req(&iov, tcon, server,
5198 rqst.rq_iov = &iov;
5203 free_qfs_info_req(&iov);
5229 struct kvec iov;
5255 rc = build_qfs_info_req(&iov, tcon, server,
5265 rqst.rq_iov = &iov;
5270 free_qfs_info_req(&iov);
5316 struct kvec iov[2];
5342 iov[0].iov_base = (char *)req;
5343 iov[0].iov_len = total_len - sizeof(struct smb2_lock_element);
5344 iov[1].iov_base = (char *)buf;
5345 iov[1].iov_len = count;
5350 rqst.rq_iov = iov;
5394 struct kvec iov[1];
5419 iov[0].iov_base = (char *)req;
5420 iov[0].iov_len = total_len;
5423 rqst.rq_iov = iov;
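
Nearly every group above ends the same way: the finished kvec array is handed to the transport by pointing a struct smb_rqst at it (rqst.rq_iov = iov), as at 869, 1268, 1781, 2607, 3229, 5035, and 5423. A final stand-alone sketch of that hand-off; struct smb_rqst is reduced here to the two fields the pattern uses, while the real definition in fs/cifs/cifsglob.h carries more (page descriptors for large reads and writes, among others).

#include <stddef.h>

/* struct kvec as in the first sketch above. */
struct smb_rqst {			/* reduced stand-in; see fs/cifs/cifsglob.h */
	struct kvec *rq_iov;		/* array of request segments */
	unsigned int rq_nvec;		/* number of populated segments */
};

static void send_simple_request(void *req, size_t total_len)
{
	struct kvec iov[1];
	struct smb_rqst rqst = { 0 };

	iov[0].iov_base = req;		/* fixed-size request, no variable tail */
	iov[0].iov_len = total_len;

	rqst.rq_iov = iov;
	rqst.rq_nvec = 1;
	/* ...rqst is then handed to the transport send path (cifs_send_recv() in the kernel)... */
}
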