Lines Matching refs:server

44 alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
48 if (server == NULL) {
63 temp->server = server;
84 __le16 command = midEntry->server->vals->lock_cmd;
89 struct TCP_Server_Info *server = midEntry->server;
94 server->ops->handle_cancelled_mid)
95 server->ops->handle_cancelled_mid(midEntry, server);
110 if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
111 server->slowest_cmd[smb_cmd] = roundtrip_time;
112 server->fastest_cmd[smb_cmd] = roundtrip_time;
114 if (server->slowest_cmd[smb_cmd] < roundtrip_time)
115 server->slowest_cmd[smb_cmd] = roundtrip_time;
116 else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
117 server->fastest_cmd[smb_cmd] = roundtrip_time;
119 cifs_stats_inc(&server->num_cmds[smb_cmd]);
120 server->time_per_cmd[smb_cmd] += roundtrip_time;
125 * busy server. Note that this calc is unlikely or impossible to wrap
140 cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
162 spin_lock(&mid->server->mid_lock);
167 spin_unlock(&mid->server->mid_lock);
173 * smb_send_kvec - send an array of kvecs to the server
174 * @server: Server to send the data to
178 * Our basic "send data to server" function. Should be called with srv_mutex
182 smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
187 struct socket *ssocket = server->ssocket;
191 if (server->noblocksnd)
204 * the server in SendReceive[2] for the server to send
219 (!server->noblocksnd && (retries > 2))) {
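
The lines above are from smb_send_kvec(), whose kernel-doc calls it the basic "send data to server" routine: it pushes an array of kvecs out server->ssocket under the srv_mutex and, going by the matched retry check, tolerates a bounded number of transient failures, with server->noblocksnd selecting non-blocking sends. As a rough illustration of that retry-on-short-send loop, here is a minimal userspace sketch built on writev(); send_all_iovecs() and MAX_EAGAIN_RETRIES are invented names, and this is a sketch of the pattern, not the kernel implementation.

/* Hypothetical userspace analogue of the smb_send_kvec() retry pattern:
 * keep calling writev() until every iovec is drained, tolerating a few
 * short writes or EAGAIN results before giving up. The caller's iovec
 * array is consumed in place. */
#include <errno.h>
#include <stddef.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>

#define MAX_EAGAIN_RETRIES 3	/* assumed limit, not taken from the kernel */

static int send_all_iovecs(int sockfd, struct iovec *iov, int iovcnt,
			   size_t *sent)
{
	int retries = 0;

	*sent = 0;
	while (iovcnt > 0) {
		ssize_t n = writev(sockfd, iov, iovcnt);

		if (n < 0) {
			if (errno == EAGAIN && retries++ < MAX_EAGAIN_RETRIES)
				continue;	/* transient; try again */
			return -errno;		/* hard error: caller reconnects */
		}
		if (n == 0)
			return -EIO;		/* avoid spinning on a dead socket */
		retries = 0;
		*sent += (size_t)n;

		/* Advance past fully written iovecs, trim a partial one. */
		while (iovcnt > 0 && (size_t)n >= iov->iov_len) {
			n -= (ssize_t)iov->iov_len;
			iov++;
			iovcnt--;
		}
		if (iovcnt > 0) {
			iov->iov_base = (char *)iov->iov_base + n;
			iov->iov_len -= (size_t)n;
		}
	}
	return 0;
}
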
247 smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
254 if (!is_smb1(server) && rqst->rq_nvec >= 2 &&
272 __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
282 struct socket *ssocket = server->ssocket;
286 cifs_in_send_inc(server);
287 if (cifs_rdma_enabled(server)) {
290 if (server->smbd_conn)
291 rc = smbd_send(server, num_rqst, rqst);
310 send_length += smb_rqst_len(server, &rqst[j]);
316 * latency of system calls and overload a server with unnecessary
324 if (!is_smb1(server)) {
330 rc = smb_send_kvec(server, &smb_msg, &sent);
352 rc = smb_send_kvec(server, &smb_msg, &sent);
360 rc = smb_send_kvec(server, &smb_msg, &sent);
373 * the server we need to return success status to allow a corresponding
375 * to handle responses from the server by the client.
379 * won't be any response from the server to handle.
396 * socket so the server throws away the partial SMB
398 cifs_signal_cifsd_for_reconnect(server, false);
399 trace_smb3_partial_send_reconnect(server->CurrentMid,
400 server->conn_id, server->hostname);
409 cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
412 cifs_signal_cifsd_for_reconnect(server, false);
416 cifs_in_send_dec(server);
427 smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
436 return __smb_send_rqst(server, num_rqst, rqst);
441 if (!server->ops->init_transform_rq) {
457 rc = server->ops->init_transform_rq(server, num_rqst + 1,
462 rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
470 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
482 return __smb_send_rqst(server, 1, &rqst);
486 wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
505 credits = server->ops->get_credits_field(server, optype);
510 spin_lock(&server->req_lock);
513 server->in_flight++;
514 if (server->in_flight > server->max_in_flight)
515 server->max_in_flight = server->in_flight;
517 *instance = server->reconnect_instance;
519 in_flight = server->in_flight;
520 spin_unlock(&server->req_lock);
522 trace_smb3_nblk_credits(server->CurrentMid,
523 server->conn_id, server->hostname, scredits, -1, in_flight);
531 spin_unlock(&server->req_lock);
533 spin_lock(&server->srv_lock);
534 if (server->tcpStatus == CifsExiting) {
535 spin_unlock(&server->srv_lock);
538 spin_unlock(&server->srv_lock);
540 spin_lock(&server->req_lock);
543 spin_unlock(&server->req_lock);
545 cifs_num_waiters_inc(server);
546 rc = wait_event_killable_timeout(server->request_q,
547 has_credits(server, credits, num_credits), t);
548 cifs_num_waiters_dec(server);
550 spin_lock(&server->req_lock);
552 in_flight = server->in_flight;
553 spin_unlock(&server->req_lock);
555 trace_smb3_credit_timeout(server->CurrentMid,
556 server->conn_id, server->hostname, scredits,
564 spin_lock(&server->req_lock);
580 server->in_flight > 2 * MAX_COMPOUND &&
582 spin_unlock(&server->req_lock);
584 cifs_num_waiters_inc(server);
586 server->request_q,
587 has_credits(server, credits,
590 cifs_num_waiters_dec(server);
592 spin_lock(&server->req_lock);
594 in_flight = server->in_flight;
595 spin_unlock(&server->req_lock);
598 server->CurrentMid,
599 server->conn_id, server->hostname,
607 spin_lock(&server->req_lock);
613 * as they are allowed to block on server.
616 /* update # of requests on the wire to server */
619 server->in_flight += num_credits;
620 if (server->in_flight > server->max_in_flight)
621 server->max_in_flight = server->in_flight;
622 *instance = server->reconnect_instance;
625 in_flight = server->in_flight;
626 spin_unlock(&server->req_lock);
628 trace_smb3_waitff_credits(server->CurrentMid,
629 server->conn_id, server->hostname, scredits,
640 wait_for_free_request(struct TCP_Server_Info *server, const int flags,
643 return wait_for_free_credits(server, 1, -1, flags,
648 wait_for_compound_request(struct TCP_Server_Info *server, int num,
654 credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
656 spin_lock(&server->req_lock);
658 in_flight = server->in_flight;
662 * If the server is tight on resources or just gives us less
664 * order and the server delays granting more credits until it
671 * server will return enough credits to satisfy this compound
677 if (server->in_flight == 0) {
678 spin_unlock(&server->req_lock);
679 trace_smb3_insufficient_credits(server->CurrentMid,
680 server->conn_id, server->hostname, scredits,
687 spin_unlock(&server->req_lock);
689 return wait_for_free_credits(server, num, 60000, flags,
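
The two blocks of matches above, from wait_for_free_credits() and wait_for_compound_request(), show the client-side throttle: under server->req_lock the code checks the credit counter returned by get_credits_field(), sleeps on server->request_q (killable, with a timeout) until enough credits arrive, then charges the credits, bumps server->in_flight, and records max_in_flight plus the current reconnect_instance. A compound request additionally fails fast when nothing is in flight, because no response would ever replenish the pool. The sketch below models that pattern in plain pthreads under stated assumptions; struct credit_pool, wait_for_credits() and wait_for_compound() are hypothetical stand-ins, not the kernel interfaces.

/* Hypothetical userspace analogue of the credit throttle seen in
 * wait_for_free_credits() / wait_for_compound_request(). The mutex plays
 * the role of req_lock and the condition variable that of request_q. */
#include <errno.h>
#include <pthread.h>
#include <time.h>

struct credit_pool {
	pthread_mutex_t	lock;		/* req_lock stand-in */
	pthread_cond_t	avail;		/* request_q stand-in */
	int		credits;	/* credits granted by the peer */
	int		in_flight;	/* requests currently on the wire */
	int		max_in_flight;	/* high-water mark, kept for stats */
	unsigned int	instance;	/* bumped on every reconnect */
};

static int wait_for_credits(struct credit_pool *p, int num, long timeout_ms,
			    unsigned int *instance)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_ms / 1000;
	ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&p->lock);
	while (p->credits < num) {
		int rc = pthread_cond_timedwait(&p->avail, &p->lock, &ts);

		if (rc == ETIMEDOUT) {
			pthread_mutex_unlock(&p->lock);
			return -EBUSY;	/* cf. the credit_timeout trace path */
		}
	}
	p->credits -= num;
	p->in_flight += num;		/* "update # of requests on the wire" */
	if (p->in_flight > p->max_in_flight)
		p->max_in_flight = p->in_flight;
	*instance = p->instance;	/* lets the sender detect reconnects */
	pthread_mutex_unlock(&p->lock);
	return 0;
}

static int wait_for_compound(struct credit_pool *p, int num,
			     unsigned int *instance)
{
	pthread_mutex_lock(&p->lock);
	if (p->credits < num && p->in_flight == 0) {
		/* Nothing in flight means no response will ever top the
		 * pool back up; waiting would deadlock, so fail instead. */
		pthread_mutex_unlock(&p->lock);
		return -EDEADLK;
	}
	pthread_mutex_unlock(&p->lock);
	return wait_for_credits(p, num, 60000, instance);	/* 60s, per line 689 */
}

When the demultiplex side parses a response it would add the granted credits back and broadcast on the condition variable, which is roughly the role the add_credits_and_wake_if() calls matched further down play.
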
694 cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
699 credits->instance = server->reconnect_instance;
726 *ppmidQ = alloc_mid(in_buf, ses->server);
729 spin_lock(&ses->server->mid_lock);
730 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
731 spin_unlock(&ses->server->mid_lock);
736 wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
740 error = wait_event_state(server->response_q,
751 cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
761 /* enable signing if server requires it */
762 if (server->sign)
765 mid = alloc_mid(hdr, server);
769 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
783 cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
797 rc = wait_for_free_request(server, flags, &instance);
805 cifs_server_lock(server);
812 if (instance != server->reconnect_instance) {
813 cifs_server_unlock(server);
814 add_credits_and_wake_if(server, &credits, optype);
818 mid = server->ops->setup_async_request(server, rqst);
820 cifs_server_unlock(server);
821 add_credits_and_wake_if(server, &credits, optype);
832 spin_lock(&server->mid_lock);
833 list_add_tail(&mid->qhead, &server->pending_mid_q);
834 spin_unlock(&server->mid_lock);
841 rc = smb_send_rqst(server, 1, rqst, flags);
844 revert_current_mid(server, mid->credits);
845 server->sequence_number -= 2;
849 cifs_server_unlock(server);
854 add_credits_and_wake_if(server, &credits, optype);
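
The cifs_call_async() matches above show a detail worth calling out: after sleeping for a credit, the caller re-checks server->reconnect_instance under the server lock, and if the connection was rebuilt in the meantime it returns the credit via add_credits_and_wake_if() rather than sending on the new session with a stale grant. A minimal standalone sketch of that check, with invented names (struct conn_state, claim_send_slot) and an assumed -EAGAIN convention:

/* Hypothetical illustration of the reconnect_instance re-check: credits
 * taken before a reconnect belong to the old connection instance, so if
 * the instance changed while we slept, the caller must hand them back
 * and retry. The mutex stands in for cifs_server_lock(). */
#include <errno.h>
#include <pthread.h>

struct conn_state {
	pthread_mutex_t	srv_mutex;
	unsigned int	reconnect_instance;
};

static int claim_send_slot(struct conn_state *s, unsigned int instance_at_claim)
{
	pthread_mutex_lock(&s->srv_mutex);
	if (instance_at_claim != s->reconnect_instance) {
		/* Connection was torn down and rebuilt while we waited for
		 * credits; give them back and let the caller retry. */
		pthread_mutex_unlock(&s->srv_mutex);
		return -EAGAIN;
	}
	/* ...build the mid, queue it on the pending list, send... */
	pthread_mutex_unlock(&s->srv_mutex);
	return 0;
}
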
886 cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
893 spin_lock(&server->mid_lock);
896 spin_unlock(&server->mid_lock);
916 spin_unlock(&server->mid_lock);
923 send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
926 return server->ops->send_cancel ?
927 server->ops->send_cancel(server, rqst, mid) : 0;
931 cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
939 if (server->sign) {
950 rc = cifs_verify_signature(&rqst, server,
976 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
987 struct TCP_Server_Info *server = mid->server;
990 credits.value = server->ops->get_credits(mid);
991 credits.instance = server->reconnect_instance;
993 add_credits(server, &credits, mid->optype);
1024 struct TCP_Server_Info *server = NULL;
1032 server = ses->chans[i].server;
1033 if (!server || server->terminate)
1041 * server->in_flight. But it shouldn't matter much here if we
1047 if (server->in_flight < min_in_flight) {
1048 min_in_flight = server->in_flight;
1051 if (server->in_flight > max_in_flight)
1052 max_in_flight = server->in_flight;
1062 return ses->chans[index].server;
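
These matches are from cifs_pick_channel(), which walks ses->chans[] and prefers the channel whose TCP_Server_Info currently has the fewest requests in flight, skipping channels whose server is missing or marked terminate; the matched comment notes that server->in_flight is read without req_lock here, a slightly stale value being acceptable for load balancing. A hedged sketch of that least-loaded selection, with an invented struct layout:

/* Hypothetical sketch of least-loaded channel selection in the spirit of
 * cifs_pick_channel(): pick the connection with the fewest requests in
 * flight, skipping dead connections. The struct layout is made up. */
#include <stddef.h>

struct conn {
	int in_flight;
	int terminated;
};

struct channel {
	struct conn *conn;
};

static struct conn *pick_channel(struct channel *chans, int nchans)
{
	struct conn *best = NULL;
	int min_in_flight = -1;

	for (int i = 0; i < nchans; i++) {
		struct conn *c = chans[i].conn;

		if (!c || c->terminated)
			continue;
		if (min_in_flight < 0 || c->in_flight < min_in_flight) {
			min_in_flight = c->in_flight;
			best = c;
		}
	}
	return best;	/* may be NULL if every channel is down */
}
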
1067 struct TCP_Server_Info *server,
1085 if (!ses || !ses->server || !server) {
1090 spin_lock(&server->srv_lock);
1091 if (server->tcpStatus == CifsExiting) {
1092 spin_unlock(&server->srv_lock);
1095 spin_unlock(&server->srv_lock);
1100 * credits if the server doesn't grant credits to the outstanding
1105 rc = wait_for_compound_request(server, num_rqst, flags,
1121 cifs_server_lock(server);
1130 if (instance != server->reconnect_instance) {
1131 cifs_server_unlock(server);
1133 add_credits(server, &credits[j], optype);
1138 midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
1140 revert_current_mid(server, i);
1143 cifs_server_unlock(server);
1145 /* Update # of requests on wire to server */
1147 add_credits(server, &credits[j], optype);
1163 rc = smb_send_rqst(server, num_rqst, rqst, flags);
1169 revert_current_mid(server, num_rqst);
1170 server->sequence_number -= 2;
1173 cifs_server_unlock(server);
1181 add_credits(server, &credits[i], optype);
1187 * that any credits taken from the server structure on the client have
1189 * we will collect credits granted by the server in the mid callbacks
1190 * and add those credits to the server structure.
1200 cifs_server_lock(server);
1201 smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
1202 cifs_server_unlock(server);
1209 rc = wait_for_response(server, midQ[i]);
1217 send_cancel(server, &rqst[i], midQ[i]);
1218 spin_lock(&server->mid_lock);
1226 spin_unlock(&server->mid_lock);
1234 rc = cifs_sync_mid_result(midQ[i], server);
1251 HEADER_PREAMBLE_SIZE(server);
1258 rc = server->ops->check_receive(midQ[i], server,
1277 cifs_server_lock(server);
1278 smb311_update_preauth_hash(ses, server, &iov, 1);
1279 cifs_server_unlock(server);
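
Within compound_send_recv(), the matched comments note that credits taken from the server structure before the send are handed over to the individual mids, and that credits granted by the server come back through the mid callbacks (the get_credits()/add_credits() lines matched around line 990 above). Continuing the hypothetical credit_pool sketch from earlier, the callback side would simply top the pool back up and wake any waiters, which is roughly the role add_credits() / add_credits_and_wake_if() play upstream:

/* Companion to the wait_for_credits() sketch above (reuses its
 * struct credit_pool): when a response is parsed, return the credits the
 * peer granted, retire the in-flight slot, and wake sleeping senders. */
static void return_credits(struct credit_pool *p, int granted,
			   int in_flight_done)
{
	pthread_mutex_lock(&p->lock);
	p->credits += granted;
	p->in_flight -= in_flight_done;
	pthread_mutex_unlock(&p->lock);
	pthread_cond_broadcast(&p->avail);	/* request_q wake-up stand-in */
}
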
1301 struct TCP_Server_Info *server,
1305 return compound_send_recv(xid, ses, server, flags, 1,
1341 rc = cifs_send_recv(xid, ses, ses->server,
1359 struct TCP_Server_Info *server;
1365 server = ses->server;
1366 if (server == NULL) {
1371 spin_lock(&server->srv_lock);
1372 if (server->tcpStatus == CifsExiting) {
1373 spin_unlock(&server->srv_lock);
1376 spin_unlock(&server->srv_lock);
1379 to the same server. We may make this configurable later or
1388 rc = wait_for_free_request(server, flags, &credits.instance);
1396 cifs_server_lock(server);
1400 cifs_server_unlock(server);
1401 /* Update # of requests on wire to server */
1402 add_credits(server, &credits, 0);
1406 rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1408 cifs_server_unlock(server);
1414 rc = smb_send(server, in_buf, len);
1418 server->sequence_number -= 2;
1420 cifs_server_unlock(server);
1425 rc = wait_for_response(server, midQ);
1427 send_cancel(server, &rqst, midQ);
1428 spin_lock(&server->mid_lock);
1433 spin_unlock(&server->mid_lock);
1434 add_credits(server, &credits, 0);
1437 spin_unlock(&server->mid_lock);
1440 rc = cifs_sync_mid_result(midQ, server);
1442 add_credits(server, &credits, 0);
1455 rc = cifs_check_receive(midQ, server, 0);
1458 add_credits(server, &credits, 0);
1482 pSMB->hdr.Mid = get_next_mid(ses->server);
1501 struct TCP_Server_Info *server;
1508 server = ses->server;
1510 if (server == NULL) {
1515 spin_lock(&server->srv_lock);
1516 if (server->tcpStatus == CifsExiting) {
1517 spin_unlock(&server->srv_lock);
1520 spin_unlock(&server->srv_lock);
1523 to the same server. We may make this configurable later or
1532 rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
1540 cifs_server_lock(server);
1544 cifs_server_unlock(server);
1548 rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1551 cifs_server_unlock(server);
1556 rc = smb_send(server, in_buf, len);
1560 server->sequence_number -= 2;
1562 cifs_server_unlock(server);
1570 rc = wait_event_interruptible(server->response_q,
1573 ((server->tcpStatus != CifsGood) &&
1574 (server->tcpStatus != CifsNew)));
1577 spin_lock(&server->srv_lock);
1581 ((server->tcpStatus == CifsGood) ||
1582 (server->tcpStatus == CifsNew))) {
1583 spin_unlock(&server->srv_lock);
1588 rc = send_cancel(server, &rqst, midQ);
1607 rc = wait_for_response(server, midQ);
1609 send_cancel(server, &rqst, midQ);
1610 spin_lock(&server->mid_lock);
1615 spin_unlock(&server->mid_lock);
1618 spin_unlock(&server->mid_lock);
1623 spin_lock(&server->srv_lock);
1625 spin_unlock(&server->srv_lock);
1627 rc = cifs_sync_mid_result(midQ, server);
1640 rc = cifs_check_receive(midQ, server, 0);
1653 cifs_discard_remaining_data(struct TCP_Server_Info *server)
1655 unsigned int rfclen = server->pdu_size;
1656 size_t remaining = rfclen + HEADER_PREAMBLE_SIZE(server) -
1657 server->total_read;
1662 length = cifs_discard_from_socket(server,
1664 CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
1667 server->total_read += length;
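
cifs_discard_remaining_data(), matched above, drains whatever is left of the current frame (server->pdu_size plus the preamble, minus server->total_read) in chunks bounded by CIFSMaxBufSize + MAX_HEADER_SIZE, so a hostile length field cannot force one huge read. A small userspace sketch of that chunked-discard idea, with a made-up chunk size and function name:

/* Illustrative chunked discard: drain the rest of a frame we do not want,
 * a bounded chunk at a time. DISCARD_CHUNK stands in for the kernel's
 * CIFSMaxBufSize + MAX_HEADER_SIZE bound. */
#include <stddef.h>
#include <sys/types.h>
#include <unistd.h>

#define DISCARD_CHUNK 4096	/* illustrative bound only */

static int discard_remaining(int sockfd, size_t frame_len, size_t already_read)
{
	char scratch[DISCARD_CHUNK];
	size_t remaining = frame_len - already_read;

	while (remaining > 0) {
		size_t want = remaining < sizeof(scratch) ?
				remaining : sizeof(scratch);
		ssize_t n = read(sockfd, scratch, want);

		if (n <= 0)
			return -1;	/* socket error or EOF mid-frame */
		remaining -= (size_t)n;
	}
	return 0;
}
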
1675 __cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid,
1680 length = cifs_discard_remaining_data(server);
1682 mid->resp_buf = server->smallbuf;
1683 server->smallbuf = NULL;
1688 cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1692 return __cifs_readv_discard(server, mid, rdata->result);
1696 cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1701 char *buf = server->smallbuf;
1702 unsigned int buflen = server->pdu_size + HEADER_PREAMBLE_SIZE(server);
1713 len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
1714 HEADER_SIZE(server) + 1;
1716 length = cifs_read_from_socket(server,
1717 buf + HEADER_SIZE(server) - 1, len);
1720 server->total_read += length;
1722 if (server->ops->is_session_expired &&
1723 server->ops->is_session_expired(buf)) {
1724 cifs_reconnect(server, true);
1728 if (server->ops->is_status_pending &&
1729 server->ops->is_status_pending(buf, server)) {
1730 cifs_discard_remaining_data(server);
1736 rdata->iov[0].iov_len = HEADER_PREAMBLE_SIZE(server);
1737 rdata->iov[1].iov_base = buf + HEADER_PREAMBLE_SIZE(server);
1739 server->total_read - HEADER_PREAMBLE_SIZE(server);
1746 rdata->result = server->ops->map_error(buf, false);
1748 cifs_dbg(FYI, "%s: server returned error %d\n",
1751 return __cifs_readv_discard(server, mid, false);
1755 if (server->total_read < server->vals->read_rsp_size) {
1756 cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n",
1757 __func__, server->total_read,
1758 server->vals->read_rsp_size);
1760 return cifs_readv_discard(server, mid);
1763 data_offset = server->ops->read_data_offset(buf) +
1764 HEADER_PREAMBLE_SIZE(server);
1765 if (data_offset < server->total_read) {
1773 data_offset = server->total_read;
1779 return cifs_readv_discard(server, mid);
1783 __func__, server->total_read, data_offset);
1785 len = data_offset - server->total_read;
1788 length = cifs_read_from_socket(server,
1789 buf + server->total_read, len);
1792 server->total_read += length;
1799 data_len = server->ops->read_data_length(buf, use_rdma_mr);
1803 return cifs_readv_discard(server, mid);
1811 length = cifs_read_iter_from_socket(server, &rdata->iter,
1815 server->total_read += length;
1818 server->total_read, buflen, data_len);
1821 if (server->total_read < buflen)
1822 return cifs_readv_discard(server, mid);
1825 mid->resp_buf = server->smallbuf;
1826 server->smallbuf = NULL;
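
The final block of matches is from cifs_readv_receive(): it reads the response header from the socket, lets is_session_expired / is_status_pending short-circuit the frame, then validates the header length, data offset and data length against server->total_read and the advertised PDU size before reading the payload, falling back to cifs_readv_discard() on any mismatch. The checks below are an illustrative reconstruction of that validation under assumed field names (struct read_rsp_layout, validate_read_rsp); like the matched lines, an offset that points back inside the already-read header is clamped rather than rejected.

/* Illustrative bounds checks in the spirit of cifs_readv_receive():
 * reject a read response whose header is short or whose data offset /
 * length point outside the received PDU. All names are made up. */
#include <stddef.h>

struct read_rsp_layout {
	size_t total_read;	/* bytes of the PDU consumed so far */
	size_t rsp_hdr_size;	/* minimum size of a read response */
	size_t data_offset;	/* offset of payload from PDU start */
	size_t data_len;	/* payload length claimed by the peer */
	size_t buflen;		/* full PDU length we were told about */
};

static int validate_read_rsp(struct read_rsp_layout *r)
{
	if (r->total_read < r->rsp_hdr_size)
		return -1;			/* short header: discard frame */
	if (r->data_offset < r->total_read)
		r->data_offset = r->total_read;	/* clamp offset inside header */
	if (r->data_offset > r->buflen ||
	    r->data_len > r->buflen - r->data_offset)
		return -1;			/* payload runs past the PDU */
	return 0;				/* safe to read data_len bytes */
}
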