Lines matching refs:req (cross-reference hits for the identifier req; the code is libuv's Windows UDP implementation, src/win/udp.c)
29 #include "req-inl.h"
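
The include at line 29 pulls in the request helpers that recur throughout these matches: REQ_SUCCESS, SET_REQ_ERROR, GET_REQ_SOCK_ERROR, and uv__insert_pending_req. As a rough paraphrase of the idea (the SKETCH_ names below are illustrative stand-ins, not the verbatim req-inl.h macros), a req records its completion status inside its own embedded OVERLAPPED, so a request queued with uv__insert_pending_req already carries its outcome:

/* Paraphrased sketch, assuming the status is kept in OVERLAPPED.Internal
 * as libuv does; the SKETCH_ names are hypothetical. */
#define SKETCH_SET_REQ_STATUS(req, status) \
  ((req)->u.io.overlapped.Internal = (ULONG_PTR) (status))

#define SKETCH_REQ_STATUS(req) \
  ((NTSTATUS) (req)->u.io.overlapped.Internal)

#define SKETCH_REQ_SUCCESS(req) \
  (NT_SUCCESS(SKETCH_REQ_STATUS(req)))
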
268 uv_req_t* req;
276 req = &handle->recv_req;
277 memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
306 &req->u.io.overlapped,
310 /* Process the req without IOCP. */
312 req->u.io.overlapped.InternalHigh = bytes;
314 uv__insert_pending_req(loop, req);
316 /* The req will be processed with IOCP. */
320 /* Make this req pending reporting an error. */
321 SET_REQ_ERROR(req, WSAGetLastError());
322 uv__insert_pending_req(loop, req);
338 &req->u.io.overlapped,
342 /* Process the req without IOCP. */
344 req->u.io.overlapped.InternalHigh = bytes;
346 uv__insert_pending_req(loop, req);
348 /* The req will be processed with IOCP. */
352 /* Make this req pending reporting an error. */
353 SET_REQ_ERROR(req, WSAGetLastError());
354 uv__insert_pending_req(loop, req);
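
Lines 306-322 and 338-354 are two instances of the same dispatch that follows every overlapped WSARecv/WSARecvFrom call: if the call completed inline and the socket skips IOCP notification on success (libuv wraps this test in macros along the lines of UV_SUCCEEDED_WITHOUT_IOCP / UV_SUCCEEDED_WITH_IOCP), no completion packet will arrive, so the byte count is written into overlapped.InternalHigh and the req is queued as already pending; if the call is merely in flight, IOCP will deliver it later; on a hard failure the error is stashed on the req and the req is queued pending anyway, so the failure surfaces through the normal completion path. A condensed, paraphrased sketch (field and flag names follow libuv internals and compile only inside its tree; bookkeeping such as reqs_pending and the zero-read variant are elided):

/* Condensed paraphrase of the receive-queueing dispatch (lines 268-354);
 * not verbatim libuv. */
static void sketch_udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
  uv_req_t* req;
  DWORD bytes, flags;
  int result;

  req = &handle->recv_req;
  memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));

  flags = 0;
  handle->recv_from_len = sizeof(handle->recv_from);
  result = WSARecvFrom(handle->socket,
                       (WSABUF*) &handle->recv_buffer,
                       1,
                       &bytes,
                       &flags,
                       (struct sockaddr*) &handle->recv_from,
                       &handle->recv_from_len,
                       &req->u.io.overlapped,
                       NULL);

  if (result == 0 && (handle->flags & UV_HANDLE_SYNC_BYTESREAD)) {
    /* Process the req without IOCP: no completion packet will be
     * posted, so record the byte count and queue the req ourselves. */
    req->u.io.overlapped.InternalHigh = bytes;
    uv__insert_pending_req(loop, req);
  } else if (result == 0 || WSAGetLastError() == WSA_IO_PENDING) {
    /* The req will be processed with IOCP. */
  } else {
    /* Make this req pending, reporting an error through the same
     * completion machinery instead of a separate error path. */
    SET_REQ_ERROR(req, WSAGetLastError());
    uv__insert_pending_req(loop, req);
  }
}
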
404 static int uv__send(uv_udp_send_t* req,
414 UV_REQ_INIT(req, UV_UDP_SEND);
415 req->handle = handle;
416 req->cb = cb;
417 memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
426 &req->u.io.overlapped,
431 req->u.io.queued_bytes = 0;
433 handle->send_queue_size += req->u.io.queued_bytes;
435 REGISTER_HANDLE_REQ(loop, handle, req);
436 uv__insert_pending_req(loop, (uv_req_t*)req);
439 req->u.io.queued_bytes = uv__count_bufs(bufs, nbufs);
441 handle->send_queue_size += req->u.io.queued_bytes;
443 REGISTER_HANDLE_REQ(loop, handle, req);
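
Lines 404-443 belong to uv__send, which initializes the request, issues an overlapped WSASendTo, and then splits on the same three outcomes. One detail worth noting: on inline success, queued_bytes is set to 0 before being added to handle->send_queue_size (lines 431-433), so the addition is a deliberate no-op kept for symmetry with the queued branch, where the full buffer size is counted (lines 439-441). A reconstruction from the listed lines (parts not shown in the listing, such as the exact success-test macro, are paraphrased from libuv and may differ in detail):

static int uv__send(uv_udp_send_t* req,
                    uv_udp_t* handle,
                    const uv_buf_t bufs[],
                    unsigned int nbufs,
                    const struct sockaddr* addr,
                    unsigned int addrlen,
                    uv_udp_send_cb cb) {
  uv_loop_t* loop = handle->loop;
  DWORD result, bytes;

  UV_REQ_INIT(req, UV_UDP_SEND);
  req->handle = handle;
  req->cb = cb;
  memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));

  result = WSASendTo(handle->socket,
                     (WSABUF*) bufs,
                     nbufs,
                     &bytes,
                     0,
                     addr,
                     addrlen,
                     &req->u.io.overlapped,
                     NULL);

  if (result == 0 && (handle->flags & UV_HANDLE_SYNC_BYTESREAD)) {
    /* Completed inline: nothing stays queued, so queued_bytes is 0 and
     * the send_queue_size addition below is a no-op kept for symmetry. */
    req->u.io.queued_bytes = 0;
    handle->send_queue_size += req->u.io.queued_bytes;
    REGISTER_HANDLE_REQ(loop, handle, req);
    uv__insert_pending_req(loop, (uv_req_t*) req);
  } else if (result == 0 || WSAGetLastError() == WSA_IO_PENDING) {
    /* Queued by the kernel: account for the bytes held in flight. */
    req->u.io.queued_bytes = uv__count_bufs(bufs, nbufs);
    handle->send_queue_size += req->u.io.queued_bytes;
    REGISTER_HANDLE_REQ(loop, handle, req);
  } else {
    /* Immediate failure: report the Winsock error to the caller. */
    return WSAGetLastError();
  }

  return 0;
}
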
454 uv_req_t* req) {
462 if (!REQ_SUCCESS(req)) {
463 DWORD err = GET_REQ_SOCK_ERROR(req);
490 partial = !REQ_SUCCESS(req);
492 req->u.io.overlapped.InternalHigh,
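
Lines 454-492 are from the recv completion handler. The notable move is at lines 490-492: REQ_SUCCESS also fails for WSAEMSGSIZE, which for UDP merely means the datagram was truncated to fit the buffer, so the handler reuses the failure bit as a "partial read" flag and still delivers overlapped.InternalHigh bytes to the callback. A paraphrased sketch (re-arm/stop handling elided; field names such as recv_buffer and recv_from follow libuv's private UDP handle fields):

/* Paraphrased sketch of the recv completion path (lines 454-492), not
 * the verbatim handler; re-arming the next read is elided. */
static void sketch_process_udp_recv_req(uv_loop_t* loop,
                                        uv_udp_t* handle,
                                        uv_req_t* req) {
  int partial;

  if (!REQ_SUCCESS(req)) {
    DWORD err = GET_REQ_SOCK_ERROR(req);
    if (err != WSAEMSGSIZE) {
      /* Hard error: surface it through the read callback as a
       * translated negative count. */
      handle->recv_cb(handle, uv_translate_sys_error(err),
                      &handle->recv_buffer, NULL, 0);
      return;
    }
  }

  /* WSAEMSGSIZE is benign for UDP: the datagram was truncated to the
   * buffer, so deliver what arrived and flag the read as partial. */
  partial = !REQ_SUCCESS(req);
  handle->recv_cb(handle,
                  req->u.io.overlapped.InternalHigh,  /* bytes received */
                  &handle->recv_buffer,
                  (const struct sockaddr*) &handle->recv_from,
                  partial ? UV_UDP_PARTIAL : 0);
}
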
565 uv_udp_send_t* req) {
570 assert(handle->send_queue_size >= req->u.io.queued_bytes);
572 handle->send_queue_size -= req->u.io.queued_bytes;
575 UNREGISTER_HANDLE_REQ(loop, handle, req);
577 if (req->cb) {
579 if (!REQ_SUCCESS(req)) {
580 err = GET_REQ_SOCK_ERROR(req);
582 req->cb(req, uv_translate_sys_error(err));
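
Lines 565-582 show the send completion handler: it reverses the queued-bytes accounting from uv__send, unregisters the req from the handle, and invokes the user callback. Since err is zero on success and uv_translate_sys_error(0) is 0, the callback always receives either 0 or a negative UV_* code, never a raw Winsock error. Reconstructed from the listed lines (tail bookkeeping paraphrased):

static void sketch_process_udp_send_req(uv_loop_t* loop,
                                        uv_udp_t* handle,
                                        uv_udp_send_t* req) {
  int err;

  assert(handle->send_queue_size >= req->u.io.queued_bytes);
  handle->send_queue_size -= req->u.io.queued_bytes;

  UNREGISTER_HANDLE_REQ(loop, handle, req);

  if (req->cb) {
    err = 0;
    if (!REQ_SUCCESS(req))
      err = GET_REQ_SOCK_ERROR(req);
    /* 0 stays 0; a Winsock error becomes a negative UV_* code. */
    req->cb(req, uv_translate_sys_error(err));
  }
}
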
1102 int uv__udp_send(uv_udp_send_t* req,
1125 err = uv__send(req, handle, bufs, nbufs, addr, addrlen, send_cb);
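
Finally, lines 1102-1125 are the thin entry point uv__udp_send, reached from the public uv_udp_send API; it forwards to the uv__send shown above. From the user's side the whole chain looks like this (a self-contained example against libuv's stable public API; the address and port are arbitrary):

#include <stdio.h>
#include <uv.h>

static void on_send(uv_udp_send_t* req, int status) {
  /* status is 0 or a UV_* error translated from the Winsock error
   * recorded on the req (see SET_REQ_ERROR / GET_REQ_SOCK_ERROR above). */
  (void) req;
  if (status < 0)
    fprintf(stderr, "send error: %s\n", uv_strerror(status));
}

int main(void) {
  uv_loop_t* loop = uv_default_loop();
  uv_udp_t sock;
  uv_udp_send_t req;
  struct sockaddr_in dest;
  uv_buf_t buf;
  char msg[] = "hello";

  uv_udp_init(loop, &sock);
  uv_ip4_addr("127.0.0.1", 9999, &dest);
  buf = uv_buf_init(msg, sizeof(msg) - 1);

  /* Lands in uv__udp_send (line 1102), which forwards to uv__send. */
  uv_udp_send(&req, &sock, &buf, 1, (const struct sockaddr*) &dest, on_send);

  return uv_run(loop, UV_RUN_DEFAULT);
}
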