This allows the client to postpone the initial I/O until the server has queued the I/O request. The server should perform the postprocessing only after the initial I/O has been done.
In the case of send_socket, the manipulation of event flags shall ideally be done *after* (not *before*) the client has attempted the initial I/O, since the outbound queue status of the socket may change due to I/O. Also, the implicitly bound address is available only after the send* system call has been performed.
Signed-off-by: Jinoh Kang jinoh.kang.kr@gmail.com --- server/sock.c | 43 ++++++++++++++++++++++++++++++------------- 1 file changed, 30 insertions(+), 13 deletions(-)
diff --git a/server/sock.c b/server/sock.c index e2717b2c867..c769de05f8a 100644 --- a/server/sock.c +++ b/server/sock.c @@ -3471,16 +3471,9 @@ DECL_HANDLER(recv_socket) release_object( sock ); }
-DECL_HANDLER(send_socket) +static void send_socket_initial_callback( void *private, unsigned int status ) { - struct sock *sock = (struct sock *)get_handle_obj( current->process, req->async.handle, 0, &sock_ops ); - unsigned int status = req->status; - timeout_t timeout = 0; - struct async *async; - struct fd *fd; - - if (!sock) return; - fd = sock->fd; + struct sock *sock = (struct sock *)private;
if (sock->type == WS_SOCK_DGRAM) { @@ -3488,7 +3481,7 @@ DECL_HANDLER(send_socket) union unix_sockaddr unix_addr; socklen_t unix_len = sizeof(unix_addr);
- if (!sock->bound && !getsockname( get_unix_fd( fd ), &unix_addr.addr, &unix_len )) + if (!sock->bound && !getsockname( get_unix_fd( sock->fd ), &unix_addr.addr, &unix_len )) sock->addr_len = sockaddr_from_unix( &unix_addr, &sock->addr.addr, sizeof(sock->addr) ); sock->bound = 1; } @@ -3500,12 +3493,37 @@ DECL_HANDLER(send_socket) sock->reported_events &= ~AFD_POLL_WRITE; }
+ sock_reselect( sock ); + release_object( sock ); +} + +DECL_HANDLER(send_socket) +{ + struct sock *sock = (struct sock *)get_handle_obj( current->process, req->async.handle, 0, &sock_ops ); + unsigned int status = req->status; + timeout_t timeout = 0; + struct async *async; + struct fd *fd; + + if (!sock) return; + fd = sock->fd; + /* If we had a short write and the socket is nonblocking (and the client is * not trying to force the operation to be asynchronous), return success. * Windows actually refuses to send any data in this case, and returns * EWOULDBLOCK, but we have no way of doing that. */ if (status == STATUS_DEVICE_NOT_READY && req->total && sock->nonblocking) + { + /* send() calls only clear and reselect events if unsuccessful. + * + * Since send_socket_initial_callback will observe success status (set + * by us) and skip clearing events, we shall clear them here. + */ + sock->pending_events &= ~AFD_POLL_WRITE; + sock->reported_events &= ~AFD_POLL_WRITE; + status = STATUS_SUCCESS; + }
/* send() returned EWOULDBLOCK or a short write, i.e. cannot send all data yet */ if (status == STATUS_DEVICE_NOT_READY && !sock->nonblocking) @@ -3535,15 +3553,14 @@ DECL_HANDLER(send_socket) } set_error( status );
+ async_set_initial_status_callback( async, send_socket_initial_callback, grab_object( sock )); + if (timeout) async_set_timeout( async, timeout, STATUS_IO_TIMEOUT );
if (status == STATUS_PENDING) queue_async( &sock->write_q, async );
- /* always reselect; we changed reported_events above */ - sock_reselect( sock ); - reply->wait = async_handoff( async, NULL, 0 ); reply->options = get_fd_options( fd ); release_object( async );