v2-v5: Fix some corner cases.
Signed-off-by: Guillaume Charifi guillaume.charifi@sfr.fr --- dlls/ntdll/unix/socket.c | 6 +-- include/wine/afd.h | 2 +- include/wine/server_protocol.h | 5 ++- server/protocol.def | 1 + server/request.h | 1 + server/sock.c | 75 ++++++++++++++++++++++++++++++++-- server/trace.c | 3 +- 7 files changed, 83 insertions(+), 10 deletions(-)
diff --git a/dlls/ntdll/unix/socket.c b/dlls/ntdll/unix/socket.c index 8469def786a..b1b90b363ee 100644 --- a/dlls/ntdll/unix/socket.c +++ b/dlls/ntdll/unix/socket.c @@ -747,12 +747,11 @@ static NTSTATUS sock_poll( HANDLE handle, HANDLE event, PIO_APC_ROUTINE apc, voi || in_size < offsetof( struct afd_poll_params, sockets[params->count] )) return STATUS_INVALID_PARAMETER;
- TRACE( "timeout %s, count %u, unknown %#x, padding (%#x, %#x, %#x), sockets[0] {%04lx, %#x}\n", - wine_dbgstr_longlong(params->timeout), params->count, params->unknown, + TRACE( "timeout %s, count %u, exclusive %u, padding (%#x, %#x, %#x), sockets[0] {%04lx, %#x}\n", + wine_dbgstr_longlong(params->timeout), params->count, params->exclusive, params->padding[0], params->padding[1], params->padding[2], params->sockets[0].socket, params->sockets[0].flags );
- if (params->unknown) FIXME( "unknown boolean is %#x\n", params->unknown ); if (params->padding[0]) FIXME( "padding[0] is %#x\n", params->padding[0] ); if (params->padding[1]) FIXME( "padding[1] is %#x\n", params->padding[1] ); if (params->padding[2]) FIXME( "padding[2] is %#x\n", params->padding[2] ); @@ -793,6 +792,7 @@ static NTSTATUS sock_poll( HANDLE handle, HANDLE event, PIO_APC_ROUTINE apc, voi SERVER_START_REQ( poll_socket ) { req->async = server_async( handle, &async->io, event, apc, apc_user, iosb_client_ptr(io) ); + req->exclusive = !!params->exclusive; req->timeout = params->timeout; wine_server_add_data( req, input, params->count * sizeof(*input) ); wine_server_set_reply( req, async->sockets, params->count * sizeof(async->sockets[0]) ); diff --git a/include/wine/afd.h b/include/wine/afd.h index e67ecae25a9..f4682f464e8 100644 --- a/include/wine/afd.h +++ b/include/wine/afd.h @@ -104,7 +104,7 @@ struct afd_poll_params { LONGLONG timeout; unsigned int count; - BOOLEAN unknown; + BOOLEAN exclusive; BOOLEAN padding[3]; struct { diff --git a/include/wine/server_protocol.h b/include/wine/server_protocol.h index bb4862c9669..14518f0bf6f 100644 --- a/include/wine/server_protocol.h +++ b/include/wine/server_protocol.h @@ -1761,7 +1761,8 @@ struct poll_socket_output struct poll_socket_request { struct request_header __header; - char __pad_12[4]; + char exclusive; + char __pad_13[3]; async_data_t async; timeout_t timeout; /* VARARG(sockets,poll_socket_input); */ @@ -6252,7 +6253,7 @@ union generic_reply
/* ### protocol_version begin ### */
-#define SERVER_PROTOCOL_VERSION 726 +#define SERVER_PROTOCOL_VERSION 727
/* ### protocol_version end ### */
diff --git a/server/protocol.def b/server/protocol.def index 133d6ad0552..8eefdaae17f 100644 --- a/server/protocol.def +++ b/server/protocol.def @@ -1450,6 +1450,7 @@ struct poll_socket_output
/* Perform an async poll on a socket */ @REQ(poll_socket) + char exclusive; async_data_t async; /* async I/O parameters */ timeout_t timeout; /* timeout */ VARARG(sockets,poll_socket_input); /* list of sockets to poll */ diff --git a/server/request.h b/server/request.h index 18a6a2df3c7..f1c8f1c5432 100644 --- a/server/request.h +++ b/server/request.h @@ -1045,6 +1045,7 @@ C_ASSERT( sizeof(struct recv_socket_request) == 64 ); C_ASSERT( FIELD_OFFSET(struct recv_socket_reply, wait) == 8 ); C_ASSERT( FIELD_OFFSET(struct recv_socket_reply, options) == 12 ); C_ASSERT( sizeof(struct recv_socket_reply) == 16 ); +C_ASSERT( FIELD_OFFSET(struct poll_socket_request, exclusive) == 12 ); C_ASSERT( FIELD_OFFSET(struct poll_socket_request, async) == 16 ); C_ASSERT( FIELD_OFFSET(struct poll_socket_request, timeout) == 56 ); C_ASSERT( sizeof(struct poll_socket_request) == 64 ); diff --git a/server/sock.c b/server/sock.c index 50bfc08e145..7cdbed940c6 100644 --- a/server/sock.c +++ b/server/sock.c @@ -128,6 +128,7 @@ struct poll_req struct async *async; struct iosb *iosb; struct timeout_user *timeout; + char exclusive; unsigned int count; struct poll_socket_output *output; struct @@ -2853,8 +2854,8 @@ static int poll_single_socket( struct sock *sock, int mask ) return get_poll_flags( sock, pollfd.revents ) & mask; }
-static int poll_socket( struct sock *poll_sock, struct async *async, timeout_t timeout, - unsigned int count, const struct poll_socket_input *input ) +static int poll_socket( struct sock *poll_sock, struct async *async, char exclusive, + timeout_t timeout, unsigned int count, const struct poll_socket_input *input ) { struct poll_socket_output *output; struct poll_req *req; @@ -2893,11 +2894,79 @@ static int poll_socket( struct sock *poll_sock, struct async *async, timeout_t t req->sockets[i].flags = input[i].flags; }
+ req->exclusive = exclusive; req->count = count; req->async = (struct async *)grab_object( async ); req->iosb = async_get_iosb( async ); req->output = output;
+ if (exclusive) + { + int has_shared = FALSE; + struct poll_req *areq; + + LIST_FOR_EACH_ENTRY( areq, &poll_list, struct poll_req, entry ) + { + int skip_areq = FALSE; + + for (i = 0; i < areq->count; ++i) + { + struct sock *asock = areq->sockets[i].sock; + + for (j = 0; j < req->count; ++j) + { + if (asock == req->sockets[j].sock) + { + if (!areq->exclusive) + has_shared = TRUE; + + skip_areq = TRUE; + break; + } + } + + if (skip_areq || has_shared) + break; + } + + if (has_shared) + break; + } + + LIST_FOR_EACH_ENTRY( areq, &poll_list, struct poll_req, entry ) + { + int areq_term = FALSE; + + if (has_shared) + break; + + for (i = 0; i < areq->count; ++i) + { + struct sock *asock = areq->sockets[i].sock; + + for (j = 0; j < req->count; ++j) + { + if (asock == req->sockets[j].sock) + { + areq_term = TRUE; + break; + } + } + + if (areq_term) + break; + } + + if (areq_term) + { + areq->iosb->status = STATUS_SUCCESS; + areq->iosb->out_data = areq->output; + areq->iosb->out_size = areq->count * sizeof(*areq->output); + async_terminate( areq->async, STATUS_ALERTED ); + } + } + } + list_add_tail( &poll_list, &req->entry ); async_set_completion_callback( async, free_poll_req, req ); queue_async( &poll_sock->poll_q, async ); @@ -3312,7 +3381,7 @@ DECL_HANDLER(poll_socket)
if ((async = create_request_async( sock->fd, get_fd_comp_flags( sock->fd ), &req->async ))) { - reply->wait = async_handoff( async, poll_socket( sock, async, req->timeout, count, input ), NULL, 0 ); + reply->wait = async_handoff( async, poll_socket( sock, async, req->exclusive, req->timeout, count, input ), NULL, 0 ); reply->options = get_fd_options( sock->fd ); release_object( async ); } diff --git a/server/trace.c b/server/trace.c index 4e91d933a5d..567c54c4df7 100644 --- a/server/trace.c +++ b/server/trace.c @@ -2122,7 +2122,8 @@ static void dump_recv_socket_reply( const struct recv_socket_reply *req )
static void dump_poll_socket_request( const struct poll_socket_request *req ) { - dump_async_data( " async=", &req->async ); + fprintf( stderr, " exclusive=%c", req->exclusive ); + dump_async_data( ", async=", &req->async ); dump_timeout( ", timeout=", &req->timeout ); dump_varargs_poll_socket_input( ", sockets=", cur_size ); }
Signed-off-by: Guillaume Charifi guillaume.charifi@sfr.fr --- dlls/ws2_32/tests/afd.c | 73 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 72 insertions(+), 1 deletion(-)
diff --git a/dlls/ws2_32/tests/afd.c b/dlls/ws2_32/tests/afd.c index eb7c8ee50de..2568f251b6f 100644 --- a/dlls/ws2_32/tests/afd.c +++ b/dlls/ws2_32/tests/afd.c @@ -148,8 +148,10 @@ static void test_poll(void) const struct sockaddr_in bind_addr = {.sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_LOOPBACK)}; char in_buffer[offsetof(struct afd_poll_params, sockets[3])]; char out_buffer[offsetof(struct afd_poll_params, sockets[3])]; + char out_buffer1[offsetof(struct afd_poll_params, sockets[3])]; struct afd_poll_params *in_params = (struct afd_poll_params *)in_buffer; struct afd_poll_params *out_params = (struct afd_poll_params *)out_buffer; + struct afd_poll_params *out_params1 = (struct afd_poll_params *)out_buffer1; int large_buffer_size = 1024 * 1024; SOCKET client, server, listener; struct sockaddr_in addr; @@ -157,13 +159,14 @@ static void test_poll(void) IO_STATUS_BLOCK io; LARGE_INTEGER now; ULONG params_size; - HANDLE event; + HANDLE event, event1; int ret, len;
large_buffer = malloc(large_buffer_size); memset(in_buffer, 0, sizeof(in_buffer)); memset(out_buffer, 0, sizeof(out_buffer)); event = CreateEventW(NULL, TRUE, FALSE, NULL); + event1 = CreateEventW(NULL, TRUE, FALSE, NULL);
listener = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); ret = bind(listener, (const struct sockaddr *)&bind_addr, sizeof(bind_addr)); @@ -208,10 +211,77 @@ static void test_poll(void) IOCTL_AFD_POLL, in_params, params_size, out_params, params_size); ok(ret == STATUS_INVALID_PARAMETER, "got %#x\n", ret);
+ /* Test exclusive flag */ + + client = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); + + in_params->timeout = -1000 * 10000; + in_params->count = 1; + in_params->exclusive = FALSE; + in_params->sockets[0].socket = client; + in_params->sockets[0].flags = ~0; + in_params->sockets[0].status = 0; + + out_params->sockets[0].socket = 0; + out_params->sockets[0].flags = 0; + out_params->sockets[0].status = 0xdeadbeef; + + ret = NtDeviceIoControlFile((HANDLE)listener, event, NULL, NULL, &io, + IOCTL_AFD_POLL, in_params, params_size, out_params, params_size); + ok(ret == STATUS_PENDING, "got %#x\n", ret); + + in_params->timeout = 0x7fffffffffffffff; /* TIMEOUT_INFINITE */ + in_params->exclusive = TRUE; + + ret = NtDeviceIoControlFile((HANDLE)listener, event1, NULL, NULL, &io, + IOCTL_AFD_POLL, in_params, params_size, out_params1, params_size); + ok(ret == STATUS_PENDING, "got %#x\n", ret); + + ret = WaitForSingleObject(event, 100); + ok(ret == STATUS_TIMEOUT, "got %#x\n", ret); + ok(!out_params->sockets[0].socket, "got socket %#Ix\n", out_params->sockets[0].socket); + ok(!out_params->sockets[0].flags, "got flags %#x\n", out_params->sockets[0].flags); + ok(out_params->sockets[0].status == 0xdeadbeef, "got status %#x\n", out_params->sockets[0].status); + + closesocket(client); + + ret = WaitForSingleObject(event, 100); + ok(!ret, "got %#x\n", ret); + + client = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); + + out_params->sockets[0].socket = 0; + out_params->sockets[0].flags = 0; + out_params->sockets[0].status = 0xdeadbeef; + + ret = NtDeviceIoControlFile((HANDLE)listener, event, NULL, NULL, &io, + IOCTL_AFD_POLL, in_params, params_size, out_params, params_size); + ok(ret == STATUS_PENDING, "got %#x\n", ret); + + ret = NtDeviceIoControlFile((HANDLE)listener, event1, NULL, NULL, &io, + IOCTL_AFD_POLL, in_params, params_size, out_params1, params_size); + ok(ret == STATUS_PENDING, "got %#x\n", ret); + + ret = WaitForSingleObject(event, 100); + ok(!ret, "got %#x\n", ret); + 
ok(io.Status == STATUS_SUCCESS, "got %#x\n", io.Status); + ok(io.Information == offsetof(struct afd_poll_params, sockets[0]), "got %#Ix\n", io.Information); + ok(out_params->timeout == 0x7fffffffffffffff, "got timeout %#I64x\n", out_params->timeout); + ok(!out_params->count, "got count %u\n", out_params->count); + ok(!out_params->sockets[0].socket, "got socket %#Ix\n", out_params->sockets[0].socket); + ok(!out_params->sockets[0].flags, "got flags %#x\n", out_params->sockets[0].flags); + ok(out_params->sockets[0].status == 0xdeadbeef, "got status %#x\n", out_params->sockets[0].status); + + closesocket(client); + + ret = WaitForSingleObject(event1, 100); + ok(!ret, "got %#x\n", ret); + /* Basic semantics of the ioctl. */
in_params->timeout = 0; in_params->count = 1; + in_params->exclusive = FALSE; in_params->sockets[0].socket = listener; in_params->sockets[0].flags = ~0; in_params->sockets[0].status = 0xdeadbeef; @@ -743,6 +813,7 @@ static void test_poll(void) closesocket(server);
CloseHandle(event); + CloseHandle(event1); free(large_buffer); }
I think these tests could be more extensive. For example:
* What happens if you try to perform a non-exclusive wait while an exclusive wait is in progress? i.e. the inverse of the test added here.
* Can we verify that exclusive waits are specific to a given socket (i.e. you can simultaneously do an exclusive wait on two different sockets?)
* Can we test that exclusive waits apply if two unequal sets overlap?
* Can you do an exclusive wait on the same socket from two different threads?
There's also a part of me that would like to know what can possibly be the point of this API; it's hard to even think of what "reasonable" behaviour is otherwise. Do you happen to know?
My best guess is that it's meant to explicitly interrupt poll requests somehow (what, I/O cancellation wasn't good enough for them?), in which case it'd be good to explicitly confirm or refute that the "exclusive" poll really does act like a normal poll, and doesn't e.g. return immediately.
On 9/1/21 6:04 PM, Zebediah Figura (she/her) wrote:
I think these tests could be more extensive. For example:
- What happens if you try to perform a non-exclusive wait while an
exclusive wait is in progress? i.e. the inverse of the test added here.
- Can we verify that exclusive waits are specific to a given socket
(i.e. you can simultaneously do an exclusive wait on two different sockets?)
Can we test that exclusive waits apply if two unequal sets overlap?
Can you do an exclusive wait on the same socket from two different
threads?
There's also a part of me that would like to know what can possibly be the point of this API; it's hard to even think of what "reasonable" behaviour is otherwise. Do you happen to know?
My best guess is that it's meant to explicitly interrupt poll requests somehow (what, I/O cancellation wasn't good enough for them?), in which case it'd be good to explicitly confirm or refute that the "exclusive" poll really does act like a normal poll, and doesn't e.g. return immediately.
One more thing I meant to say: test_poll() is already big enough as it is, and the placement of these tests before the "basic" tests is not great either. I'd recommend putting all of the exclusive tests in a separate function.
Thank you for your answer!
I spent a lot of time testing a whole lot of different combinations and the results still confused me...
There's also a part of me that would like to know what can possibly be the point of this API; it's hard to even think of what "reasonable" behaviour is otherwise. Do you happen to know?
My best guess is that it's meant to explicitly interrupt poll requests somehow (what, I/O cancellation wasn't good enough for them?), in which case it'd be good to explicitly confirm or refute that the "exclusive" poll really does act like a normal poll, and doesn't e.g. return immediately.
I discovered this behavior while investigating an app that seemed to rely on them to interrupt some polls, and so far it seems to be its sole purpose. There is only one reason I can imagine a use for this instead of CancelIo(): As the NtDeviceIoControlFile()'s file handle is independent of the socket in params, it provides a way to interrupt another poll that waits on the same socket without the need to know the file handle that was originally given to it.
Regardless its behaviour is kind of crazy. I tried to restrict to a subset of the tests to avoid polluting test_poll() too much, but indeed it requires an entire function to test it properly.
I think these tests could be more extensive. For example:
- What happens if you try to perform a non-exclusive wait while an
exclusive wait is in progress? i.e. the inverse of the test added here.
So after some extensive testing, I'm rather confident that the logic for a new poll on a socket is: (The main poll is an arbitrary designation for one of the polls waiting on the socket) If there is no main poll on the socket, then the new poll becomes the main poll. If the main poll is terminated, then there is no main poll anymore. If the main poll is exclusive and the new poll is exclusive, then the main poll is terminated and the new poll becomes the main poll.
The flag does not seem to do anything more than this (apart from requiring 0x7fffffff00000000 <= timeout <= 0x7fffffffffffffff).
- Can we verify that exclusive waits are specific to a given socket
(i.e. you can simultaneously do an exclusive wait on two different sockets?)
Yes, the mechanism described earlier seems to be done on a per-socket basis.
- Can we test that exclusive waits apply if two unequal sets overlap?
Yes, it works the same way (a single common socket between exclusive polls is enough to wake the poll)
- Can you do an exclusive wait on the same socket from two different
threads?
Can yes, didn't want to, but did it anyway :) I get the expected result: It works exactly the same as the single threaded version.
So I guess I now have enough insight to write a proper implementation. The patch for the tests is attached to this job: https://testbot.winehq.org/JobDetails.pl?Key=97234 [1] Feel free to tell me if other tests come to your mind.
Le mercredi 1 septembre 2021, 17:52:14 CEST Guillaume Charifi a écrit :
Signed-off-by: Guillaume Charifi guillaume.charifi@sfr.fr
dlls/ws2_32/tests/afd.c | 73 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 72 insertions(+), 1 deletion(-)
diff --git a/dlls/ws2_32/tests/afd.c b/dlls/ws2_32/tests/afd.c index eb7c8ee50de..2568f251b6f 100644 --- a/dlls/ws2_32/tests/afd.c +++ b/dlls/ws2_32/tests/afd.c @@ -148,8 +148,10 @@ static void test_poll(void) const struct sockaddr_in bind_addr = {.sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_LOOPBACK)}; char in_buffer[offsetof(struct afd_poll_params, sockets[3])]; char out_buffer[offsetof(struct afd_poll_params, sockets[3])]; + char out_buffer1[offsetof(struct afd_poll_params, sockets[3])]; struct afd_poll_params *in_params = (struct afd_poll_params *)in_buffer; struct afd_poll_params *out_params = (struct afd_poll_params *)out_buffer; + struct afd_poll_params *out_params1 = (struct afd_poll_params *)out_buffer1; int large_buffer_size = 1024 * 1024; SOCKET client, server, listener; struct sockaddr_in addr; @@ -157,13 +159,14 @@ static void test_poll(void) IO_STATUS_BLOCK io; LARGE_INTEGER now; ULONG params_size;
- HANDLE event;
HANDLE event, event1; int ret, len;
large_buffer = malloc(large_buffer_size); memset(in_buffer, 0, sizeof(in_buffer)); memset(out_buffer, 0, sizeof(out_buffer)); event = CreateEventW(NULL, TRUE, FALSE, NULL);
event1 = CreateEventW(NULL, TRUE, FALSE, NULL);
listener = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); ret = bind(listener, (const struct sockaddr *)&bind_addr,
sizeof(bind_addr)); @@ -208,10 +211,77 @@ static void test_poll(void) IOCTL_AFD_POLL, in_params, params_size, out_params, params_size); ok(ret == STATUS_INVALID_PARAMETER, "got %#x\n", ret);
Here is an updated job with easier-to-read comments: https://testbot.winehq.org/JobDetails.pl?Key=97235 [1]
Le jeudi 2 septembre 2021, 19:08:43 CEST Guillaume Charifi-Hoareau a écrit :
Thank you for your answer!
I spent a lot of time testing a whole lot of different combinations and the results still confused me...
There's also a part of me that would like to know what can possibly be the point of this API; it's hard to even think of what "reasonable" behaviour is otherwise. Do you happen to know?
My best guess is that it's meant to explicitly interrupt poll requests somehow (what, I/O cancellation wasn't good enough for them?), in which case it'd be good to explicitly confirm or refute that the "exclusive" poll really does act like a normal poll, and doesn't e.g. return immediately.
I discovered this behavior while investigating an app that seemed to rely on them to interrupt some polls, and so far it seems to be its sole purpose. There is only one reason I can imagine a use for this instead of CancelIo(): As the NtDeviceIoControlFile()'s file handle is independent of the socket in params, it provides a way to interrupt another poll that waits on the same socket without the need to know the file handle that was originally given to it.
Regardless its behaviour is kind of crazy. I tried to restrict to a subset of the tests to avoid polluting test_poll() too much, but indeed it requires an entire function to test it properly.
I think these tests could be more extensive. For example:
- What happens if you try to perform a non-exclusive wait while an
exclusive wait is in progress? i.e. the inverse of the test added here.
So after some extensive testing, I'm rather confident that the logic for a new poll on a socket is: (The main poll is an arbitrary designation for one of the polls waiting on the socket) If there is no main poll on the socket, then the new poll becomes the main poll. If the main poll is terminated, then there is no main poll anymore. If the main poll is exclusive and the new poll is exclusive, then the main poll is terminated and the new poll becomes the main poll.
The flag does not seem to do anything more than this (apart from requiring 0x7fffffff00000000 <= timeout <= 0x7fffffffffffffff).
- Can we verify that exclusive waits are specific to a given socket
(i.e. you can simultaneously do an exclusive wait on two different sockets?)
Yes, the mechanism described earlier seems to be done on a per-socket basis.
- Can we test that exclusive waits apply if two unequal sets overlap?
Yes, it works the same way (a single common socket between exclusive polls is enough to wake the poll)
- Can you do an exclusive wait on the same socket from two different
threads?
Can yes, didn't want to, but did it anyway :) I get the expected result: It works exactly the same as the single threaded version.
So I guess I now have enough insight to write a proper implementation. The patch for the tests is attached to this job: https://testbot.winehq.org/JobDetails.pl?Key=97234 [1] Feel free to tell me if other tests come to your mind.
Le mercredi 1 septembre 2021, 17:52:14 CEST Guillaume Charifi a écrit :
Signed-off-by: Guillaume Charifi guillaume.charifi@sfr.fr
dlls/ws2_32/tests/afd.c | 73 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 72 insertions(+), 1 deletion(-)
diff --git a/dlls/ws2_32/tests/afd.c b/dlls/ws2_32/tests/afd.c index eb7c8ee50de..2568f251b6f 100644 --- a/dlls/ws2_32/tests/afd.c +++ b/dlls/ws2_32/tests/afd.c @@ -148,8 +148,10 @@ static void test_poll(void)
const struct sockaddr_in bind_addr = {.sin_family = AF_INET,
.sin_addr.s_addr = htonl(INADDR_LOOPBACK)}; char in_buffer[offsetof(struct afd_poll_params, sockets[3])]; char out_buffer[offsetof(struct afd_poll_params, sockets[3])]; + char out_buffer1[offsetof(struct afd_poll_params, sockets[3])]; struct afd_poll_params *in_params = (struct afd_poll_params *)in_buffer; struct afd_poll_params *out_params = (struct afd_poll_params *)out_buffer; + struct afd_poll_params *out_params1 = (struct afd_poll_params *)out_buffer1; int large_buffer_size = 1024 * 1024;
SOCKET client, server, listener; struct sockaddr_in addr;
@@ -157,13 +159,14 @@ static void test_poll(void)
IO_STATUS_BLOCK io; LARGE_INTEGER now; ULONG params_size;
- HANDLE event;
HANDLE event, event1;
int ret, len;
large_buffer = malloc(large_buffer_size); memset(in_buffer, 0, sizeof(in_buffer)); memset(out_buffer, 0, sizeof(out_buffer));
On 9/2/21 12:08 PM, Guillaume Charifi-Hoareau wrote:
Thank you for your answer!
I spent a lot of time testing a whole lot of different combinations and the results still confused me...
There's also a part of me that would like to know what can possibly be the point of this API; it's hard to even think of what "reasonable" behaviour is otherwise. Do you happen to know?
My best guess is that it's meant to explicitly interrupt poll requests somehow (what, I/O cancellation wasn't good enough for them?), in which case it'd be good to explicitly confirm or refute that the "exclusive" poll really does act like a normal poll, and doesn't e.g. return immediately.
I discovered this behavior while investigating an app that seemed to rely on them to interrupt some polls, and so far it seems to be its sole purpose. There is only one reason I can imagine a use for this instead of CancelIo(): As the NtDeviceIoControlFile()'s file handle is independent of the socket in params, it provides a way to interrupt another poll that waits on the same socket without the need to know the file handle that was originally given to it.
Regardless its behaviour is kind of crazy. I tried to restrict to a subset of the tests to avoid polluting test_poll() too much, but indeed it requires an entire function to test it properly.
I think these tests could be more extensive. For example:
- What happens if you try to perform a non-exclusive wait while an
exclusive wait is in progress? i.e. the inverse of the test added here.
So after some extensive testing, I'm rather confident that the logic for a new poll on a socket is: (The main poll is an arbitrary designation for one of the poll waiting on the socket) If there is no main poll on the socket, then the new poll becomes the main poll. If the main poll is terminated, then there is no main poll anymore. If the main poll is exclusive and the new poll is exclusive, then the main poll is terminated and the new poll becomes the main poll.
The flag does not seem to do anything more than this (apart from requiring 0x7fffffff00000000 <= timeout <= 0x7fffffffffffffff).
Wow. That is some of the bizarrest behaviour I've seen. I'd call it a bug, but I can't even figure out what behaviour they were *trying* to implement.
I kept reading through your test, trying to describe the behaviour in simpler (or saner) terms, and failing. So congratulations, I think you've hit the nail on the head :D
- Can we verify that exclusive waits are specific to a given socket
(i.e. you can simultaneously do an exclusive wait on two different sockets?)
Yes, the mechanism described earlier seems to be done on a per-socket basis.
- Can we test that exclusive waits apply if two unequal sets overlap?
Yes, it works the same way (a single common socket between exclusive polls is enough to wake the poll)
- Can you do an exclusive wait on the same socket from two different
threads?
Can yes, didn't want to, but did it anyway :) I get the expected result: It works exactly the same as the single threaded version.
So I guess I now have enough insight to write a proper implementation. The patch for the tests is attached to this job: https://testbot.winehq.org/JobDetails.pl?Key=97234 [1] Feel free to tell me if other tests come to your mind.
Yeah, that looks about right, nothing else comes to mind now.
In terms of some informal review on those, I saw that one and the one with the comments changed. I think in both cases the comments are kind of redundant—reading the code tells me as much and is actually kind of easier. Where comments could help (and the way I usually tend to write them) is in listing the conclusions that each following chunk of code actually proves. That's a bit vague, but I think it would help make the tests make sense a bit more readily.
Also, a nitpick—there's a C99 variable declaration in the for loop initializer; unfortunately we have to avoid those.
Oops sorry, I noticed your mail after I sent the patches, I'm rebasing and resending
Le jeudi 2 septembre 2021, 23:29:55 CEST Zebediah Figura (she/her) a écrit :
Wow. That is some of the bizarrest behaviour I've seen. I'd call it a bug, but I can't even figure out what behaviour they were *trying* to implement.
Well you know... windows being a bug per se... ¯\_(ツ)_/¯
I kept reading through your test, trying to describe the behaviour in simpler (or saner) terms, and failing. So congratulations, I think you've hit the nail on the head :D
Thank you that's kind! :)
Yeah, that looks about right, nothing else comes to mind now.
In terms of some informal review on those, I saw that one and the one with the comments changed. I think in both cases the comments are kind of redundant—reading the code tells me as much and is actually kind of easier. Where comments could help (and the way I usually tend to write them) is in listing the conclusions that each following chunk of code actually proves. That's a bit vague, but I think it would help make the tests make sense a bit more readily.
Ok, I'll add some explanatory notes.
Also, a nitpick—there's a C99 variable declaration in the for loop initializer; unfortunately we have to avoid those.
Oops, fixing!
On 9/1/21 10:52 AM, Guillaume Charifi wrote:
v2-v5: Fix some corner cases.
Signed-off-by: Guillaume Charifi guillaume.charifi@sfr.fr
dlls/ntdll/unix/socket.c | 6 +-- include/wine/afd.h | 2 +- include/wine/server_protocol.h | 5 ++- server/protocol.def | 1 + server/request.h | 1 + server/sock.c | 75 ++++++++++++++++++++++++++++++++-- server/trace.c | 3 +- 7 files changed, 83 insertions(+), 10 deletions(-)
diff --git a/dlls/ntdll/unix/socket.c b/dlls/ntdll/unix/socket.c index 8469def786a..b1b90b363ee 100644 --- a/dlls/ntdll/unix/socket.c +++ b/dlls/ntdll/unix/socket.c @@ -747,12 +747,11 @@ static NTSTATUS sock_poll( HANDLE handle, HANDLE event, PIO_APC_ROUTINE apc, voi || in_size < offsetof( struct afd_poll_params, sockets[params->count] )) return STATUS_INVALID_PARAMETER;
- TRACE( "timeout %s, count %u, unknown %#x, padding (%#x, %#x, %#x), sockets[0] {%04lx, %#x}\n",
wine_dbgstr_longlong(params->timeout), params->count, params->unknown,
- TRACE( "timeout %s, count %u, exclusive %u, padding (%#x, %#x, %#x), sockets[0] {%04lx, %#x}\n",
wine_dbgstr_longlong(params->timeout), params->count, params->exclusive, params->padding[0], params->padding[1], params->padding[2], params->sockets[0].socket, params->sockets[0].flags );
- if (params->unknown) FIXME( "unknown boolean is %#x\n", params->unknown ); if (params->padding[0]) FIXME( "padding[0] is %#x\n", params->padding[0] ); if (params->padding[1]) FIXME( "padding[1] is %#x\n", params->padding[1] ); if (params->padding[2]) FIXME( "padding[2] is %#x\n", params->padding[2] );
@@ -793,6 +792,7 @@ static NTSTATUS sock_poll( HANDLE handle, HANDLE event, PIO_APC_ROUTINE apc, voi SERVER_START_REQ( poll_socket ) { req->async = server_async( handle, &async->io, event, apc, apc_user, iosb_client_ptr(io) );
req->exclusive = !!params->exclusive; req->timeout = params->timeout; wine_server_add_data( req, input, params->count * sizeof(*input) ); wine_server_set_reply( req, async->sockets, params->count * sizeof(async->sockets[0]) );
diff --git a/include/wine/afd.h b/include/wine/afd.h index e67ecae25a9..f4682f464e8 100644 --- a/include/wine/afd.h +++ b/include/wine/afd.h @@ -104,7 +104,7 @@ struct afd_poll_params { LONGLONG timeout; unsigned int count;
- BOOLEAN unknown;
- BOOLEAN exclusive; BOOLEAN padding[3]; struct {
diff --git a/include/wine/server_protocol.h b/include/wine/server_protocol.h index bb4862c9669..14518f0bf6f 100644 --- a/include/wine/server_protocol.h +++ b/include/wine/server_protocol.h @@ -1761,7 +1761,8 @@ struct poll_socket_output struct poll_socket_request { struct request_header __header;
- char __pad_12[4];
- char exclusive;
- char __pad_13[3]; async_data_t async; timeout_t timeout; /* VARARG(sockets,poll_socket_input); */
@@ -6252,7 +6253,7 @@ union generic_reply
/* ### protocol_version begin ### */
-#define SERVER_PROTOCOL_VERSION 726 +#define SERVER_PROTOCOL_VERSION 727
/* ### protocol_version end ### */
diff --git a/server/protocol.def b/server/protocol.def index 133d6ad0552..8eefdaae17f 100644 --- a/server/protocol.def +++ b/server/protocol.def @@ -1450,6 +1450,7 @@ struct poll_socket_output
/* Perform an async poll on a socket */ @REQ(poll_socket)
- char exclusive;
If you look at the generated server/trace.c output, this generates %c, which isn't really what we want. int is fine here.
async_data_t async; /* async I/O parameters */ timeout_t timeout; /* timeout */ VARARG(sockets,poll_socket_input); /* list of sockets to poll */
diff --git a/server/request.h b/server/request.h index 18a6a2df3c7..f1c8f1c5432 100644 --- a/server/request.h +++ b/server/request.h @@ -1045,6 +1045,7 @@ C_ASSERT( sizeof(struct recv_socket_request) == 64 ); C_ASSERT( FIELD_OFFSET(struct recv_socket_reply, wait) == 8 ); C_ASSERT( FIELD_OFFSET(struct recv_socket_reply, options) == 12 ); C_ASSERT( sizeof(struct recv_socket_reply) == 16 ); +C_ASSERT( FIELD_OFFSET(struct poll_socket_request, exclusive) == 12 ); C_ASSERT( FIELD_OFFSET(struct poll_socket_request, async) == 16 ); C_ASSERT( FIELD_OFFSET(struct poll_socket_request, timeout) == 56 ); C_ASSERT( sizeof(struct poll_socket_request) == 64 ); diff --git a/server/sock.c b/server/sock.c index 50bfc08e145..7cdbed940c6 100644 --- a/server/sock.c +++ b/server/sock.c @@ -128,6 +128,7 @@ struct poll_req struct async *async; struct iosb *iosb; struct timeout_user *timeout;
- char exclusive; unsigned int count; struct poll_socket_output *output; struct
@@ -2853,8 +2854,8 @@ static int poll_single_socket( struct sock *sock, int mask ) return get_poll_flags( sock, pollfd.revents ) & mask; }
-static int poll_socket( struct sock *poll_sock, struct async *async, timeout_t timeout,
unsigned int count, const struct poll_socket_input *input )
+static int poll_socket( struct sock *poll_sock, struct async *async, char exclusive,
{ struct poll_socket_output *output; struct poll_req *req;timeout_t timeout, unsigned int count, const struct poll_socket_input *input )
@@ -2893,11 +2894,79 @@ static int poll_socket( struct sock *poll_sock, struct async *async, timeout_t t req->sockets[i].flags = input[i].flags; }
req->exclusive = exclusive; req->count = count; req->async = (struct async *)grab_object( async ); req->iosb = async_get_iosb( async ); req->output = output;
if (exclusive)
{
int has_shared = FALSE;
struct poll_req *areq;
LIST_FOR_EACH_ENTRY( areq, &poll_list, struct poll_req, entry )
{
int skip_areq = FALSE;
for (i = 0; i < areq->count; ++i)
{
struct sock *asock = areq->sockets[i].sock;
for (j = 0; j < req->count; ++j)
{
if (asock == req->sockets[j].sock)
{
if (!areq->exclusive)
has_shared = TRUE;
skip_areq = TRUE;
break;
}
}
if (skip_areq || has_shared)
break;
}
if (has_shared)
break;
}
LIST_FOR_EACH_ENTRY( areq, &poll_list, struct poll_req, entry )
{
int areq_term = FALSE;
if (has_shared)
break;
Wait — so with this logic we don't wake up *any* exclusive pollers as long as there is *any* shared poller overlapping the same sockets? That behaviour definitely deserves a test.
for (i = 0; i < areq->count; ++i)
{
struct sock *asock = areq->sockets[i].sock;
for (j = 0; j < req->count; ++j)
{
if (asock == req->sockets[j].sock)
{
areq_term = TRUE;
break;
}
}
if (areq_term)
break;
}
if (areq_term)
{
areq->iosb->status = STATUS_SUCCESS;
areq->iosb->out_data = areq->output;
areq->iosb->out_size = areq->count * sizeof(*areq->output);
async_terminate( areq->async, STATUS_ALERTED );
}
}
These nested loops are not particularly easy to read; factoring the "do these two poll requests share a socket?" check into a helper function would make this a lot clearer.
- }
list_add_tail( &poll_list, &req->entry ); async_set_completion_callback( async, free_poll_req, req ); queue_async( &poll_sock->poll_q, async );
@@ -3312,7 +3381,7 @@ DECL_HANDLER(poll_socket)
if ((async = create_request_async( sock->fd, get_fd_comp_flags( sock->fd ), &req->async ))) {
reply->wait = async_handoff( async, poll_socket( sock, async, req->timeout, count, input ), NULL, 0 );
reply->wait = async_handoff( async, poll_socket( sock, async, req->exclusive, req->timeout, count, input ), NULL, 0 ); reply->options = get_fd_options( sock->fd ); release_object( async ); }
diff --git a/server/trace.c b/server/trace.c index 4e91d933a5d..567c54c4df7 100644 --- a/server/trace.c +++ b/server/trace.c @@ -2122,7 +2122,8 @@ static void dump_recv_socket_reply( const struct recv_socket_reply *req )
static void dump_poll_socket_request( const struct poll_socket_request *req ) {
- dump_async_data( " async=", &req->async );
- fprintf( stderr, " exclusive=%c", req->exclusive );
- dump_async_data( ", async=", &req->async ); dump_timeout( ", timeout=", &req->timeout ); dump_varargs_poll_socket_input( ", sockets=", cur_size ); }