/*
* Server-side socket management
*
* Copyright (C) 1999 Marcus Meissner, Ove Kåven
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*
* FIXME: we use read|write access in all cases. Shouldn't this depend
* on the access of the current handle?
*/
#include "config.h"
#include <assert.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#ifdef HAVE_IFADDRS_H
# include <ifaddrs.h>
#endif
#ifdef HAVE_NET_IF_H
# include <net/if.h>
#endif
#ifdef HAVE_NETINET_IN_H
# include <netinet/in.h>
#endif
#include <poll.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/socket.h>
#ifdef HAVE_SYS_FILIO_H
# include <sys/filio.h>
#endif
#include <time.h>
#include <unistd.h>
#include <limits.h>
#ifdef HAVE_LINUX_FILTER_H
# include <linux/filter.h>
#endif
#ifdef HAVE_LINUX_RTNETLINK_H
# include <linux/rtnetlink.h>
#endif
#ifdef HAVE_NETIPX_IPX_H
# include <netipx/ipx.h>
#elif defined(HAVE_LINUX_IPX_H)
# ifdef HAVE_ASM_TYPES_H
# include <asm/types.h>
# endif
# ifdef HAVE_LINUX_TYPES_H
# include <linux/types.h>
# endif
# include <linux/ipx.h>
#endif
#if defined(SOL_IPX) || defined(SO_DEFAULT_HEADERS)
# define HAS_IPX
#endif
#ifdef HAVE_LINUX_IRDA_H
# ifdef HAVE_LINUX_TYPES_H
# include <linux/types.h>
# endif
# include <linux/irda.h>
# define HAS_IRDA
#endif
#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winerror.h"
#define USE_WS_PREFIX
#include "winsock2.h"
#include "af_irda.h"
#include "file.h"
#include "handle.h"
#include "thread.h"
#include "request.h"
#include "user.h"
#if defined(linux) && !defined(IP_UNICAST_IF)
#define IP_UNICAST_IF 50
#endif
static const char magic_loopback_addr[] = {127, 12, 34, 56};
union win_sockaddr
{
struct WS_sockaddr addr;
struct WS_sockaddr_in in;
struct WS_sockaddr_in6 in6;
struct WS_sockaddr_ipx ipx;
SOCKADDR_IRDA irda;
};
static struct list poll_list = LIST_INIT( poll_list );
struct poll_req
{
struct list entry;
struct async *async;
struct iosb *iosb;
struct timeout_user *timeout;
timeout_t orig_timeout;
    int exclusive;
    unsigned int count;
    struct
    {
        struct sock *sock;
        int mask;
        obj_handle_t handle;
        int flags;
        unsigned int status;
    } sockets[1];
};
struct accept_req
{
struct list entry;
struct async *async;
struct iosb *iosb;
struct sock *sock, *acceptsock;
int accepted;
unsigned int recv_len, local_len;
};
struct connect_req
{
struct async *async;
struct iosb *iosb;
struct sock *sock;
unsigned int addr_len, send_len, send_cursor;
};
enum connection_state
{
SOCK_LISTENING,
SOCK_UNCONNECTED,
SOCK_CONNECTING,
SOCK_CONNECTED,
SOCK_CONNECTIONLESS,
};
struct sock
{
struct object obj; /* object header */
struct fd *fd; /* socket file descriptor */
enum connection_state state; /* connection state */
/* pending AFD_POLL_* events which have not yet been reported to the application */
unsigned int pending_events;
/* AFD_POLL_* events which have already been reported and should not be
* selected for again until reset by a relevant call.
* For example, if AFD_POLL_READ is set here and not in pending_events, it
* has already been reported and consumed, and we should not report it
* again, even if POLLIN is signaled, until it is reset by e.g. recv().
*
* If an event has been signaled and not consumed yet, it will be set in
* both pending_events and reported_events (as we should only ever report
* any event once until it is reset.) */
unsigned int reported_events;
unsigned short proto; /* socket protocol */
unsigned short type; /* socket type */
unsigned short family; /* socket family */
struct event *event; /* event object */
user_handle_t window; /* window to send the message to */
unsigned int message; /* message to send */
obj_handle_t wparam; /* message wparam (socket handle) */
unsigned int mask; /* event mask */
int errors[AFD_POLL_BIT_COUNT]; /* event errors */
timeout_t connect_time;/* time the socket was connected */
struct sock *deferred; /* socket that waits for a deferred accept */
struct async_queue read_q; /* queue for asynchronous reads */
struct async_queue write_q; /* queue for asynchronous writes */
struct async_queue ifchange_q; /* queue for interface change notifications */
struct async_queue accept_q; /* queue for asynchronous accepts */
struct async_queue connect_q; /* queue for asynchronous connects */
struct async_queue poll_q; /* queue for asynchronous polls */
struct object *ifchange_obj; /* the interface change notification object */
struct list ifchange_entry; /* entry in ifchange notification list */
struct list accept_list; /* list of pending accept requests */
struct accept_req *accept_recv_req; /* pending accept-into request which will recv on this socket */
struct connect_req *connect_req; /* pending connection request */
struct poll_req *main_poll; /* main poll */
union win_sockaddr addr; /* socket name */
int addr_len; /* socket name length */
unsigned int rcvbuf; /* advisory recv buffer size */
unsigned int sndbuf; /* advisory send buffer size */
unsigned int rcvtimeo; /* receive timeout in ms */
unsigned int sndtimeo; /* send timeout in ms */
unsigned int rd_shutdown : 1; /* is the read end shut down? */
unsigned int wr_shutdown : 1; /* is the write end shut down? */
unsigned int wr_shutdown_pending : 1; /* is a write shutdown pending? */
unsigned int hangup : 1; /* has the read end received a hangup? */
unsigned int aborted : 1; /* did we get a POLLERR or irregular POLLHUP? */
unsigned int nonblocking : 1; /* is the socket nonblocking? */
unsigned int bound : 1; /* is the socket bound? */
};
static void sock_dump( struct object *obj, int verbose );
static struct fd *sock_get_fd( struct object *obj );
static int sock_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static void sock_destroy( struct object *obj );
static struct object *sock_get_ifchange( struct sock *sock );
static void sock_release_ifchange( struct sock *sock );
static int sock_get_poll_events( struct fd *fd );
static void sock_poll_event( struct fd *fd, int event );

static enum server_fd_type sock_get_fd_type( struct fd *fd );
static void sock_ioctl( struct fd *fd, ioctl_code_t code, struct async *async );
static void sock_cancel_async( struct fd *fd, struct async *async );
static void sock_queue_async( struct fd *fd, struct async *async, int type, int count );

static void sock_reselect_async( struct fd *fd, struct async_queue *queue );
static int accept_into_socket( struct sock *sock, struct sock *acceptsock );
static struct sock *accept_socket( struct sock *sock );
static int sock_get_ntstatus( int err );
static unsigned int sock_get_error( int err );
static void poll_socket( struct sock *poll_sock, struct async *async, int exclusive, timeout_t timeout,
unsigned int count, const struct afd_poll_socket_64 *sockets );
static const struct object_ops sock_ops =
{
sizeof(struct sock), /* size */
&file_type, /* type */
sock_dump, /* dump */
add_queue, /* add_queue */
remove_queue, /* remove_queue */
default_fd_signaled, /* signaled */
no_satisfied, /* satisfied */
no_signal, /* signal */
sock_get_fd, /* get_fd */
default_map_access, /* map_access */
default_get_sd, /* get_sd */
default_set_sd, /* set_sd */
no_get_full_name, /* get_full_name */
no_lookup_name, /* lookup_name */
no_link_name, /* link_name */
NULL, /* unlink_name */
no_open_file, /* open_file */
no_kernel_obj_list, /* get_kernel_obj_list */
sock_close_handle, /* close_handle */
sock_destroy /* destroy */
};
static const struct fd_ops sock_fd_ops =
{
sock_get_poll_events, /* get_poll_events */
sock_poll_event, /* poll_event */
sock_get_fd_type, /* get_fd_type */
no_fd_read, /* read */
no_fd_write, /* write */
no_fd_flush, /* flush */
default_fd_get_file_info, /* get_file_info */
no_fd_get_volume_info, /* get_volume_info */
sock_ioctl, /* ioctl */
sock_cancel_async, /* cancel_async */
sock_queue_async, /* queue_async */
sock_reselect_async /* reselect_async */
};
union unix_sockaddr
{
struct sockaddr addr;
struct sockaddr_in in;
struct sockaddr_in6 in6;
#ifdef HAS_IPX
struct sockaddr_ipx ipx;
#endif
#ifdef HAS_IRDA
struct sockaddr_irda irda;
#endif
};
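/* Convert a Unix sockaddr to a Windows (WS_) sockaddr. Returns the length of
 * the Windows address, 0 for AF_UNSPEC, or -1 if the family is unsupported or
 * the output buffer is too small. */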
static int sockaddr_from_unix( const union unix_sockaddr *uaddr, struct WS_sockaddr *wsaddr, socklen_t wsaddrlen )
{
memset( wsaddr, 0, wsaddrlen );
switch (uaddr->addr.sa_family)
{
case AF_INET:
{
struct WS_sockaddr_in win = {0};
if (wsaddrlen < sizeof(win)) return -1;
win.sin_family = WS_AF_INET;
win.sin_port = uaddr->in.sin_port;
memcpy( &win.sin_addr, &uaddr->in.sin_addr, sizeof(win.sin_addr) );
memcpy( wsaddr, &win, sizeof(win) );
return sizeof(win);
}
case AF_INET6:
{
struct WS_sockaddr_in6 win = {0};
if (wsaddrlen < sizeof(win)) return -1;
win.sin6_family = WS_AF_INET6;
win.sin6_port = uaddr->in6.sin6_port;
win.sin6_flowinfo = uaddr->in6.sin6_flowinfo;
memcpy( &win.sin6_addr, &uaddr->in6.sin6_addr, sizeof(win.sin6_addr) );
#ifdef HAVE_STRUCT_SOCKADDR_IN6_SIN6_SCOPE_ID
win.sin6_scope_id = uaddr->in6.sin6_scope_id;
#endif
memcpy( wsaddr, &win, sizeof(win) );
return sizeof(win);
}
#ifdef HAS_IPX
case AF_IPX:
{
struct WS_sockaddr_ipx win = {0};
if (wsaddrlen < sizeof(win)) return -1;
win.sa_family = WS_AF_IPX;
memcpy( win.sa_netnum, &uaddr->ipx.sipx_network, sizeof(win.sa_netnum) );
memcpy( win.sa_nodenum, &uaddr->ipx.sipx_node, sizeof(win.sa_nodenum) );
win.sa_socket = uaddr->ipx.sipx_port;
memcpy( wsaddr, &win, sizeof(win) );
return sizeof(win);
}
#endif
#ifdef HAS_IRDA
case AF_IRDA:
{
SOCKADDR_IRDA win;
if (wsaddrlen < sizeof(win)) return -1;
win.irdaAddressFamily = WS_AF_IRDA;
memcpy( win.irdaDeviceID, &uaddr->irda.sir_addr, sizeof(win.irdaDeviceID) );
if (uaddr->irda.sir_lsap_sel != LSAP_ANY)
snprintf( win.irdaServiceName, sizeof(win.irdaServiceName), "LSAP-SEL%u", uaddr->irda.sir_lsap_sel );
else
memcpy( win.irdaServiceName, uaddr->irda.sir_name, sizeof(win.irdaServiceName) );
memcpy( wsaddr, &win, sizeof(win) );
return sizeof(win);
}
#endif
case AF_UNSPEC:
return 0;
default:
return -1;
}
}
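/* Convert a Windows (WS_) sockaddr to a Unix sockaddr. Returns the Unix
 * address length, or 0 if the family is unsupported or the input is too
 * short. For WS_AF_UNSPEC the intended family is inferred from wsaddrlen. */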
static socklen_t sockaddr_to_unix( const struct WS_sockaddr *wsaddr, int wsaddrlen, union unix_sockaddr *uaddr )
{
memset( uaddr, 0, sizeof(*uaddr) );
switch (wsaddr->sa_family)
{
case WS_AF_INET:
{
struct WS_sockaddr_in win = {0};
if (wsaddrlen < sizeof(win)) return 0;
memcpy( &win, wsaddr, sizeof(win) );
uaddr->in.sin_family = AF_INET;
uaddr->in.sin_port = win.sin_port;
memcpy( &uaddr->in.sin_addr, &win.sin_addr, sizeof(win.sin_addr) );
return sizeof(uaddr->in);
}
case WS_AF_INET6:
{
struct WS_sockaddr_in6 win = {0};
if (wsaddrlen < sizeof(win)) return 0;
memcpy( &win, wsaddr, sizeof(win) );
uaddr->in6.sin6_family = AF_INET6;
uaddr->in6.sin6_port = win.sin6_port;
uaddr->in6.sin6_flowinfo = win.sin6_flowinfo;
memcpy( &uaddr->in6.sin6_addr, &win.sin6_addr, sizeof(win.sin6_addr) );
#ifdef HAVE_STRUCT_SOCKADDR_IN6_SIN6_SCOPE_ID
uaddr->in6.sin6_scope_id = win.sin6_scope_id;
#endif
return sizeof(uaddr->in6);
}
#ifdef HAS_IPX
case WS_AF_IPX:
{
struct WS_sockaddr_ipx win = {0};
if (wsaddrlen < sizeof(win)) return 0;
memcpy( &win, wsaddr, sizeof(win) );
uaddr->ipx.sipx_family = AF_IPX;
memcpy( &uaddr->ipx.sipx_network, win.sa_netnum, sizeof(win.sa_netnum) );
memcpy( &uaddr->ipx.sipx_node, win.sa_nodenum, sizeof(win.sa_nodenum) );
uaddr->ipx.sipx_port = win.sa_socket;
return sizeof(uaddr->ipx);
}
#endif
#ifdef HAS_IRDA
case WS_AF_IRDA:
{
SOCKADDR_IRDA win = {0};
unsigned int lsap_sel;
if (wsaddrlen < sizeof(win)) return 0;
memcpy( &win, wsaddr, sizeof(win) );
uaddr->irda.sir_family = AF_IRDA;
if (sscanf( win.irdaServiceName, "LSAP-SEL%u", &lsap_sel ) == 1)
uaddr->irda.sir_lsap_sel = lsap_sel;
        else
        {
            uaddr->irda.sir_lsap_sel = LSAP_ANY;
memcpy( uaddr->irda.sir_name, win.irdaServiceName, sizeof(win.irdaServiceName) );
}
memcpy( &uaddr->irda.sir_addr, win.irdaDeviceID, sizeof(win.irdaDeviceID) );
return sizeof(uaddr->irda);
}
#endif
case WS_AF_UNSPEC:
switch (wsaddrlen)
{
default: /* likely an ipv4 address */
case sizeof(struct WS_sockaddr_in):
return sizeof(uaddr->in);
#ifdef HAS_IPX
case sizeof(struct WS_sockaddr_ipx):
return sizeof(uaddr->ipx);
#endif
#ifdef HAS_IRDA
case sizeof(SOCKADDR_IRDA):
return sizeof(uaddr->irda);
#endif
case sizeof(struct WS_sockaddr_in6):
return sizeof(uaddr->in6);
}
default:
return 0;
}
}
/* some events are generated at the same time but must be sent in a particular
* order (e.g. CONNECT must be sent before READ) */
static const enum afd_poll_bit event_bitorder[] =
{
AFD_POLL_BIT_CONNECT,
AFD_POLL_BIT_CONNECT_ERR,
AFD_POLL_BIT_ACCEPT,
AFD_POLL_BIT_OOB,
AFD_POLL_BIT_WRITE,
AFD_POLL_BIT_READ,
AFD_POLL_BIT_RESET,
AFD_POLL_BIT_HUP,
AFD_POLL_BIT_CLOSE,
};
typedef enum {
SOCK_SHUTDOWN_ERROR = -1,
SOCK_SHUTDOWN_EOF = 0,
SOCK_SHUTDOWN_POLLHUP = 1
} sock_shutdown_t;
static sock_shutdown_t sock_shutdown_type = SOCK_SHUTDOWN_ERROR;
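/* Probe how this platform reports a peer shutdown: create a socketpair, shut
 * down one end, and check whether the other end sees POLLHUP or just EOF. */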
static sock_shutdown_t sock_check_pollhup(void)
{
sock_shutdown_t ret = SOCK_SHUTDOWN_ERROR;
int fd[2], n;
struct pollfd pfd;
char dummy;
    if ( socketpair( AF_UNIX, SOCK_STREAM, 0, fd ) ) return ret;
    if ( shutdown( fd[0], 1 ) ) goto out;
pfd.fd = fd[1];
pfd.events = POLLIN;
pfd.revents = 0;
/* Solaris' poll() sometimes returns nothing if given a 0ms timeout here */
n = poll( &pfd, 1, 1 );
if ( n != 1 ) goto out; /* error or timeout */
if ( pfd.revents & POLLHUP )
ret = SOCK_SHUTDOWN_POLLHUP;
    else if ( pfd.revents & POLLIN &&
              read( fd[1], &dummy, 1 ) == 0 ) /* POLLIN and EOF */
        ret = SOCK_SHUTDOWN_EOF;
out:
    close( fd[0] );
    close( fd[1] );
return ret;
}
void sock_init(void)
{
    sock_shutdown_type = sock_check_pollhup();

switch ( sock_shutdown_type )
{
case SOCK_SHUTDOWN_EOF:
if (debug_level) fprintf( stderr, "sock_init: shutdown() causes EOF\n" );
break;
case SOCK_SHUTDOWN_POLLHUP:
if (debug_level) fprintf( stderr, "sock_init: shutdown() causes POLLHUP\n" );
break;
default:
fprintf( stderr, "sock_init: ERROR in sock_check_pollhup()\n" );
sock_shutdown_type = SOCK_SHUTDOWN_EOF;
}
}
static int sock_reselect( struct sock *sock )
{
    int ev = sock_get_poll_events( sock->fd );

    if (debug_level)
        fprintf(stderr,"sock_reselect(%p): new mask %x\n", sock, ev);
set_fd_events( sock->fd, ev );
return ev;
}
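/* map AFD_POLL_* flags to the legacy FD_* network event flags used by
 * WSAAsyncSelect() and WSAEventSelect() */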
static unsigned int afd_poll_flag_to_win32( unsigned int flags )
{
static const unsigned int map[] =
{
FD_READ, /* READ */
FD_OOB, /* OOB */
FD_WRITE, /* WRITE */
FD_CLOSE, /* HUP */
FD_CLOSE, /* RESET */
0, /* CLOSE */
FD_CONNECT, /* CONNECT */
FD_ACCEPT, /* ACCEPT */
FD_CONNECT, /* CONNECT_ERR */
};
unsigned int i, ret = 0;
for (i = 0; i < ARRAY_SIZE(map); ++i)
{
if (flags & (1 << i)) ret |= map[i];
}
return ret;
}
/* wake anybody waiting on the socket event or send the associated message */
static void sock_wake_up( struct sock *sock )
{
unsigned int events = sock->pending_events & sock->mask;
int i;
if (sock->event)
{
if (debug_level) fprintf(stderr, "signalling events %x ptr %p\n", events, sock->event );
if (events)
set_event( sock->event );
}
if (sock->window)
{
if (debug_level) fprintf(stderr, "signalling events %x win %08x\n", events, sock->window );
for (i = 0; i < ARRAY_SIZE(event_bitorder); i++)
{
enum afd_poll_bit event = event_bitorder[i];
if (events & (1 << event))
{
lparam_t lparam = afd_poll_flag_to_win32(1 << event) | (sock_get_error( sock->errors[event] ) << 16);

post_message( sock->window, sock->message, sock->wparam, lparam );
}
}
sock_reselect( sock );
    }
}
static inline int sock_error( struct fd *fd )
{
    unsigned int optval = 0;
    socklen_t optlen = sizeof(optval);

    getsockopt( get_unix_fd(fd), SOL_SOCKET, SO_ERROR, (void *) &optval, &optlen);
    return optval;
}
static void free_accept_req( void *private )
{
    struct accept_req *req = private;
list_remove( &req->entry );
if (req->acceptsock)
{
req->acceptsock->accept_recv_req = NULL;
release_object( req->acceptsock );
}
release_object( req->async );
release_object( req->iosb );
release_object( req->sock );
free( req );
}
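/* Fill the output buffer of an accept request. The buffer receives any data
 * read from the accepted socket, followed by the local and remote addresses,
 * each stored as an int length followed by the converted sockaddr. */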
static void fill_accept_output( struct accept_req *req )
{
    const data_size_t out_size = req->iosb->out_size;
struct async *async = req->async;
union unix_sockaddr unix_addr;
struct WS_sockaddr *win_addr;
unsigned int remote_len;
socklen_t unix_len;
int fd, size = 0;
char *out_data;
int win_len;
if (!(out_data = mem_alloc( out_size )))
{
async_terminate( async, get_error() );
return;
}
fd = get_unix_fd( req->acceptsock->fd );
if (req->recv_len && (size = recv( fd, out_data, req->recv_len, 0 )) < 0)
{
if (!req->accepted && errno == EWOULDBLOCK)
{
req->accepted = 1;
sock_reselect( req->acceptsock );
return;
}
async_terminate( async, sock_get_ntstatus( errno ) );
free( out_data );
return;
}
if (req->local_len)
{
if (req->local_len < sizeof(int))
{
async_terminate( async, STATUS_BUFFER_TOO_SMALL );
free( out_data );
return;
}
unix_len = sizeof(unix_addr);
win_addr = (struct WS_sockaddr *)(out_data + req->recv_len + sizeof(int));
if (getsockname( fd, &unix_addr.addr, &unix_len ) < 0 ||
(win_len = sockaddr_from_unix( &unix_addr, win_addr, req->local_len - sizeof(int) )) < 0)
        {
            async_terminate( async, sock_get_ntstatus( errno ) );
free( out_data );
return;
}
memcpy( out_data + req->recv_len, &win_len, sizeof(int) );
}
unix_len = sizeof(unix_addr);
win_addr = (struct WS_sockaddr *)(out_data + req->recv_len + req->local_len + sizeof(int));
remote_len = out_size - req->recv_len - req->local_len;
if (getpeername( fd, &unix_addr.addr, &unix_len ) < 0 ||
(win_len = sockaddr_from_unix( &unix_addr, win_addr, remote_len - sizeof(int) )) < 0)
    {
        async_terminate( async, sock_get_ntstatus( errno ) );
free( out_data );
return;
}
memcpy( out_data + req->recv_len + req->local_len, &win_len, sizeof(int) );
async_request_complete( req->async, STATUS_SUCCESS, size, out_size, out_data );
}
static void complete_async_accept( struct sock *sock, struct accept_req *req )
{
struct sock *acceptsock = req->acceptsock;
struct async *async = req->async;
if (debug_level) fprintf( stderr, "completing accept request for socket %p\n", sock );
    if (acceptsock)
    {
        if (!accept_into_socket( sock, acceptsock ))
{
async_terminate( async, get_error() );
return;
}
fill_accept_output( req );
}
else
{
obj_handle_t handle;
if (!(acceptsock = accept_socket( sock )))
{
async_terminate( async, get_error() );
return;
}
handle = alloc_handle_no_access_check( async_get_thread( async )->process, &acceptsock->obj,
GENERIC_READ | GENERIC_WRITE | SYNCHRONIZE, OBJ_INHERIT );
acceptsock->wparam = handle;
sock_reselect( acceptsock );
release_object( acceptsock );
if (!handle)
{
async_terminate( async, get_error() );
return;
}
async_request_complete_alloc( req->async, STATUS_SUCCESS, 0, sizeof(handle), &handle );
    }
}
static void complete_async_accept_recv( struct accept_req *req )
{
if (debug_level) fprintf( stderr, "completing accept recv request for socket %p\n", req->acceptsock );
assert( req->recv_len );
    fill_accept_output( req );
}
static void free_connect_req( void *private )
{
struct connect_req *req = private;
req->sock->connect_req = NULL;
release_object( req->async );
release_object( req->iosb );
release_object( req->sock );
free( req );
}
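/* Complete a pending ConnectEx-style request: mark the socket connected, then
 * send any data supplied with the request before completing the async. */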
static void complete_async_connect( struct sock *sock )
{
struct connect_req *req = sock->connect_req;
const char *in_buffer;
size_t len;
int ret;
if (debug_level) fprintf( stderr, "completing connect request for socket %p\n", sock );
sock->state = SOCK_CONNECTED;
if (!req->send_len)
{
        async_terminate( req->async, STATUS_SUCCESS );
        return;
    }

    in_buffer = (const char *)req->iosb->in_data + sizeof(struct afd_connect_params) + req->addr_len;
len = req->send_len - req->send_cursor;
ret = send( get_unix_fd( sock->fd ), in_buffer + req->send_cursor, len, 0 );
if (ret < 0 && errno != EWOULDBLOCK)
async_terminate( req->async, sock_get_ntstatus( errno ) );
    else if (ret == len)
        async_request_complete( req->async, STATUS_SUCCESS, req->send_len, 0, NULL );
else
req->send_cursor += ret;
}
static void free_poll_req( void *private )
{
struct poll_req *req = private;
unsigned int i;
if (req->timeout) remove_timeout_user( req->timeout );
for (i = 0; i < req->count; ++i)
release_object( req->sockets[i].sock );
release_object( req->async );
release_object( req->iosb );
list_remove( &req->entry );
free( req );
}
static int is_oobinline( struct sock *sock )
{
int oobinline;
socklen_t len = sizeof(oobinline);
return !getsockopt( get_unix_fd( sock->fd ), SOL_SOCKET, SO_OOBINLINE, (char *)&oobinline, &len ) && oobinline;
}
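/* translate poll() events into AFD_POLL_* flags, taking the connection state
 * into account */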
static int get_poll_flags( struct sock *sock, int event )
{
int flags = 0;
/* A connection-mode socket which has never been connected does not return
* write or hangup events, but Linux reports POLLOUT | POLLHUP. */
if (sock->state == SOCK_UNCONNECTED)
event &= ~(POLLOUT | POLLHUP);
if (event & POLLIN)
{
if (sock->state == SOCK_LISTENING)
flags |= AFD_POLL_ACCEPT;
else
flags |= AFD_POLL_READ;
}
if (event & POLLPRI)
flags |= is_oobinline( sock ) ? AFD_POLL_READ : AFD_POLL_OOB;
if (event & POLLOUT)
flags |= AFD_POLL_WRITE;
if (sock->state == SOCK_CONNECTED)
flags |= AFD_POLL_CONNECT;
if (event & POLLHUP)
flags |= AFD_POLL_HUP;
if (event & POLLERR)
flags |= AFD_POLL_CONNECT_ERR;
return flags;
}
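/* Complete a poll request, building an afd_poll_params reply (32- or 64-bit,
 * depending on the client machine) that contains only the signaled sockets. */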
static void complete_async_poll( struct poll_req *req, unsigned int status )
{
unsigned int i, signaled_count = 0;
for (i = 0; i < req->count; ++i)
{
struct sock *sock = req->sockets[i].sock;
if (sock->main_poll == req)
sock->main_poll = NULL;
}
if (!status)
{
for (i = 0; i < req->count; ++i)
{
if (req->sockets[i].flags)
++signaled_count;
}
}
if (is_machine_64bit( async_get_thread( req->async )->process->machine ))
{
size_t output_size = offsetof( struct afd_poll_params_64, sockets[signaled_count] );
struct afd_poll_params_64 *output;
if (!(output = mem_alloc( output_size )))
{
async_terminate( req->async, get_error() );
return;
}
memset( output, 0, output_size );
output->timeout = req->orig_timeout;
output->exclusive = req->exclusive;
for (i = 0; i < req->count; ++i)
{
if (!req->sockets[i].flags) continue;
output->sockets[output->count].socket = req->sockets[i].handle;
output->sockets[output->count].flags = req->sockets[i].flags;
output->sockets[output->count].status = req->sockets[i].status;
++output->count;
}
assert( output->count == signaled_count );
async_request_complete( req->async, status, output_size, output_size, output );
}
else
{
size_t output_size = offsetof( struct afd_poll_params_32, sockets[signaled_count] );
struct afd_poll_params_32 *output;
if (!(output = mem_alloc( output_size )))
{
async_terminate( req->async, get_error() );
return;
}
memset( output, 0, output_size );
output->timeout = req->orig_timeout;
output->exclusive = req->exclusive;
for (i = 0; i < req->count; ++i)
{
if (!req->sockets[i].flags) continue;
output->sockets[output->count].socket = req->sockets[i].handle;
output->sockets[output->count].flags = req->sockets[i].flags;
output->sockets[output->count].status = req->sockets[i].status;
++output->count;
}
assert( output->count == signaled_count );
async_request_complete( req->async, status, output_size, output_size, output );
    }
}
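/* complete any pending poll requests that are watching this socket for the
 * newly signaled events */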
static void complete_async_polls( struct sock *sock, int event, int error )
{
int flags = get_poll_flags( sock, event );
struct poll_req *req, *next;
LIST_FOR_EACH_ENTRY_SAFE( req, next, &poll_list, struct poll_req, entry )
{
unsigned int i;
if (req->iosb->status != STATUS_PENDING) continue;
for (i = 0; i < req->count; ++i)
{
if (req->sockets[i].sock != sock) continue;
if (!(req->sockets[i].mask & flags)) continue;
if (debug_level)
fprintf( stderr, "completing poll for socket %p, wanted %#x got %#x\n",
sock, req->sockets[i].mask, flags );
req->sockets[i].flags = req->sockets[i].mask & flags;
req->sockets[i].status = sock_get_ntstatus( error );
complete_async_poll( req, STATUS_SUCCESS );
break;
}
}
}
static void async_poll_timeout( void *private )
{
struct poll_req *req = private;
req->timeout = NULL;
if (req->iosb->status != STATUS_PENDING) return;
    complete_async_poll( req, STATUS_TIMEOUT );
}
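/* wake up or complete the asyncs waiting on this socket for the given poll()
 * events; events consumed by an async queue are stripped from the event mask */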
static int sock_dispatch_asyncs( struct sock *sock, int event, int error )
{
if (event & (POLLIN | POLLPRI))
{
struct accept_req *req;
LIST_FOR_EACH_ENTRY( req, &sock->accept_list, struct accept_req, entry )
{
if (req->iosb->status == STATUS_PENDING && !req->accepted)
{
complete_async_accept( sock, req );
break;
}
}
if (sock->accept_recv_req && sock->accept_recv_req->iosb->status == STATUS_PENDING)
complete_async_accept_recv( sock->accept_recv_req );
}
if ((event & POLLOUT) && sock->connect_req && sock->connect_req->iosb->status == STATUS_PENDING)
complete_async_connect( sock );
if (event & (POLLIN | POLLPRI) && async_waiting( &sock->read_q ))
{
if (debug_level) fprintf( stderr, "activating read queue for socket %p\n", sock );
async_wake_up( &sock->read_q, STATUS_ALERTED );
event &= ~(POLLIN | POLLPRI);
}
    if (event & POLLOUT && async_waiting( &sock->write_q ))
    {
        if (debug_level) fprintf( stderr, "activating write queue for socket %p\n", sock );
        async_wake_up( &sock->write_q, STATUS_ALERTED );
        event &= ~POLLOUT;
    }
if (event & (POLLERR | POLLHUP))
{
int status = sock_get_ntstatus( error );
struct accept_req *req, *next;
if (sock->rd_shutdown || sock->hangup)
async_wake_up( &sock->read_q, status );
if (sock->wr_shutdown)
async_wake_up( &sock->write_q, status );
LIST_FOR_EACH_ENTRY_SAFE( req, next, &sock->accept_list, struct accept_req, entry )
{
if (req->iosb->status == STATUS_PENDING)
async_terminate( req->async, status );