354 #define BLOCKING_IO_RETURN_INT(FD, FUNC) { \ 355 int ret; \ 356 threadEntry_t self; \ 357 fdEntry_t *fdEntry = getFdEntry(FD); \ 358 if (fdEntry == NULL) { \ 359 errno = EBADF; \ 360 return -1; \ 361 } \ 362 do { \ 363 startOp(fdEntry, &self); \ 364 ret = FUNC; \ 365 endOp(fdEntry, &self); \ 366 } while (ret == -1 && errno == EINTR); \ 367 return ret; \ 368 } 369 370 int NET_Read(int s, void* buf, size_t len) { 371 BLOCKING_IO_RETURN_INT( s, recv(s, buf, len, 0) ); 372 } 373 374 int NET_ReadV(int s, const struct iovec * vector, int count) { 375 BLOCKING_IO_RETURN_INT( s, readv(s, vector, count) ); 376 } 377 378 int NET_RecvFrom(int s, void *buf, int len, unsigned int flags, 379 struct sockaddr *from, socklen_t *fromlen) { 380 BLOCKING_IO_RETURN_INT( s, recvfrom(s, buf, len, flags, from, fromlen) ); 381 } 382 383 int NET_Send(int s, void *msg, int len, unsigned int flags) { 384 BLOCKING_IO_RETURN_INT( s, send(s, msg, len, flags) ); 385 } 386 387 int NET_WriteV(int s, const struct iovec * vector, int count) { 388 BLOCKING_IO_RETURN_INT( s, writev(s, vector, count) ); 389 } 390 391 int NET_SendTo(int s, const void *msg, int len, unsigned int 392 flags, const struct sockaddr *to, int tolen) { 393 BLOCKING_IO_RETURN_INT( s, sendto(s, msg, len, flags, to, tolen) ); 394 } 395 396 int NET_Accept(int s, struct sockaddr *addr, socklen_t *addrlen) { 397 BLOCKING_IO_RETURN_INT( s, accept(s, addr, addrlen) ); 398 } 399 400 int NET_Connect(int s, struct sockaddr *addr, int addrlen) { 401 BLOCKING_IO_RETURN_INT( s, connect(s, addr, addrlen) ); 402 } 403 404 int NET_Poll(struct pollfd *ufds, unsigned int nfds, int timeout) { 405 BLOCKING_IO_RETURN_INT( ufds[0].fd, poll(ufds, nfds, timeout) ); 406 } 407 408 /* 409 * Wrapper for select(s, timeout). We are using select() on Mac OS due to Bug 7131399. 410 * Auto restarts with adjusted timeout if interrupted by 411 * signal other than our wakeup signal. 
/*
 * NOTE(review): this span is extraction residue from a side-by-side diff.
 * Left of the `|` is the OLD column: the tail of the NET_Timeout select()
 * wrapper, truncated mid-function at the column break (the select loop and
 * timeout-adjustment code are not visible here).  Right of the `|` is the
 * start of the NEW column: the same BLOCKING_IO_RETURN_INT macro plus the
 * wrappers, now including NET_NonBlockingRead (recv with MSG_DONTWAIT).
 * Embedded numerals are original file line numbers, not code.
 * Left byte-identical; only this comment added.
 */
412 */ 413 int NET_Timeout(int s, long timeout) { 414 long prevtime = 0, newtime; 415 struct timeval t, *tp = &t; 416 fd_set fds; 417 fd_set* fdsp = NULL; 418 int allocated = 0; 419 threadEntry_t self; 420 fdEntry_t *fdEntry = getFdEntry(s); 421 422 /* 423 * Check that fd hasn't been closed. 424 */ 425 if (fdEntry == NULL) { 426 errno = EBADF; 427 return -1; 428 } 429 430 /* 431 * Pick up current time as may need to adjust timeout 432 */ 433 if (timeout > 0) { 434 /* Timed */ 435 struct timeval now; 436 gettimeofday(&now, NULL); 437 prevtime = now.tv_sec * 1000 + now.tv_usec / 1000; 438 t.tv_sec = timeout / 1000; 439 t.tv_usec = (timeout % 1000) * 1000; 440 } else if (timeout < 0) { 441 /* Blocking */ 442 tp = 0; 443 } else { 444 /* Poll */ 445 t.tv_sec = 0; 446 t.tv_usec = 0; 447 } 448 449 if (s < FD_SETSIZE) { 450 fdsp = &fds; 451 FD_ZERO(fdsp); 452 } else { 453 int length = (howmany(s+1, NFDBITS)) * sizeof(int); 454 fdsp = (fd_set *) calloc(1, length); 455 if (fdsp == NULL) { 456 return -1; // errno will be set to ENOMEM 457 } | 354 #define BLOCKING_IO_RETURN_INT(FD, FUNC) { \ 355 int ret; \ 356 threadEntry_t self; \ 357 fdEntry_t *fdEntry = getFdEntry(FD); \ 358 if (fdEntry == NULL) { \ 359 errno = EBADF; \ 360 return -1; \ 361 } \ 362 do { \ 363 startOp(fdEntry, &self); \ 364 ret = FUNC; \ 365 endOp(fdEntry, &self); \ 366 } while (ret == -1 && errno == EINTR); \ 367 return ret; \ 368 } 369 370 int NET_Read(int s, void* buf, size_t len) { 371 BLOCKING_IO_RETURN_INT( s, recv(s, buf, len, 0) ); 372 } 373 374 int NET_NonBlockingRead(int s, void* buf, size_t len) { 375 BLOCKING_IO_RETURN_INT( s, recv(s, buf, len, MSG_DONTWAIT)); 376 } 377 378 int NET_ReadV(int s, const struct iovec * vector, int count) { 379 BLOCKING_IO_RETURN_INT( s, readv(s, vector, count) ); 380 } 381 382 int NET_RecvFrom(int s, void *buf, int len, unsigned int flags, 383 struct sockaddr *from, socklen_t *fromlen) { 384 BLOCKING_IO_RETURN_INT( s, recvfrom(s, buf, len, flags, from, fromlen) );
/*
 * NOTE(review): continuation of the NEW column of the diff extraction:
 * the remaining blocking wrappers (NET_Send .. NET_Poll) followed by
 * NET_Timeout0, the renamed NET_Timeout that takes the caller-supplied
 * currentTime instead of calling gettimeofday() itself.  NET_Timeout0 is
 * truncated at the end of this view (it stops inside the calloc branch;
 * the select loop is not visible), so it is left untouched.  Embedded
 * numerals are original file line numbers, not code.  Only this comment
 * added; all other bytes identical.
 */
385 } 386 387 int NET_Send(int s, void *msg, int len, unsigned int flags) { 388 BLOCKING_IO_RETURN_INT( s, send(s, msg, len, flags) ); 389 } 390 391 int NET_WriteV(int s, const struct iovec * vector, int count) { 392 BLOCKING_IO_RETURN_INT( s, writev(s, vector, count) ); 393 } 394 395 int NET_SendTo(int s, const void *msg, int len, unsigned int 396 flags, const struct sockaddr *to, int tolen) { 397 BLOCKING_IO_RETURN_INT( s, sendto(s, msg, len, flags, to, tolen) ); 398 } 399 400 int NET_Accept(int s, struct sockaddr *addr, socklen_t *addrlen) { 401 BLOCKING_IO_RETURN_INT( s, accept(s, addr, addrlen) ); 402 } 403 404 int NET_Connect(int s, struct sockaddr *addr, int addrlen) { 405 BLOCKING_IO_RETURN_INT( s, connect(s, addr, addrlen) ); 406 } 407 408 int NET_Poll(struct pollfd *ufds, unsigned int nfds, int timeout) { 409 BLOCKING_IO_RETURN_INT( ufds[0].fd, poll(ufds, nfds, timeout) ); 410 } 411 412 /* 413 * Wrapper for select(s, timeout). We are using select() on Mac OS due to Bug 7131399. 414 * Auto restarts with adjusted timeout if interrupted by 415 * signal other than our wakeup signal. 416 */ 417 int NET_Timeout0(int s, long timeout,long currentTime) { 418 long prevtime = currentTime, newtime; 419 struct timeval t, *tp = &t; 420 fd_set fds; 421 fd_set* fdsp = NULL; 422 int allocated = 0; 423 threadEntry_t self; 424 fdEntry_t *fdEntry = getFdEntry(s); 425 426 /* 427 * Check that fd hasn't been closed. 
428 */ 429 if (fdEntry == NULL) { 430 errno = EBADF; 431 return -1; 432 } 433 434 /* 435 * Pick up current time as may need to adjust timeout 436 */ 437 if (timeout > 0) { 438 /* Timed */ 439 t.tv_sec = timeout / 1000; 440 t.tv_usec = (timeout % 1000) * 1000; 441 } else if (timeout < 0) { 442 /* Blocking */ 443 tp = 0; 444 } else { 445 /* Poll */ 446 t.tv_sec = 0; 447 t.tv_usec = 0; 448 } 449 450 if (s < FD_SETSIZE) { 451 fdsp = &fds; 452 FD_ZERO(fdsp); 453 } else { 454 int length = (howmany(s+1, NFDBITS)) * sizeof(int); 455 fdsp = (fd_set *) calloc(1, length); 456 if (fdsp == NULL) { 457 return -1; // errno will be set to ENOMEM 458 } |