src/java.base/linux/native/libnet/linux_close.c (old)

 350 #define BLOCKING_IO_RETURN_INT(FD, FUNC) {      \
 351     int ret;                                    \
 352     threadEntry_t self;                         \
 353     fdEntry_t *fdEntry = getFdEntry(FD);        \
 354     if (fdEntry == NULL) {                      \
 355         errno = EBADF;                          \
 356         return -1;                              \
 357     }                                           \
 358     do {                                        \
 359         startOp(fdEntry, &self);                \
 360         ret = FUNC;                             \
 361         endOp(fdEntry, &self);                  \
 362     } while (ret == -1 && errno == EINTR);      \
 363     return ret;                                 \
 364 }
 365 
 366 int NET_Read(int s, void* buf, size_t len) {
 367     BLOCKING_IO_RETURN_INT( s, recv(s, buf, len, 0) );
 368 }
 369 
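For illustration, this is roughly what BLOCKING_IO_RETURN_INT expands to inside NET_Read; a sketch, not part of the change. getFdEntry, startOp, endOp and the fdEntry_t/threadEntry_t types are declared earlier in linux_close.c.

    /* Illustrative expansion of BLOCKING_IO_RETURN_INT(s, recv(s, buf, len, 0)) */
    int NET_Read(int s, void* buf, size_t len) {
        int ret;
        threadEntry_t self;
        fdEntry_t *fdEntry = getFdEntry(s);
        if (fdEntry == NULL) {
            errno = EBADF;                       /* fd already closed */
            return -1;
        }
        do {
            startOp(fdEntry, &self);             /* register this thread as blocked on fd */
            ret = recv(s, buf, len, 0);
            endOp(fdEntry, &self);               /* deregister */
        } while (ret == -1 && errno == EINTR);   /* restart after ordinary signal interrupts */
        return ret;
    }

Per the comments in NET_Timeout below, the wakeup signal used by an asynchronous close leaves errno set to EBADF rather than EINTR, so a concurrent close breaks out of the retry loop instead of restarting the call.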
 370 int NET_ReadV(int s, const struct iovec * vector, int count) {
 371     BLOCKING_IO_RETURN_INT( s, readv(s, vector, count) );
 372 }
 373 
 374 int NET_RecvFrom(int s, void *buf, int len, unsigned int flags,
 375        struct sockaddr *from, socklen_t *fromlen) {
 376     BLOCKING_IO_RETURN_INT( s, recvfrom(s, buf, len, flags, from, fromlen) );
 377 }
 378 
 379 int NET_Send(int s, void *msg, int len, unsigned int flags) {
 380     BLOCKING_IO_RETURN_INT( s, send(s, msg, len, flags) );
 381 }
 382 
 383 int NET_WriteV(int s, const struct iovec * vector, int count) {
 384     BLOCKING_IO_RETURN_INT( s, writev(s, vector, count) );
 385 }
 386 
 387 int NET_SendTo(int s, const void *msg, int len,  unsigned  int
 388        flags, const struct sockaddr *to, int tolen) {
 389     BLOCKING_IO_RETURN_INT( s, sendto(s, msg, len, flags, to, tolen) );
 390 }
 391 
 392 int NET_Accept(int s, struct sockaddr *addr, socklen_t *addrlen) {
 393     BLOCKING_IO_RETURN_INT( s, accept(s, addr, addrlen) );
 394 }
 395 
 396 int NET_Connect(int s, struct sockaddr *addr, int addrlen) {
 397     BLOCKING_IO_RETURN_INT( s, connect(s, addr, addrlen) );
 398 }
 399 
 400 int NET_Poll(struct pollfd *ufds, unsigned int nfds, int timeout) {
 401     BLOCKING_IO_RETURN_INT( ufds[0].fd, poll(ufds, nfds, timeout) );
 402 }
 403 
 404 /*
 405  * Wrapper for poll(s, timeout).
 406  * Auto restarts with adjusted timeout if interrupted by
 407  * signal other than our wakeup signal.
 408  */
 409 int NET_Timeout(int s, long timeout) {
 410     long prevtime = 0, newtime;
 411     struct timeval t;
 412     fdEntry_t *fdEntry = getFdEntry(s);
 413 
 414     /*
 415      * Check that fd hasn't been closed.
 416      */
 417     if (fdEntry == NULL) {
 418         errno = EBADF;
 419         return -1;
 420     }
 421 
 422     /*
 423      * Pick up current time as may need to adjust timeout
 424      */
 425     if (timeout > 0) {
 426         gettimeofday(&t, NULL);
 427         prevtime = t.tv_sec * 1000  +  t.tv_usec / 1000;
 428     }
 429 
 430     for(;;) {
 431         struct pollfd pfd;
 432         int rv;
 433         threadEntry_t self;
 434 
 435         /*
 436          * Poll the fd. If interrupted by our wakeup signal
 437          * errno will be set to EBADF.
 438          */
 439         pfd.fd = s;
 440         pfd.events = POLLIN | POLLERR;
 441 
 442         startOp(fdEntry, &self);
 443         rv = poll(&pfd, 1, timeout);
 444         endOp(fdEntry, &self);
 445 
 446         /*
 447          * If interrupted then adjust timeout. If timeout
 448          * has expired return 0 (indicating timeout expired).
 449          */
 450         if (rv < 0 && errno == EINTR) {
 451             if (timeout > 0) {
 452                 gettimeofday(&t, NULL);
 453                 newtime = t.tv_sec * 1000  +  t.tv_usec / 1000;
 454                 timeout -= newtime - prevtime;
 455                 if (timeout <= 0) {
 456                     return 0;
 457                 }
 458                 prevtime = newtime;
 459             }
 460         } else {
 461             return rv;
 462         }
 463 
 464     }
 465 }
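A hypothetical caller sketch (illustrative names, not from this file) showing how NET_Timeout's result is usually interpreted: a positive value means the fd is ready, 0 means the timeout expired, and -1 with errno == EBADF means the fd was closed by another thread.

    /* Hypothetical helper: returns 1 if fd became readable, 0 if the
     * timeout expired, -1 on error (errno == EBADF for a concurrent close). */
    static int wait_readable(int fd, long timeout_ms) {
        int ret = NET_Timeout(fd, timeout_ms);
        if (ret > 0) {
            return 1;        /* POLLIN or POLLERR was reported */
        }
        return ret;          /* 0: timed out; -1: error, inspect errno */
    }

A positive result means a subsequent NET_Read on the same fd can be expected to return promptly rather than block for the full timeout.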

src/java.base/linux/native/libnet/linux_close.c (new)

 350 #define BLOCKING_IO_RETURN_INT(FD, FUNC) {      \
 351     int ret;                                    \
 352     threadEntry_t self;                         \
 353     fdEntry_t *fdEntry = getFdEntry(FD);        \
 354     if (fdEntry == NULL) {                      \
 355         errno = EBADF;                          \
 356         return -1;                              \
 357     }                                           \
 358     do {                                        \
 359         startOp(fdEntry, &self);                \
 360         ret = FUNC;                             \
 361         endOp(fdEntry, &self);                  \
 362     } while (ret == -1 && errno == EINTR);      \
 363     return ret;                                 \
 364 }
 365 
 366 int NET_Read(int s, void* buf, size_t len) {
 367     BLOCKING_IO_RETURN_INT( s, recv(s, buf, len, 0) );
 368 }
 369 
 370 int NET_NonBlockingRead(int s, void* buf, size_t len) { 
 371     BLOCKING_IO_RETURN_INT( s, recv(s, buf, len, MSG_DONTWAIT) );
 372 }
 373 
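NET_NonBlockingRead is new in this version: the MSG_DONTWAIT flag makes this one recv call non-blocking even when the socket itself is in blocking mode. A hypothetical usage sketch (illustrative names, relying on the errno values recv documents for this flag):

    /* Hypothetical helper: consume whatever is already buffered on fd
     * without blocking, regardless of the socket's O_NONBLOCK state. */
    static int drain_once(int fd, char *buf, size_t len) {
        int n = NET_NonBlockingRead(fd, buf, len);
        if (n == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
            return 0;        /* nothing available right now; the call did not block */
        }
        return n;            /* bytes read, 0 on orderly shutdown, or -1 on error */
    }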
 374 int NET_ReadV(int s, const struct iovec * vector, int count) {
 375     BLOCKING_IO_RETURN_INT( s, readv(s, vector, count) );
 376 }
 377 
 378 int NET_RecvFrom(int s, void *buf, int len, unsigned int flags,
 379        struct sockaddr *from, socklen_t *fromlen) {
 380     BLOCKING_IO_RETURN_INT( s, recvfrom(s, buf, len, flags, from, fromlen) );
 381 }
 382 
 383 int NET_Send(int s, void *msg, int len, unsigned int flags) {
 384     BLOCKING_IO_RETURN_INT( s, send(s, msg, len, flags) );
 385 }
 386 
 387 int NET_WriteV(int s, const struct iovec * vector, int count) {
 388     BLOCKING_IO_RETURN_INT( s, writev(s, vector, count) );
 389 }
 390 
 391 int NET_SendTo(int s, const void *msg, int len,  unsigned  int
 392        flags, const struct sockaddr *to, int tolen) {
 393     BLOCKING_IO_RETURN_INT( s, sendto(s, msg, len, flags, to, tolen) );
 394 }
 395 
 396 int NET_Accept(int s, struct sockaddr *addr, socklen_t *addrlen) {
 397     BLOCKING_IO_RETURN_INT( s, accept(s, addr, addrlen) );
 398 }
 399 
 400 int NET_Connect(int s, struct sockaddr *addr, int addrlen) {
 401     BLOCKING_IO_RETURN_INT( s, connect(s, addr, addrlen) );
 402 }
 403 
 404 int NET_Poll(struct pollfd *ufds, unsigned int nfds, int timeout) {
 405     BLOCKING_IO_RETURN_INT( ufds[0].fd, poll(ufds, nfds, timeout) );
 406 }
 407 
 408 /*
 409  * Wrapper for poll(s, timeout).
 410  * Auto restarts with adjusted timeout if interrupted by
 411  * signal other than our wakeup signal.
 412  */
 413 int NET_Timeout0(int s, long timeout, long currentTime) {
 414     long prevtime = currentTime, newtime;
 415     struct timeval t;
 416     fdEntry_t *fdEntry = getFdEntry(s);
 417 
 418     /*
 419      * Check that fd hasn't been closed.
 420      */
 421     if (fdEntry == NULL) {
 422         errno = EBADF;
 423         return -1;
 424     }
 425     
 426     for(;;) {
 427         struct pollfd pfd;
 428         int rv;
 429         threadEntry_t self;
 430 
 431         /*
 432          * Poll the fd. If interrupted by our wakeup signal
 433          * errno will be set to EBADF.
 434          */
 435         pfd.fd = s;
 436         pfd.events = POLLIN | POLLERR;
 437 
 438         startOp(fdEntry, &self);
 439         rv = poll(&pfd, 1, timeout);
 440         endOp(fdEntry, &self);
 441 
 442         /*
 443          * If interrupted then adjust timeout. If timeout
 444          * has expired return 0 (indicating timeout expired).
 445          */
 446         if (rv < 0 && errno == EINTR) {
 447             if (timeout > 0) {
 448                 gettimeofday(&t, NULL);
 449                 newtime = t.tv_sec * 1000  +  t.tv_usec / 1000;
 450                 timeout -= newtime - prevtime;
 451                 if (timeout <= 0) {
 452                     return 0;
 453                 }
 454                 prevtime = newtime;
 455             }
 456         } else {
 457             return rv;
 458         }
 459 
 460     }
 461 }
 462 
 463 int NET_TimeoutWithCurrentTime(int s, long timeout, long currentTime) {
 464     return NET_Timeout0(s, timeout, currentTime);
 465 }
 466 
 467 int NET_Timeout(int s, long timeout) {
 468     long currentTime = 0;
 469     struct timeval t;
 470     if (timeout > 0) {
 471         gettimeofday(&t, NULL);
 472         currentTime = t.tv_sec * 1000 + t.tv_usec / 1000;
 473     }
 474     return NET_Timeout0(s, timeout, currentTime);
 475 }
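The timeout logic is now split in three: NET_Timeout0 runs the poll loop against a caller-supplied start time, NET_Timeout preserves the old behaviour by sampling gettimeofday itself, and NET_TimeoutWithCurrentTime lets a caller that already knows the current time in milliseconds avoid that extra clock read. A hypothetical caller sketch (illustrative names, using the same millisecond arithmetic as the code above):

    /* Hypothetical helper: the caller samples the clock once and hands the
     * value down, so the wait does not have to call gettimeofday again
     * before the first poll. */
    static int wait_with_known_time(int fd, long timeout_ms) {
        struct timeval t;
        long now_ms;

        gettimeofday(&t, NULL);
        now_ms = t.tv_sec * 1000 + t.tv_usec / 1000;

        /* Behaves like NET_Timeout(fd, timeout_ms), but reuses now_ms. */
        return NET_TimeoutWithCurrentTime(fd, timeout_ms, now_ms);
    }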