1 /* 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 
24 */ 25 26 #include <stdio.h> 27 #include <stdlib.h> 28 #include <string.h> 29 #include <signal.h> 30 #include <pthread.h> 31 #include <sys/types.h> 32 #include <sys/socket.h> 33 #include <sys/select.h> 34 #include <sys/time.h> 35 #include <sys/resource.h> 36 #include <sys/uio.h> 37 #include <unistd.h> 38 #include <errno.h> 39 #include <sys/poll.h> 40 41 #include "jni_util.h" 42 43 /* 44 * Stack allocated by thread when doing blocking operation 45 */ 46 typedef struct threadEntry { 47 pthread_t thr; /* this thread */ 48 struct threadEntry *next; /* next thread */ 49 int intr; /* interrupted */ 50 } threadEntry_t; 51 52 /* 53 * Heap allocated during initialized - one entry per fd 54 */ 55 typedef struct { 56 pthread_mutex_t lock; /* fd lock */ 57 threadEntry_t *threads; /* threads blocked on fd */ 58 } fdEntry_t; 59 60 /* 61 * Signal to unblock thread 62 */ 63 static int sigWakeup = SIGIO; 64 65 /* 66 * The fd table and the number of file descriptors 67 */ 68 static fdEntry_t *fdTable; 69 static int fdCount; 70 71 /* 72 * This limit applies if getlimit() returns unlimited. 73 * Unfortunately, this means if someone wants a higher limit 74 * then they have to set an explicit limit, higher than this, 75 * which is probably counter-intuitive. 76 */ 77 #define MAX_FD_COUNT 4096 78 79 /* 80 * Null signal handler 81 */ 82 static void sig_wakeup(int sig) { 83 } 84 85 /* 86 * Initialization routine (executed when library is loaded) 87 * Allocate fd tables and sets up signal handler. 88 */ 89 static void __attribute((constructor)) init() { 90 struct rlimit nbr_files; 91 sigset_t sigset; 92 struct sigaction sa; 93 int i; 94 95 /* 96 * Allocate table based on the maximum number of 97 * file descriptors. 
98 */ 99 getrlimit(RLIMIT_NOFILE, &nbr_files); 100 if (nbr_files.rlim_max == RLIM_INFINITY) { 101 fdCount = MAX_FD_COUNT; 102 } else { 103 fdCount = nbr_files.rlim_max; 104 } 105 fdTable = (fdEntry_t *)calloc(fdCount, sizeof(fdEntry_t)); 106 if (fdTable == NULL) { 107 fprintf(stderr, "library initialization failed - " 108 "unable to allocate file descriptor table - out of memory"); 109 abort(); 110 } 111 for (i=0; i<fdCount; i++) { 112 pthread_mutex_init(&fdTable[i].lock, NULL); 113 } 114 115 /* 116 * Setup the signal handler 117 */ 118 sa.sa_handler = sig_wakeup; 119 sa.sa_flags = 0; 120 sigemptyset(&sa.sa_mask); 121 sigaction(sigWakeup, &sa, NULL); 122 123 sigemptyset(&sigset); 124 sigaddset(&sigset, sigWakeup); 125 sigprocmask(SIG_UNBLOCK, &sigset, NULL); 126 } 127 128 /* 129 * Return the fd table for this fd or NULL is fd out 130 * of range. 131 */ 132 static inline fdEntry_t *getFdEntry(int fd) 133 { 134 if (fd < 0 || fd >= fdCount) { 135 return NULL; 136 } 137 return &fdTable[fd]; 138 } 139 140 /* 141 * Start a blocking operation :- 142 * Insert thread onto thread list for the fd. 
 */
static inline void startOp(fdEntry_t *fdEntry, threadEntry_t *self)
{
    self->thr = pthread_self();
    self->intr = 0;

    /* Push this thread onto the head of the fd's blocked-thread list
     * so that closefd() can find and signal it. */
    pthread_mutex_lock(&(fdEntry->lock));
    {
        self->next = fdEntry->threads;
        fdEntry->threads = self;
    }
    pthread_mutex_unlock(&(fdEntry->lock));
}

/*
 * End a blocking operation :-
 *     Remove thread from thread list for the fd
 *     If fd has been interrupted then set errno to EBADF
 */
static inline void endOp
    (fdEntry_t *fdEntry, threadEntry_t *self)
{
    /* Preserve the errno of the I/O call just performed; the list
     * manipulation below must not clobber it. */
    int orig_errno = errno;
    pthread_mutex_lock(&(fdEntry->lock));
    {
        threadEntry_t *curr, *prev=NULL;
        curr = fdEntry->threads;
        while (curr != NULL) {
            if (curr == self) {
                /* closefd() flagged this thread as interrupted: make the
                 * caller's I/O call report EBADF instead of EINTR. */
                if (curr->intr) {
                    orig_errno = EBADF;
                }
                /* Unlink self from the singly-linked list. */
                if (prev == NULL) {
                    fdEntry->threads = curr->next;
                } else {
                    prev->next = curr->next;
                }
                break;
            }
            prev = curr;
            curr = curr->next;
        }
    }
    pthread_mutex_unlock(&(fdEntry->lock));
    errno = orig_errno;
}

/*
 * Close or dup2 a file descriptor ensuring that all threads blocked on
 * the file descriptor are notified via a wakeup signal.
 *
 *      fd1 < 0    => close(fd2)
 *      fd1 >= 0   => dup2(fd1, fd2)
 *
 * Returns -1 with errno set if operation fails.
 */
static int closefd(int fd1, int fd2) {
    int rv, orig_errno;
    fdEntry_t *fdEntry = getFdEntry(fd2);
    if (fdEntry == NULL) {
        errno = EBADF;
        return -1;
    }

    /*
     * Lock the fd to hold-off additional I/O on this fd.
     * (startOp() blocks here, so no new operation can begin on fd2
     * until the close/dup2 below has completed.)
     */
    pthread_mutex_lock(&(fdEntry->lock));

    {
        /*
         * Send a wakeup signal to all threads blocked on this
         * file descriptor. The intr flag is set before pthread_kill
         * so endOp() in the woken thread sees it and reports EBADF.
         */
        threadEntry_t *curr = fdEntry->threads;
        while (curr != NULL) {
            curr->intr = 1;
            pthread_kill( curr->thr, sigWakeup );
            curr = curr->next;
        }

        /*
         * And close/dup the file descriptor
         * (restart if interrupted by signal)
         */
        do {
            if (fd1 < 0) {
                rv = close(fd2);
            } else {
                rv = dup2(fd1, fd2);
            }
        } while (rv == -1 && errno == EINTR);

    }

    /*
     * Unlock without destroying errno
     */
    orig_errno = errno;
    pthread_mutex_unlock(&(fdEntry->lock));
    errno = orig_errno;

    return rv;
}

/*
 * Wrapper for dup2 - same semantics as dup2 system call except
 * that any threads blocked in an I/O system call on fd2 will be
 * preempted and return -1/EBADF;
 */
int NET_Dup2(int fd, int fd2) {
    if (fd < 0) {
        errno = EBADF;
        return -1;
    }
    return closefd(fd, fd2);
}

/*
 * Wrapper for close - same semantics as close system call
 * except that any threads blocked in an I/O on fd will be
 * preempted and the I/O system call will return -1/EBADF.
 */
int NET_SocketClose(int fd) {
    return closefd(-1, fd);
}

/************** Basic I/O operations here ***************/

/*
 * Macro to perform a blocking IO operation.
 * Restarts
 * automatically if interrupted by signal (other than
 * our wakeup signal).
 *
 * The call is bracketed by startOp()/endOp() so closefd() can
 * interrupt it: if the fd is closed while blocked, endOp() replaces
 * EINTR with EBADF, the loop exits and -1 is returned to the caller.
 */
#define BLOCKING_IO_RETURN_INT(FD, FUNC) {      \
    int ret;                                    \
    threadEntry_t self;                         \
    fdEntry_t *fdEntry = getFdEntry(FD);        \
    if (fdEntry == NULL) {                      \
        errno = EBADF;                          \
        return -1;                              \
    }                                           \
    do {                                        \
        startOp(fdEntry, &self);                \
        ret = FUNC;                             \
        endOp(fdEntry, &self);                  \
    } while (ret == -1 && errno == EINTR);      \
    return ret;                                 \
}

/* Interruptible wrappers around the basic socket system calls. */

int NET_Read(int s, void* buf, size_t len) {
    BLOCKING_IO_RETURN_INT( s, recv(s, buf, len, 0) );
}

int NET_ReadV(int s, const struct iovec * vector, int count) {
    BLOCKING_IO_RETURN_INT( s, readv(s, vector, count) );
}

int NET_RecvFrom(int s, void *buf, int len, unsigned int flags,
                 struct sockaddr *from, socklen_t *fromlen) {
    BLOCKING_IO_RETURN_INT( s, recvfrom(s, buf, len, flags, from, fromlen) );
}

int NET_Send(int s, void *msg, int len, unsigned int flags) {
    BLOCKING_IO_RETURN_INT( s, send(s, msg, len, flags) );
}

int NET_WriteV(int s, const struct iovec * vector, int count) {
    BLOCKING_IO_RETURN_INT( s, writev(s, vector, count) );
}

int NET_SendTo(int s, const void *msg, int len, unsigned int
               flags, const struct sockaddr *to, int tolen) {
    BLOCKING_IO_RETURN_INT( s, sendto(s, msg, len, flags, to, tolen) );
}

int NET_Accept(int s, struct sockaddr *addr, socklen_t *addrlen) {
    BLOCKING_IO_RETURN_INT( s, accept(s, addr, addrlen) );
}

int NET_Connect(int s, struct sockaddr *addr, int addrlen) {
    BLOCKING_IO_RETURN_INT( s, connect(s, addr, addrlen) );
}

#ifndef USE_SELECT
int NET_Poll(struct pollfd *ufds, unsigned int nfds, int timeout) {
    /* NOTE(review): registers only on the first polled fd, so a
     * close of any other fd in ufds would not interrupt this call -
     * confirm callers only rely on interruption of ufds[0]. */
    BLOCKING_IO_RETURN_INT( ufds[0].fd, poll(ufds, nfds, timeout) );
}
#else
int NET_Select(int s, fd_set *readfds, fd_set *writefds,
               fd_set *exceptfds, struct timeval *timeout) {
    /* s is select()'s nfds argument (highest fd plus one), so s-1 is
     * presumably the highest fd being waited on - that is the fd this
     * operation registers against for interruption. */
    BLOCKING_IO_RETURN_INT( s-1,
                            select(s, readfds, writefds, exceptfds, timeout) );
}
#endif

/*
 * Wrapper for select(s, timeout). We are using select() on Mac OS due to Bug 7131399.
 * Auto restarts with adjusted timeout if interrupted by
 * signal other than our wakeup signal.
 *
 * timeout > 0  : wait up to 'timeout' milliseconds for readability
 * timeout == 0 : poll (return immediately)
 * timeout < 0  : block indefinitely
 *
 * Returns select()'s result; 0 also indicates the timeout expired.
 * Returns -1/EBADF if the fd is out of range or closed while waiting.
 */
int NET_Timeout(JNIEnv *env, int s, long timeout) {
    long prevtime = 0, newtime;
    struct timeval t, *tp = &t;
    fd_set fds;
    fd_set* fdsp = NULL;
    int allocated = 0;          /* 1 if fdsp was heap-allocated below */
    threadEntry_t self;
    fdEntry_t *fdEntry = getFdEntry(s);

    /*
     * Check that fd hasn't been closed.
     */
    if (fdEntry == NULL) {
        errno = EBADF;
        return -1;
    }

    /*
     * Pick up current time as may need to adjust timeout
     */
    if (timeout > 0) {
        /* Timed */
        struct timeval now;
        gettimeofday(&now, NULL);
        prevtime = now.tv_sec * 1000 + now.tv_usec / 1000;
        t.tv_sec = timeout / 1000;
        t.tv_usec = (timeout % 1000) * 1000;
    } else if (timeout < 0) {
        /* Blocking */
        tp = 0;
    } else {
        /* Poll */
        t.tv_sec = 0;
        t.tv_usec = 0;
    }

    /*
     * The stack fd_set only covers fds below FD_SETSIZE; for larger
     * fds allocate a zeroed bit array big enough to hold bit s.
     */
    if (s < FD_SETSIZE) {
        fdsp = &fds;
        FD_ZERO(fdsp);
    } else {
        int length = (howmany(s+1, NFDBITS)) * sizeof(int);
        fdsp = (fd_set *) calloc(1, length);
        if (fdsp == NULL) {
            JNU_ThrowOutOfMemoryError(env, "NET_Select native heap allocation failed");
            return 0;
        }
        allocated = 1;
    }
    FD_SET(s, fdsp);

    for(;;) {
        int rv;

        /*
         * call select on the fd. If interrupted by our wakeup signal
         * errno will be set to EBADF (by endOp), which falls through
         * to the return below rather than the EINTR-restart path.
         */

        startOp(fdEntry, &self);
        rv = select(s+1, fdsp, 0, 0, tp);
        endOp(fdEntry, &self);

        /*
         * If interrupted then adjust timeout. If timeout
         * has expired return 0 (indicating timeout expired).
         */
        if (rv < 0 && errno == EINTR) {
            if (timeout > 0) {
                struct timeval now;
                gettimeofday(&now, NULL);
                newtime = now.tv_sec * 1000 + now.tv_usec / 1000;
                timeout -= newtime - prevtime;
                if (timeout <= 0) {
                    if (allocated != 0)
                        free(fdsp);
                    return 0;
                }
                prevtime = newtime;
                t.tv_sec = timeout / 1000;
                t.tv_usec = (timeout % 1000) * 1000;
            }
            /* blocking/poll cases simply restart the select */
        } else {
            if (allocated != 0)
                free(fdsp);
            return rv;
        }

    }
}