/*
 * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/uio.h>
#include <unistd.h>
#include <errno.h>

#include <sys/poll.h>

/*
 * Stack allocated by thread when doing blocking operation
 */
typedef struct threadEntry {
    pthread_t thr;                      /* this thread */
    struct threadEntry *next;           /* next thread */
    int intr;                           /* interrupted */
} threadEntry_t;

/*
 * Heap allocated during initialization - one entry per fd
 */
typedef struct {
    pthread_mutex_t lock;               /* fd lock */
    threadEntry_t *threads;             /* threads blocked on fd */
} fdEntry_t;

/*
 * Signal to unblock thread
 */
static int sigWakeup = (__SIGRTMAX - 2);

/*
 * The fd table and the number of file descriptors
 */
static fdEntry_t *fdTable;
static int fdCount;

/*
 * Null signal handler
 */
static void sig_wakeup(int sig) {
}

/*
 * Initialization routine (executed when library is loaded).
 * Allocates the fd table and sets up the wakeup signal handler.
 */
static void __attribute__((constructor)) init() {
    struct rlimit nbr_files;
    sigset_t sigset;
    struct sigaction sa;

    /*
     * Allocate table based on the maximum number of
     * file descriptors.
     */
    getrlimit(RLIMIT_NOFILE, &nbr_files);
    fdCount = nbr_files.rlim_max;
    fdTable = (fdEntry_t *)calloc(fdCount, sizeof(fdEntry_t));
    if (fdTable == NULL) {
        fprintf(stderr, "library initialization failed - "
                "unable to allocate file descriptor table - out of memory\n");
        abort();
    }

    /*
     * Setup the signal handler
     */
    sa.sa_handler = sig_wakeup;
    sa.sa_flags = 0;
    sigemptyset(&sa.sa_mask);
    sigaction(sigWakeup, &sa, NULL);

    sigemptyset(&sigset);
    sigaddset(&sigset, sigWakeup);
    sigprocmask(SIG_UNBLOCK, &sigset, NULL);
}

/*
 * Return the fd table entry for this fd, or NULL if the fd is
 * out of range.
 */
static inline fdEntry_t *getFdEntry(int fd)
{
    if (fd < 0 || fd >= fdCount) {
        return NULL;
    }
    return &fdTable[fd];
}

/*
 * Start a blocking operation :-
 *    Insert thread onto thread list for the fd.
 */
static inline void startOp(fdEntry_t *fdEntry, threadEntry_t *self)
{
    self->thr = pthread_self();
    self->intr = 0;

    pthread_mutex_lock(&(fdEntry->lock));
    {
        self->next = fdEntry->threads;
        fdEntry->threads = self;
    }
    pthread_mutex_unlock(&(fdEntry->lock));
}

/*
 * End a blocking operation :-
 *     Remove thread from thread list for the fd
 *     If fd has been interrupted then set errno to EBADF
 */
static inline void endOp(fdEntry_t *fdEntry, threadEntry_t *self)
{
    int orig_errno = errno;
    pthread_mutex_lock(&(fdEntry->lock));
    {
        threadEntry_t *curr, *prev = NULL;
        curr = fdEntry->threads;
        while (curr != NULL) {
            if (curr == self) {
                if (curr->intr) {
                    orig_errno = EBADF;
                }
                if (prev == NULL) {
                    fdEntry->threads = curr->next;
                } else {
                    prev->next = curr->next;
                }
                break;
            }
            prev = curr;
            curr = curr->next;
        }
    }
    pthread_mutex_unlock(&(fdEntry->lock));
    errno = orig_errno;
}

/*
 * Close or dup2 a file descriptor ensuring that all threads blocked on
 * the file descriptor are notified via a wakeup signal.
 *
 *      fd1 < 0    => close(fd2)
 *      fd1 >= 0   => dup2(fd1, fd2)
 *
 * Returns -1 with errno set if operation fails.
 */
static int closefd(int fd1, int fd2) {
    int rv, orig_errno;
    fdEntry_t *fdEntry = getFdEntry(fd2);
    if (fdEntry == NULL) {
        errno = EBADF;
        return -1;
    }

    /*
     * Lock the fd to hold-off additional I/O on this fd.
     */
    pthread_mutex_lock(&(fdEntry->lock));

    {
        /*
         * Send a wakeup signal to all threads blocked on this
         * file descriptor.
         */
        threadEntry_t *curr = fdEntry->threads;
        while (curr != NULL) {
            curr->intr = 1;
            pthread_kill(curr->thr, sigWakeup);
            curr = curr->next;
        }

        /*
         * And close/dup the file descriptor
         * (restart if interrupted by signal)
         */
        do {
            if (fd1 < 0) {
                rv = close(fd2);
            } else {
                rv = dup2(fd1, fd2);
            }
        } while (rv == -1 && errno == EINTR);
    }

    /*
     * Unlock without destroying errno
     */
    orig_errno = errno;
    pthread_mutex_unlock(&(fdEntry->lock));
    errno = orig_errno;

    return rv;
}

/*
 * Wrapper for dup2 - same semantics as the dup2 system call except
 * that any threads blocked in an I/O system call on fd2 will be
 * preempted and return -1/EBADF.
 */
int NET_Dup2(int fd, int fd2) {
    if (fd < 0) {
        errno = EBADF;
        return -1;
    }
    return closefd(fd, fd2);
}

/*
 * Wrapper for close - same semantics as the close system call
 * except that any threads blocked in an I/O on fd will be
 * preempted and the I/O system call will return -1/EBADF.
 */
int NET_SocketClose(int fd) {
    return closefd(-1, fd);
}
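
/*
 * Illustrative sketch (not part of the original interface): how an
 * asynchronous close is observed by a blocked thread.  The thread roles
 * below are hypothetical.
 *
 *   // Thread A - blocked reader
 *   char buf[128];
 *   int n = NET_Read(s, buf, sizeof(buf));    // blocks in recv()
 *   if (n == -1 && errno == EBADF) {
 *       // s was closed by another thread via NET_SocketClose(s)
 *   }
 *
 *   // Thread B - closes the socket, waking up Thread A with -1/EBADF
 *   NET_SocketClose(s);
 */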

/************** Basic I/O operations here ***************/

/*
 * Macro to perform a blocking IO operation. Restarts
 * automatically if interrupted by signal (other than
 * our wakeup signal)
 */
#define BLOCKING_IO_RETURN_INT(FD, FUNC) {      \
    int ret;                                    \
    threadEntry_t self;                         \
    fdEntry_t *fdEntry = getFdEntry(FD);        \
    if (fdEntry == NULL) {                      \
        errno = EBADF;                          \
        return -1;                              \
    }                                           \
    do {                                        \
        startOp(fdEntry, &self);                \
        ret = FUNC;                             \
        endOp(fdEntry, &self);                  \
    } while (ret == -1 && errno == EINTR);      \
    return ret;                                 \
}

int NET_Read(int s, void* buf, size_t len) {
    BLOCKING_IO_RETURN_INT( s, recv(s, buf, len, 0) );
}

int NET_ReadV(int s, const struct iovec * vector, int count) {
    BLOCKING_IO_RETURN_INT( s, readv(s, vector, count) );
}

int NET_RecvFrom(int s, void *buf, int len, unsigned int flags,
       struct sockaddr *from, int *fromlen) {
    /* Cast int *fromlen -> socklen_t * (both are 32-bit ints on Linux).
       The macro returns directly, so the length cannot be copied back
       through a local variable after it. */
    BLOCKING_IO_RETURN_INT( s, recvfrom(s, buf, len, flags, from, (socklen_t *)fromlen) );
}

int NET_Send(int s, void *msg, int len, unsigned int flags) {
    BLOCKING_IO_RETURN_INT( s, send(s, msg, len, flags) );
}

int NET_WriteV(int s, const struct iovec * vector, int count) {
    BLOCKING_IO_RETURN_INT( s, writev(s, vector, count) );
}

int NET_SendTo(int s, const void *msg, int len, unsigned int flags,
       const struct sockaddr *to, int tolen) {
    BLOCKING_IO_RETURN_INT( s, sendto(s, msg, len, flags, to, tolen) );
}

int NET_Accept(int s, struct sockaddr *addr, int *addrlen) {
    /* Cast int *addrlen -> socklen_t * (both are 32-bit ints on Linux);
       see NET_RecvFrom above. */
    BLOCKING_IO_RETURN_INT( s, accept(s, addr, (socklen_t *)addrlen) );
}

int NET_Connect(int s, struct sockaddr *addr, int addrlen) {
    BLOCKING_IO_RETURN_INT( s, connect(s, addr, addrlen) );
}

#ifndef USE_SELECT
int NET_Poll(struct pollfd *ufds, unsigned int nfds, int timeout) {
    BLOCKING_IO_RETURN_INT( ufds[0].fd, poll(ufds, nfds, timeout) );
}
#else
int NET_Select(int s, fd_set *readfds, fd_set *writefds,
               fd_set *exceptfds, struct timeval *timeout) {
    BLOCKING_IO_RETURN_INT( s-1,
                            select(s, readfds, writefds, exceptfds, timeout) );
}
#endif

/*
 * Wrapper for poll(s, timeout).
 * Auto restarts with adjusted timeout if interrupted by
 * signal other than our wakeup signal.
 */
int NET_Timeout(int s, long timeout) {
    long prevtime = 0, newtime;
    struct timeval t;
    fdEntry_t *fdEntry = getFdEntry(s);

    /*
     * Check that fd hasn't been closed.
     */
    if (fdEntry == NULL) {
        errno = EBADF;
        return -1;
    }

    /*
     * Pick up current time as may need to adjust timeout
     */
    if (timeout > 0) {
        gettimeofday(&t, NULL);
        prevtime = t.tv_sec * 1000 + t.tv_usec / 1000;
    }

    for (;;) {
        struct pollfd pfd;
        int rv;
        threadEntry_t self;

        /*
         * Poll the fd. If interrupted by our wakeup signal
         * errno will be set to EBADF.
         */
        pfd.fd = s;
        pfd.events = POLLIN | POLLERR;

        startOp(fdEntry, &self);
        rv = poll(&pfd, 1, timeout);
        endOp(fdEntry, &self);

        /*
         * If interrupted then adjust timeout. If timeout
         * has expired return 0 (indicating timeout expired).
         */
        if (rv < 0 && errno == EINTR) {
            if (timeout > 0) {
                gettimeofday(&t, NULL);
                newtime = t.tv_sec * 1000 + t.tv_usec / 1000;
                timeout -= newtime - prevtime;
                if (timeout <= 0) {
                    return 0;
                }
                prevtime = newtime;
            }
        } else {
            return rv;
        }
    }
}
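
/*
 * Illustrative sketch (assumption, not part of the original code): a
 * hypothetical helper showing how NET_Timeout() and NET_Read() might be
 * combined to implement a read with a millisecond timeout.
 *
 *   int timed_read(int s, void *buf, size_t len, long millis) {
 *       int rv = NET_Timeout(s, millis);
 *       if (rv <= 0) {
 *           return rv;          // 0 => timed out, -1 => error or fd closed
 *       }
 *       return NET_Read(s, buf, len);
 *   }
 */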
--- EOF ---