1 /*
2 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "jvm.h"
27 #include "classfile/classLoader.hpp"
28 #include "classfile/javaClasses.hpp"
29 #include "classfile/moduleEntry.hpp"
30 #include "classfile/systemDictionary.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "code/codeCache.hpp"
33 #include "code/icBuffer.hpp"
34 #include "code/vtableStubs.hpp"
35 #include "gc/shared/gcVMOperations.hpp"
36 #include "logging/log.hpp"
37 #include "interpreter/interpreter.hpp"
38 #include "logging/log.hpp"
39 #include "logging/logStream.hpp"
40 #include "memory/allocation.inline.hpp"
41 #include "memory/guardedMemory.hpp"
42 #include "memory/resourceArea.hpp"
43 #include "memory/universe.hpp"
44 #include "oops/compressedOops.inline.hpp"
45 #include "oops/oop.inline.hpp"
46 #include "prims/jvm_misc.hpp"
47 #include "runtime/arguments.hpp"
48 #include "runtime/atomic.hpp"
49 #include "runtime/frame.inline.hpp"
50 #include "runtime/handles.inline.hpp"
51 #include "runtime/interfaceSupport.inline.hpp"
52 #include "runtime/java.hpp"
53 #include "runtime/javaCalls.hpp"
54 #include "runtime/mutexLocker.hpp"
55 #include "runtime/os.inline.hpp"
56 #include "runtime/sharedRuntime.hpp"
57 #include "runtime/stubRoutines.hpp"
58 #include "runtime/thread.inline.hpp"
59 #include "runtime/threadSMR.hpp"
60 #include "runtime/vm_version.hpp"
61 #include "services/attachListener.hpp"
62 #include "services/mallocTracker.hpp"
63 #include "services/memTracker.hpp"
64 #include "services/nmtCommon.hpp"
65 #include "services/threadService.hpp"
66 #include "utilities/align.hpp"
67 #include "utilities/defaultStream.hpp"
68 #include "utilities/events.hpp"
69
70 # include <signal.h>
71 # include <errno.h>
72
// Definitions of the os class statics declared in os.hpp.
OSThread* os::_starting_thread = NULL;
address os::_polling_page = NULL;
volatile unsigned int os::_rand_seed = 1;  // seed for os::random(), updated via CAS
int os::_processor_count = 0;
int os::_initial_active_processor_count = 0;
size_t os::_page_sizes[os::page_sizes_max];

#ifndef PRODUCT
// Debug-build allocation statistics, maintained by os::malloc/realloc/free.
julong os::num_mallocs = 0;         // # of calls to malloc/realloc
julong os::alloc_bytes = 0;         // # of bytes allocated
julong os::num_frees = 0;           // # of calls to free
julong os::free_bytes = 0;          // # of bytes freed
#endif

static size_t cur_malloc_words = 0;  // current size for MallocMaxTestWords

DEBUG_ONLY(bool os::_mutex_init_done = false;)
90
// Returns the platform's notion of the offset (in seconds) between UTC and
// local time for the given broken-down time.
// NOTE(review): tm_gmtoff is conventionally seconds *east* of UTC while the
// POSIX 'timezone' global is seconds *west* of UTC — confirm that callers
// (os::iso8601_time) expect the value each platform branch produces.
static time_t get_timezone(const struct tm* time_struct) {
#if defined(_ALLBSD_SOURCE)
  // BSD/macOS carry the offset directly in struct tm.
  return time_struct->tm_gmtoff;
#elif defined(_WINDOWS)
  long zone;
  _get_timezone(&zone);
  return static_cast<time_t>(zone);
#else
  // POSIX global, set as a side effect of localtime()/tzset().
  return timezone;
#endif
}
102
// printf-style formatting into buf of capacity len, delegating to the
// platform-safe os::vsnprintf. Returns whatever os::vsnprintf returns.
int os::snprintf(char* buf, size_t len, const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  int result = os::vsnprintf(buf, len, fmt, args);
  va_end(args);
  return result;
}
110
111 // Fill in buffer with current local time as an ISO-8601 string.
112 // E.g., yyyy-mm-ddThh:mm:ss-zzzz.
113 // Returns buffer, or NULL if it failed.
114 // This would mostly be a call to
115 // strftime(...., "%Y-%m-%d" "T" "%H:%M:%S" "%z", ....)
116 // except that on Windows the %z behaves badly, so we do it ourselves.
117 // Also, people wanted milliseconds on there,
118 // and strftime doesn't do milliseconds.
119 char* os::iso8601_time(char* buffer, size_t buffer_length, bool utc) {
120 // Output will be of the form "YYYY-MM-DDThh:mm:ss.mmm+zzzz\0"
121 // 1 2
122 // 12345678901234567890123456789
123 // format string: "%04d-%02d-%02dT%02d:%02d:%02d.%03d%c%02d%02d"
124 static const size_t needed_buffer = 29;
125
126 // Sanity check the arguments
127 if (buffer == NULL) {
128 assert(false, "NULL buffer");
129 return NULL;
130 }
131 if (buffer_length < needed_buffer) {
132 assert(false, "buffer_length too small");
133 return NULL;
134 }
135 // Get the current time
136 jlong milliseconds_since_19700101 = javaTimeMillis();
137 const int milliseconds_per_microsecond = 1000;
138 const time_t seconds_since_19700101 =
139 milliseconds_since_19700101 / milliseconds_per_microsecond;
140 const int milliseconds_after_second =
141 milliseconds_since_19700101 % milliseconds_per_microsecond;
142 // Convert the time value to a tm and timezone variable
143 struct tm time_struct;
144 if (utc) {
145 if (gmtime_pd(&seconds_since_19700101, &time_struct) == NULL) {
146 assert(false, "Failed gmtime_pd");
147 return NULL;
148 }
149 } else {
150 if (localtime_pd(&seconds_since_19700101, &time_struct) == NULL) {
151 assert(false, "Failed localtime_pd");
152 return NULL;
153 }
154 }
155 const time_t zone = get_timezone(&time_struct);
156
157 // If daylight savings time is in effect,
158 // we are 1 hour East of our time zone
159 const time_t seconds_per_minute = 60;
160 const time_t minutes_per_hour = 60;
161 const time_t seconds_per_hour = seconds_per_minute * minutes_per_hour;
162 time_t UTC_to_local = zone;
163 if (time_struct.tm_isdst > 0) {
164 UTC_to_local = UTC_to_local - seconds_per_hour;
165 }
166
167 // No offset when dealing with UTC
168 if (utc) {
169 UTC_to_local = 0;
170 }
171
172 // Compute the time zone offset.
173 // localtime_pd() sets timezone to the difference (in seconds)
174 // between UTC and and local time.
175 // ISO 8601 says we need the difference between local time and UTC,
176 // we change the sign of the localtime_pd() result.
177 const time_t local_to_UTC = -(UTC_to_local);
178 // Then we have to figure out if if we are ahead (+) or behind (-) UTC.
179 char sign_local_to_UTC = '+';
180 time_t abs_local_to_UTC = local_to_UTC;
181 if (local_to_UTC < 0) {
182 sign_local_to_UTC = '-';
183 abs_local_to_UTC = -(abs_local_to_UTC);
184 }
185 // Convert time zone offset seconds to hours and minutes.
186 const time_t zone_hours = (abs_local_to_UTC / seconds_per_hour);
187 const time_t zone_min =
188 ((abs_local_to_UTC % seconds_per_hour) / seconds_per_minute);
189
190 // Print an ISO 8601 date and time stamp into the buffer
191 const int year = 1900 + time_struct.tm_year;
192 const int month = 1 + time_struct.tm_mon;
193 const int printed = jio_snprintf(buffer, buffer_length,
194 "%04d-%02d-%02dT%02d:%02d:%02d.%03d%c%02d%02d",
195 year,
196 month,
197 time_struct.tm_mday,
198 time_struct.tm_hour,
199 time_struct.tm_min,
200 time_struct.tm_sec,
201 milliseconds_after_second,
202 sign_local_to_UTC,
203 zone_hours,
204 zone_min);
205 if (printed == 0) {
206 assert(false, "Failed jio_printf");
207 return NULL;
208 }
209 return buffer;
210 }
211
212 OSReturn os::set_priority(Thread* thread, ThreadPriority p) {
213 debug_only(Thread::check_for_dangling_thread_pointer(thread);)
214
215 if ((p >= MinPriority && p <= MaxPriority) ||
216 (p == CriticalPriority && thread->is_ConcurrentGC_thread())) {
217 int priority = java_to_os_priority[p];
218 return set_native_priority(thread, priority);
219 } else {
220 assert(false, "Should not happen");
221 return OS_ERR;
222 }
223 }
224
225 // The mapping from OS priority back to Java priority may be inexact because
226 // Java priorities can map M:1 with native priorities. If you want the definite
227 // Java priority then use JavaThread::java_priority()
228 OSReturn os::get_priority(const Thread* const thread, ThreadPriority& priority) {
229 int p;
230 int os_prio;
231 OSReturn ret = get_native_priority(thread, &os_prio);
232 if (ret != OS_OK) return ret;
233
234 if (java_to_os_priority[MaxPriority] > java_to_os_priority[MinPriority]) {
235 for (p = MaxPriority; p > MinPriority && java_to_os_priority[p] > os_prio; p--) ;
236 } else {
237 // niceness values are in reverse order
238 for (p = MaxPriority; p > MinPriority && java_to_os_priority[p] < os_prio; p--) ;
239 }
240 priority = (ThreadPriority)p;
241 return OS_OK;
242 }
243
244 bool os::dll_build_name(char* buffer, size_t size, const char* fname) {
245 int n = jio_snprintf(buffer, size, "%s%s%s", JNI_LIB_PREFIX, fname, JNI_LIB_SUFFIX);
246 return (n != -1);
247 }
248
#if !defined(LINUX) && !defined(_WINDOWS)
// Generic fallback for platforms without a specific implementation:
// report the whole queried range as committed.
bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
  committed_start = start;
  committed_size = size;
  return true;
}
#endif
256
257 // Helper for dll_locate_lib.
258 // Pass buffer and printbuffer as we already printed the path to buffer
259 // when we called get_current_directory. This way we avoid another buffer
260 // of size MAX_PATH.
261 static bool conc_path_file_and_check(char *buffer, char *printbuffer, size_t printbuflen,
262 const char* pname, char lastchar, const char* fname) {
263
264 // Concatenate path and file name, but don't print double path separators.
265 const char *filesep = (WINDOWS_ONLY(lastchar == ':' ||) lastchar == os::file_separator()[0]) ?
266 "" : os::file_separator();
267 int ret = jio_snprintf(printbuffer, printbuflen, "%s%s%s", pname, filesep, fname);
268 // Check whether file exists.
269 if (ret != -1) {
270 struct stat statbuf;
271 return os::stat(buffer, &statbuf) == 0;
272 }
273 return false;
274 }
275
276 // Frees all memory allocated on the heap for the
277 // supplied array of arrays of chars (a), where n
278 // is the number of elements in the array.
279 static void free_array_of_char_arrays(char** a, size_t n) {
280 while (n > 0) {
281 n--;
282 if (a[n] != NULL) {
283 FREE_C_HEAP_ARRAY(char, a[n]);
284 }
285 }
286 FREE_C_HEAP_ARRAY(char*, a);
287 }
288
289 bool os::dll_locate_lib(char *buffer, size_t buflen,
290 const char* pname, const char* fname) {
291 bool retval = false;
292
293 size_t fullfnamelen = strlen(JNI_LIB_PREFIX) + strlen(fname) + strlen(JNI_LIB_SUFFIX);
294 char* fullfname = NEW_C_HEAP_ARRAY(char, fullfnamelen + 1, mtInternal);
295 if (dll_build_name(fullfname, fullfnamelen + 1, fname)) {
296 const size_t pnamelen = pname ? strlen(pname) : 0;
297
298 if (pnamelen == 0) {
299 // If no path given, use current working directory.
300 const char* p = get_current_directory(buffer, buflen);
301 if (p != NULL) {
302 const size_t plen = strlen(buffer);
303 const char lastchar = buffer[plen - 1];
304 retval = conc_path_file_and_check(buffer, &buffer[plen], buflen - plen,
305 "", lastchar, fullfname);
306 }
307 } else if (strchr(pname, *os::path_separator()) != NULL) {
308 // A list of paths. Search for the path that contains the library.
309 size_t n;
310 char** pelements = split_path(pname, &n, fullfnamelen);
311 if (pelements != NULL) {
312 for (size_t i = 0; i < n; i++) {
313 char* path = pelements[i];
314 // Really shouldn't be NULL, but check can't hurt.
315 size_t plen = (path == NULL) ? 0 : strlen(path);
316 if (plen == 0) {
317 continue; // Skip the empty path values.
318 }
319 const char lastchar = path[plen - 1];
320 retval = conc_path_file_and_check(buffer, buffer, buflen, path, lastchar, fullfname);
321 if (retval) break;
322 }
323 // Release the storage allocated by split_path.
324 free_array_of_char_arrays(pelements, n);
325 }
326 } else {
327 // A definite path.
328 const char lastchar = pname[pnamelen-1];
329 retval = conc_path_file_and_check(buffer, buffer, buflen, pname, lastchar, fullfname);
330 }
331 }
332
333 FREE_C_HEAP_ARRAY(char*, fullfname);
334 return retval;
335 }
336
// --------------------- sun.misc.Signal (optional) ---------------------


// SIGBREAK is sent by the keyboard to query the VM state; platforms without
// a native SIGBREAK (POSIX) reuse SIGQUIT for the same purpose.
#ifndef SIGBREAK
#define SIGBREAK SIGQUIT
#endif

// sigexitnum_pd is a platform-specific special signal used for terminating the Signal thread.

347
// Entry point of the "Signal Dispatcher" JavaThread (started by
// os::initialize_jdk_signal_support). Loops waiting on os::signal_wait()
// and handles each posted signal: the platform's private exit signal
// terminates the loop, SIGBREAK triggers VM diagnostics (and possibly the
// attach listener), and everything else is forwarded to
// jdk.internal.misc.Signal.dispatch in Java.
static void signal_thread_entry(JavaThread* thread, TRAPS) {
  os::set_priority(thread, NearMaxPriority);
  while (true) {
    int sig;
    {
      // FIXME : Currently we have not decided what should be the status
      //         for this java thread blocked here. Once we decide about
      //         that we should fix this.
      sig = os::signal_wait();
    }
    if (sig == os::sigexitnum_pd()) {
      // Terminate the signal thread
      return;
    }

    switch (sig) {
      case SIGBREAK: {
#if INCLUDE_SERVICES
        // Check if the signal is a trigger to start the Attach Listener - in that
        // case don't print stack traces.
        if (!DisableAttachMechanism) {
          // Attempt to transit state to AL_INITIALIZING.
          AttachListenerState cur_state = AttachListener::transit_state(AL_INITIALIZING, AL_NOT_INITIALIZED);
          if (cur_state == AL_INITIALIZING) {
            // Attach Listener has been started to initialize. Ignore this signal.
            continue;
          } else if (cur_state == AL_NOT_INITIALIZED) {
            // Start to initialize.
            if (AttachListener::is_init_trigger()) {
              // Attach Listener has been initialized.
              // Accept subsequent request.
              continue;
            } else {
              // Attach Listener could not be started.
              // So we need to transit the state to AL_NOT_INITIALIZED.
              AttachListener::set_state(AL_NOT_INITIALIZED);
            }
          } else if (AttachListener::check_socket_file()) {
            // Attach Listener has been started, but unix domain socket file
            // does not exist. So restart Attach Listener.
            continue;
          }
        }
#endif
        // Print stack traces
        // Any SIGBREAK operations added here should make sure to flush
        // the output stream (e.g. tty->flush()) after output.  See 4803766.
        // Each module also prints an extra carriage return after its output.
        VM_PrintThreads op;
        VMThread::execute(&op);
        VM_PrintJNI jni_op;
        VMThread::execute(&jni_op);
        VM_FindDeadlocks op1(tty);
        VMThread::execute(&op1);
        Universe::print_heap_at_SIGBREAK();
        if (PrintClassHistogram) {
          VM_GC_HeapInspection op1(tty, true /* force full GC before heap inspection */);
          VMThread::execute(&op1);
        }
        if (JvmtiExport::should_post_data_dump()) {
          JvmtiExport::post_data_dump();
        }
        break;
      }
      default: {
        // Dispatch the signal to java
        HandleMark hm(THREAD);
        Klass* klass = SystemDictionary::resolve_or_null(vmSymbols::jdk_internal_misc_Signal(), THREAD);
        if (klass != NULL) {
          JavaValue result(T_VOID);
          JavaCallArguments args;
          args.push_int(sig);
          JavaCalls::call_static(
            &result,
            klass,
            vmSymbols::dispatch_name(),
            vmSymbols::int_void_signature(),
            &args,
            THREAD
          );
        }
        if (HAS_PENDING_EXCEPTION) {
          // tty is initialized early so we don't expect it to be null, but
          // if it is we can't risk doing an initialization that might
          // trigger additional out-of-memory conditions
          if (tty != NULL) {
            char klass_name[256];
            char tmp_sig_name[16];
            const char* sig_name = "UNKNOWN";
            InstanceKlass::cast(PENDING_EXCEPTION->klass())->
              name()->as_klass_external_name(klass_name, 256);
            if (os::exception_name(sig, tmp_sig_name, 16) != NULL)
              sig_name = tmp_sig_name;
            warning("Exception %s occurred dispatching signal %s to handler"
                    "- the VM may need to be forcibly terminated",
                    klass_name, sig_name );
          }
          CLEAR_PENDING_EXCEPTION;
        }
      }
    }
  }
}
451
// One-time initialization that must run before ergonomic decisions are made.
// Ordering matters: large page support and the zone sizes below feed into
// the stack-size minimums computed later in os::init_2().
void os::init_before_ergo() {
  initialize_initial_active_processor_count();
  // We need to initialize large page support here because ergonomics takes some
  // decisions depending on large page support and the calculated large page size.
  large_page_init();

  // We need to adapt the configured number of stack protection pages given
  // in 4K pages to the actual os page size. We must do this before setting
  // up minimal stack sizes etc. in os::init_2().
  JavaThread::set_stack_red_zone_size     (align_up(StackRedPages      * 4 * K, vm_page_size()));
  JavaThread::set_stack_yellow_zone_size  (align_up(StackYellowPages   * 4 * K, vm_page_size()));
  JavaThread::set_stack_reserved_zone_size(align_up(StackReservedPages * 4 * K, vm_page_size()));
  JavaThread::set_stack_shadow_zone_size  (align_up(StackShadowPages   * 4 * K, vm_page_size()));

  // VM version initialization identifies some characteristics of the
  // platform that are used during ergonomic decisions.
  VM_Version::init_before_ergo();
}
470
// Create and start the "Signal Dispatcher" JavaThread (running
// signal_thread_entry above) unless -XX:+ReduceSignalUsage is set, and
// install the user handler for SIGBREAK. The java.lang.Thread object is
// constructed and registered in the system thread group before the native
// thread is started.
void os::initialize_jdk_signal_support(TRAPS) {
  if (!ReduceSignalUsage) {
    // Setup JavaThread for processing signals
    const char thread_name[] = "Signal Dispatcher";
    Handle string = java_lang_String::create_from_str(thread_name, CHECK);

    // Initialize thread_oop to put it into the system threadGroup
    Handle thread_group (THREAD, Universe::system_thread_group());
    Handle thread_oop = JavaCalls::construct_new_instance(SystemDictionary::Thread_klass(),
                           vmSymbols::threadgroup_string_void_signature(),
                           thread_group,
                           string,
                           CHECK);

    // ThreadGroup.add(thread) — make the new thread visible in its group.
    Klass* group = SystemDictionary::ThreadGroup_klass();
    JavaValue result(T_VOID);
    JavaCalls::call_special(&result,
                            thread_group,
                            group,
                            vmSymbols::add_method_name(),
                            vmSymbols::thread_void_signature(),
                            thread_oop,
                            CHECK);

    { MutexLocker mu(Threads_lock);
      JavaThread* signal_thread = new JavaThread(&signal_thread_entry);

      // At this point it may be possible that no osthread was created for the
      // JavaThread due to lack of memory. We would have to throw an exception
      // in that case. However, since this must work and we do not allow
      // exceptions anyway, check and abort if this fails.
      if (signal_thread == NULL || signal_thread->osthread() == NULL) {
        vm_exit_during_initialization("java.lang.OutOfMemoryError",
                                      os::native_thread_creation_failed_msg());
      }

      // Link the Java-level Thread object and the native thread together
      // while holding the Threads_lock, then start it.
      java_lang_Thread::set_thread(thread_oop(), signal_thread);
      java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
      java_lang_Thread::set_daemon(thread_oop());

      signal_thread->set_threadObj(thread_oop());
      Threads::add(signal_thread);
      Thread::start(signal_thread);
    }
    // Handle ^BREAK
    os::signal(SIGBREAK, os::user_handler());
  }
}
519
520
521 void os::terminate_signal_thread() {
522 if (!ReduceSignalUsage)
523 signal_notify(sigexitnum_pd());
524 }
525
526
// --------------------- loading libraries ---------------------

// Signature of a library's JNI_OnLoad entry point.
typedef jint (JNICALL *JNI_OnLoad_t)(JavaVM *, void *);
extern struct JavaVM_ main_vm;

// Cached handle for the native java library; loaded lazily by
// os::native_java_library() below.
static void* _native_java_library = NULL;
533
534 void* os::native_java_library() {
535 if (_native_java_library == NULL) {
536 char buffer[JVM_MAXPATHLEN];
537 char ebuf[1024];
538
539 // Load java dll
540 if (dll_locate_lib(buffer, sizeof(buffer), Arguments::get_dll_dir(),
541 "java")) {
542 _native_java_library = dll_load(buffer, ebuf, sizeof(ebuf));
543 }
544 if (_native_java_library == NULL) {
545 vm_exit_during_initialization("Unable to load native library", ebuf);
546 }
547
548 #if defined(__OpenBSD__)
549 // Work-around OpenBSD's lack of $ORIGIN support by pre-loading libnet.so
550 // ignore errors
551 if (dll_locate_lib(buffer, sizeof(buffer), Arguments::get_dll_dir(),
552 "net")) {
553 dll_load(buffer, ebuf, sizeof(ebuf));
554 }
555 #endif
556 }
557 return _native_java_library;
558 }
559
560 /*
561 * Support for finding Agent_On(Un)Load/Attach<_lib_name> if it exists.
562 * If check_lib == true then we are looking for an
563 * Agent_OnLoad_lib_name or Agent_OnAttach_lib_name function to determine if
564 * this library is statically linked into the image.
565 * If check_lib == false then we will look for the appropriate symbol in the
566 * executable if agent_lib->is_static_lib() == true or in the shared library
567 * referenced by 'handle'.
568 */
569 void* os::find_agent_function(AgentLibrary *agent_lib, bool check_lib,
570 const char *syms[], size_t syms_len) {
571 assert(agent_lib != NULL, "sanity check");
572 const char *lib_name;
573 void *handle = agent_lib->os_lib();
574 void *entryName = NULL;
575 char *agent_function_name;
576 size_t i;
577
578 // If checking then use the agent name otherwise test is_static_lib() to
579 // see how to process this lookup
580 lib_name = ((check_lib || agent_lib->is_static_lib()) ? agent_lib->name() : NULL);
581 for (i = 0; i < syms_len; i++) {
582 agent_function_name = build_agent_function_name(syms[i], lib_name, agent_lib->is_absolute_path());
583 if (agent_function_name == NULL) {
584 break;
585 }
586 entryName = dll_lookup(handle, agent_function_name);
587 FREE_C_HEAP_ARRAY(char, agent_function_name);
588 if (entryName != NULL) {
589 break;
590 }
591 }
592 return entryName;
593 }
594
// See if the passed in agent is statically linked into the VM image.
// Temporarily points the agent's os_lib at this process' own handle and
// probes for an Agent_OnLoad/OnAttach_<lib_name> entry point.
bool os::find_builtin_agent(AgentLibrary *agent_lib, const char *syms[],
                            size_t syms_len) {
  void *ret;
  void *proc_handle;
  void *save_handle;

  assert(agent_lib != NULL, "sanity check");
  if (agent_lib->name() == NULL) {
    return false;
  }
  proc_handle = get_default_process_handle();
  // Check for Agent_OnLoad/Attach_lib_name function
  save_handle = agent_lib->os_lib();
  // We want to look in this process' symbol table.
  agent_lib->set_os_lib(proc_handle);
  ret = find_agent_function(agent_lib, true, syms, syms_len);
  if (ret != NULL) {
    // Found an entry point like Agent_OnLoad_lib_name so we have a static agent.
    // Note: os_lib deliberately remains the process handle in this case;
    // save_handle is only restored on the failure path below.
    agent_lib->set_valid();
    agent_lib->set_static_lib(true);
    return true;
  }
  agent_lib->set_os_lib(save_handle);
  return false;
}
621
622 // --------------------- heap allocation utilities ---------------------
623
624 char *os::strdup(const char *str, MEMFLAGS flags) {
625 size_t size = strlen(str);
626 char *dup_str = (char *)malloc(size + 1, flags);
627 if (dup_str == NULL) return NULL;
628 strcpy(dup_str, str);
629 return dup_str;
630 }
631
632 char* os::strdup_check_oom(const char* str, MEMFLAGS flags) {
633 char* p = os::strdup(str, flags);
634 if (p == NULL) {
635 vm_exit_out_of_memory(strlen(str) + 1, OOM_MALLOC_ERROR, "os::strdup_check_oom");
636 }
637 return p;
638 }
639
640
#define paranoid                 0  /* only set to 1 if you suspect checking code has bug */

#ifdef ASSERT

// Verify the guard zones of a GuardedMemory-wrapped allocation (ptr is the
// user pointer). On corruption, logs the malloc/free counters plus the
// guard diagnostics and aborts the VM.
static void verify_memory(void* ptr) {
  GuardedMemory guarded(ptr);
  if (!guarded.verify_guards()) {
    LogTarget(Warning, malloc, free) lt;
    ResourceMark rm;
    LogStream ls(lt);
    ls.print_cr("## nof_mallocs = " UINT64_FORMAT ", nof_frees = " UINT64_FORMAT, os::num_mallocs, os::num_frees);
    ls.print_cr("## memory stomp:");
    guarded.print_on(&ls);
    fatal("memory stomping error");
  }
}

#endif
659
//
// This function supports testing of the malloc out of memory
// condition without really running the system out of memory.
//
// Returns true when -XX:MallocMaxTestWords is set and the simulated limit
// would be exceeded by this allocation; otherwise charges the allocation
// against the running total and returns false.
static bool has_reached_max_malloc_test_peak(size_t alloc_size) {
  if (MallocMaxTestWords > 0) {
    size_t words = (alloc_size / BytesPerWord);

    // NOTE(review): the limit check and the Atomic::add are not a single
    // atomic step, so concurrent callers can overshoot the limit slightly;
    // presumably acceptable for this test-only flag — confirm.
    if ((cur_malloc_words + words) > MallocMaxTestWords) {
      return true;
    }
    Atomic::add(words, &cur_malloc_words);
  }
  return false;
}
675
// Convenience overload: attribute the allocation to the caller's pc.
void* os::malloc(size_t size, MEMFLAGS flags) {
  return os::malloc(size, flags, CALLER_PC);
}
679
// VM-internal malloc: wraps ::malloc with NMT accounting (a header is
// prepended at tracking levels that need one) and, in debug builds, guard
// zones via GuardedMemory. Returns the user pointer, or NULL on failure.
void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
  NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
  NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));

  // Since os::malloc can be called when the libjvm.{dll,so} is
  // first loaded and we don't have a thread yet we must accept NULL also here.
  assert(!os::ThreadCrashProtection::is_crash_protected(Thread::current_or_null()),
         "malloc() not allowed when crash protection is set");

  if (size == 0) {
    // return a valid pointer if size is zero
    // if NULL is returned the calling functions assume out of memory.
    size = 1;
  }

  // NMT support
  NMT_TrackingLevel level = MemTracker::tracking_level();
  size_t            nmt_header_size = MemTracker::malloc_header_size(level);

#ifndef ASSERT
  const size_t alloc_size = size + nmt_header_size;
#else
  // Debug builds additionally reserve space for the guard zones.
  const size_t alloc_size = GuardedMemory::get_total_size(size + nmt_header_size);
  if (size + nmt_header_size > alloc_size) { // Check for rollover.
    return NULL;
  }
#endif

  // For the test flag -XX:MallocMaxTestWords
  if (has_reached_max_malloc_test_peak(size)) {
    return NULL;
  }

  u_char* ptr;
  ptr = (u_char*)::malloc(alloc_size);

#ifdef ASSERT
  if (ptr == NULL) {
    return NULL;
  }
  // Wrap memory with guard
  GuardedMemory guarded(ptr, size + nmt_header_size);
  ptr = guarded.get_user_ptr();

  // -XX:MallocCatchPtr: break when a specific address is handed out.
  if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
    log_warning(malloc, free)("os::malloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, p2i(ptr));
    breakpoint();
  }
  if (paranoid) {
    verify_memory(ptr);
  }
#endif

  // we do not track guard memory
  return MemTracker::record_malloc((address)ptr, size, memflags, stack, level);
}
736
// Convenience overload: attribute the reallocation to the caller's pc.
void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) {
  return os::realloc(memblock, size, flags, CALLER_PC);
}
740
// VM-internal realloc with NMT accounting. In product builds this delegates
// to ::realloc on the NMT base pointer; in debug builds the block is always
// moved through os::malloc/os::free so the guard zones stay intact.
void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {

  // For the test flag -XX:MallocMaxTestWords
  if (has_reached_max_malloc_test_peak(size)) {
    return NULL;
  }

  if (size == 0) {
    // return a valid pointer if size is zero
    // if NULL is returned the calling functions assume out of memory.
    size = 1;
  }

#ifndef ASSERT
  NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
  NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
  // NMT support
  NMT_TrackingLevel level = MemTracker::tracking_level();
  // Unregister the old block, resize starting from its NMT base pointer,
  // then re-register the (possibly moved) block.
  void* membase = MemTracker::record_free(memblock, level);
  size_t nmt_header_size = MemTracker::malloc_header_size(level);
  void* ptr = ::realloc(membase, size + nmt_header_size);
  return MemTracker::record_malloc(ptr, size, memflags, stack, level);
#else
  if (memblock == NULL) {
    return os::malloc(size, memflags, stack);
  }
  if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
    log_warning(malloc, free)("os::realloc caught " PTR_FORMAT, p2i(memblock));
    breakpoint();
  }
  // NMT support
  void* membase = MemTracker::malloc_base(memblock);
  verify_memory(membase);
  // always move the block
  void* ptr = os::malloc(size, memflags, stack);
  // Copy to new memory if malloc didn't fail
  if (ptr != NULL ) {
    GuardedMemory guarded(MemTracker::malloc_base(memblock));
    // Guard's user data contains NMT header
    size_t memblock_size = guarded.get_user_size() - MemTracker::malloc_header_size(memblock);
    memcpy(ptr, memblock, MIN2(size, memblock_size));
    if (paranoid) {
      verify_memory(MemTracker::malloc_base(ptr));
    }
    os::free(memblock);
  }
  return ptr;
#endif
}
790
// Counterpart to os::malloc/os::realloc: unregisters the block from NMT and
// releases the underlying C-heap storage. Handles NULL pointers.
// In debug builds the guard zones are verified before the block is freed.
void os::free(void *memblock) {
  NOT_PRODUCT(inc_stat_counter(&num_frees, 1));
#ifdef ASSERT
  if (memblock == NULL) return;
  if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
    log_warning(malloc, free)("os::free caught " PTR_FORMAT, p2i(memblock));
    breakpoint();
  }
  void* membase = MemTracker::record_free(memblock, MemTracker::tracking_level());
  verify_memory(membase);

  GuardedMemory guarded(membase);
  size_t size = guarded.get_user_size();
  inc_stat_counter(&free_bytes, size);
  // release_for_freeing() returns the raw pointer originally obtained from ::malloc.
  membase = guarded.release_for_freeing();
  ::free(membase);
#else
  void* membase = MemTracker::record_free(memblock, MemTracker::tracking_level());
  ::free(membase);
#endif
}
813
// Seed the VM-global pseudo random number generator used by os::random().
void os::init_random(unsigned int initval) {
  _rand_seed = initval;
}
817
818
// Computes the next value of the Park-Miller generator for the given seed.
static int random_helper(unsigned int rand_seed) {
  /* standard, well-known linear congruential random generator with
   * next_rand = (16807*seed) mod (2**31-1)
   * see
   * (1) "Random Number Generators: Good Ones Are Hard to Find",
   *      S.K. Park and K.W. Miller, Communications of the ACM 31:10 (Oct 1988),
   * (2) "Two Fast Implementations of the 'Minimal Standard' Random
   *     Number Generator", David G. Carta, Comm. ACM 33, 1 (Jan 1990), pp. 87-88.
   */
  const unsigned int a = 16807;
  const unsigned int m = 2147483647;
  const int q = m / a;        assert(q == 127773, "weird math");
  const int r = m % a;        assert(r == 2836, "weird math");

  // Carta's method: split the product a*seed into a low 31-bit part (lo)
  // and the remaining high bits (hi), then fold hi back in mod 2^31-1
  // without ever needing a 64-bit multiply or a division.
  unsigned int lo = a * (rand_seed & 0xFFFF);
  unsigned int hi = a * (rand_seed >> 16);
  lo += (hi & 0x7FFF) << 16;

  // if lo exceeded 2^31-1, wrap it into range and carry the overflow bit
  if (lo > m) {
    lo &= m;
    ++lo;
  }
  lo += hi >> 15;

  // the addition above may overflow 2^31-1 as well; wrap again if so
  if (lo > m) {
    lo &= m;
    ++lo;
  }
  return lo;
}
852
// Returns the next pseudo random number; thread safe via a CAS retry loop
// on the shared seed.
int os::random() {
  // Make updating the random seed thread safe.
  while (true) {
    unsigned int seed = _rand_seed;
    unsigned int rand = random_helper(seed);
    // Publish the new seed only if no other thread advanced it in the
    // meantime; otherwise loop and recompute from the fresh seed.
    if (Atomic::cmpxchg(rand, &_rand_seed, seed) == seed) {
      return static_cast<int>(rand);
    }
  }
}
863
// The INITIALIZED state is distinguished from the SUSPENDED state because the
// conditions in which a thread is first started are different from those in which
// a suspension is resumed.  These differences make it hard for us to apply the
// tougher checks when starting threads that we want to do when resuming them.
// However, when start_thread is called as a result of Thread.start, on a Java
// thread, the operation is synchronized on the Java Thread object.  So there
// cannot be a race to start the thread and hence for the thread to exit while
// we are working on it.  Non-Java threads that start Java threads either have
// to do so in a context in which races are impossible, or should do appropriate
// locking.

// Marks the thread RUNNABLE and hands off to the platform-specific start,
// holding SR_lock to guard against concurrent suspend/resume.
void os::start_thread(Thread* thread) {
  // guard suspend/resume
  MutexLocker ml(thread->SR_lock(), Mutex::_no_safepoint_check_flag);
  OSThread* osthread = thread->osthread();
  osthread->set_state(RUNNABLE);
  pd_start_thread(thread);
}
882
// Abort the VM; a core dump is only produced when both the caller requests
// one and -XX:+CreateCoredumpOnCrash is set.
void os::abort(bool dump_core) {
  abort(dump_core && CreateCoredumpOnCrash, NULL, NULL);
}
886
887 //---------------------------------------------------------------------------
888 // Helper functions for fatal error handler
889
890 void os::print_hex_dump(outputStream* st, address start, address end, int unitsize) {
891 assert(unitsize == 1 || unitsize == 2 || unitsize == 4 || unitsize == 8, "just checking");
892
893 start = align_down(start, unitsize);
894
895 int cols = 0;
896 int cols_per_line = 0;
897 switch (unitsize) {
898 case 1: cols_per_line = 16; break;
899 case 2: cols_per_line = 8; break;
900 case 4: cols_per_line = 4; break;
901 case 8: cols_per_line = 2; break;
902 default: return;
903 }
904
905 address p = start;
906 st->print(PTR_FORMAT ": ", p2i(start));
907 while (p < end) {
908 if (is_readable_pointer(p)) {
909 switch (unitsize) {
910 case 1: st->print("%02x", *(u1*)p); break;
911 case 2: st->print("%04x", *(u2*)p); break;
912 case 4: st->print("%08x", *(u4*)p); break;
913 case 8: st->print("%016" FORMAT64_MODIFIER "x", *(u8*)p); break;
914 }
915 } else {
916 st->print("%*.*s", 2*unitsize, 2*unitsize, "????????????????");
917 }
918 p += unitsize;
919 cols++;
920 if (cols >= cols_per_line && p < end) {
921 cols = 0;
922 st->cr();
923 st->print(PTR_FORMAT ": ", p2i(p));
924 } else {
925 st->print(" ");
926 }
927 }
928 st->cr();
929 }
930
// Dump a hex window of +/-256 bytes around the faulting pc for the error
// report; 'unitsize' selects the byte grouping passed to print_hex_dump.
void os::print_instructions(outputStream* st, address pc, int unitsize) {
  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", p2i(pc));
  print_hex_dump(st, pc - 256, pc + 256, unitsize);
}
935
936 void os::print_environment_variables(outputStream* st, const char** env_list) {
937 if (env_list) {
938 st->print_cr("Environment Variables:");
939
940 for (int i = 0; env_list[i] != NULL; i++) {
941 char *envvar = ::getenv(env_list[i]);
942 if (envvar != NULL) {
943 st->print("%s", env_list[i]);
944 st->print("=");
945 st->print_cr("%s", envvar);
946 }
947 }
948 }
949 }
950
// Print a one-line CPU summary, then platform-specific CPU details.
// Called from the fatal error handler, so it must avoid anything that is
// unsafe after a crash.
void os::print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  // cpu
  st->print("CPU:");
  st->print("total %d", os::processor_count());
  // It's not safe to query number of active processors after crash
  // st->print("(active %d)", os::active_processor_count()); but we can
  // print the initial number of active processors.
  // We access the raw value here because the assert in the accessor will
  // fail if the crash occurs before initialization of this value.
  st->print(" (initial active %d)", _initial_active_processor_count);
  st->print(" %s", VM_Version::features_string());
  st->cr();
  pd_print_cpu_info(st, buf, buflen);
}
965
966 // Print a one line string summarizing the cpu, number of cores, memory, and operating system version
967 void os::print_summary_info(outputStream* st, char* buf, size_t buflen) {
968 st->print("Host: ");
969 #ifndef PRODUCT
970 if (get_host_name(buf, buflen)) {
971 st->print("%s, ", buf);
972 }
973 #endif // PRODUCT
974 get_summary_cpu_info(buf, buflen);
975 st->print("%s, ", buf);
976 size_t mem = physical_memory()/G;
977 if (mem == 0) { // for low memory systems
978 mem = physical_memory()/M;
979 st->print("%d cores, " SIZE_FORMAT "M, ", processor_count(), mem);
980 } else {
981 st->print("%d cores, " SIZE_FORMAT "G, ", processor_count(), mem);
982 }
983 get_summary_os_info(buf, buflen);
984 st->print_raw(buf);
985 st->cr();
986 }
987
988 void os::print_date_and_time(outputStream *st, char* buf, size_t buflen) {
989 const int secs_per_day = 86400;
990 const int secs_per_hour = 3600;
991 const int secs_per_min = 60;
992
993 time_t tloc;
994 (void)time(&tloc);
995 char* timestring = ctime(&tloc); // ctime adds newline.
996 // edit out the newline
997 char* nl = strchr(timestring, '\n');
998 if (nl != NULL) {
999 *nl = '\0';
1000 }
1001
1002 struct tm tz;
1003 if (localtime_pd(&tloc, &tz) != NULL) {
1004 ::strftime(buf, buflen, "%Z", &tz);
1005 st->print("Time: %s %s", timestring, buf);
1006 } else {
1007 st->print("Time: %s", timestring);
1008 }
1009
1010 double t = os::elapsedTime();
1011 // NOTE: It tends to crash after a SEGV if we want to printf("%f",...) in
1012 // Linux. Must be a bug in glibc ? Workaround is to round "t" to int
1013 // before printf. We lost some precision, but who cares?
1014 int eltime = (int)t; // elapsed time in seconds
1015
1016 // print elapsed time in a human-readable format:
1017 int eldays = eltime / secs_per_day;
1018 int day_secs = eldays * secs_per_day;
1019 int elhours = (eltime - day_secs) / secs_per_hour;
1020 int hour_secs = elhours * secs_per_hour;
1021 int elmins = (eltime - day_secs - hour_secs) / secs_per_min;
1022 int minute_secs = elmins * secs_per_min;
1023 int elsecs = (eltime - day_secs - hour_secs - minute_secs);
1024 st->print_cr(" elapsed time: %d seconds (%dd %dh %dm %ds)", eltime, eldays, elhours, elmins, elsecs);
1025 }
1026
1027
1028 // Check if pointer can be read from (4-byte read access).
1029 // Helps to prove validity of a not-NULL pointer.
1030 // Returns true in very early stages of VM life when stub is not yet generated.
1031 #define SAFEFETCH_DEFAULT true
1032 bool os::is_readable_pointer(const void* p) {
1033 if (!CanUseSafeFetch32()) {
1034 return SAFEFETCH_DEFAULT;
1035 }
1036 int* const aligned = (int*) align_down((intptr_t)p, 4);
1037 int cafebabe = 0xcafebabe; // tester value 1
1038 int deadbeef = 0xdeadbeef; // tester value 2
1039 return (SafeFetch32(aligned, cafebabe) != cafebabe) || (SafeFetch32(aligned, deadbeef) != deadbeef);
1040 }
1041
1042 bool os::is_readable_range(const void* from, const void* to) {
1043 if ((uintptr_t)from >= (uintptr_t)to) return false;
1044 for (uintptr_t p = align_down((uintptr_t)from, min_page_size()); p < (uintptr_t)to; p += min_page_size()) {
1045 if (!is_readable_pointer((const void*)p)) {
1046 return false;
1047 }
1048 }
1049 return true;
1050 }
1051
1052
// moved from debug.cpp (used to be find()) but still called from there
// The verbose parameter is only set by the debug code in one case
//
// Best-effort diagnostic: try to identify what 'x' points at (code blob,
// heap object, JNI handle, thread/stack, metaspace, compressed klass) and
// describe it on 'st'. Probes are ordered from cheap/safe to speculative;
// each one returns on a hit. Used from the error handler, so it must
// tolerate arbitrary garbage addresses.
void os::print_location(outputStream* st, intptr_t x, bool verbose) {
  address addr = (address)x;
  // Handle NULL first, so later checks don't need to protect against it.
  if (addr == NULL) {
    st->print_cr("0x0 is NULL");
    return;
  }

  // Check if addr points into a code blob.
  CodeBlob* b = CodeCache::find_blob_unsafe(addr);
  if (b != NULL) {
    b->dump_for_addr(addr, st, verbose);
    return;
  }

  // Check if addr points into Java heap.
  if (Universe::heap()->print_location(st, addr)) {
    return;
  }

  // Remembered here because several later probes dereference addr.
  bool accessible = is_readable_pointer(addr);

  // Check if addr is a JNI handle.
  // Skipped for addresses in the first machine word and for unreadable
  // memory, since the handle checks read through the candidate pointer.
  if (align_down((intptr_t)addr, sizeof(intptr_t)) != 0 && accessible) {
    if (JNIHandles::is_global_handle((jobject) addr)) {
      st->print_cr(INTPTR_FORMAT " is a global jni handle", p2i(addr));
      return;
    }
    if (JNIHandles::is_weak_global_handle((jobject) addr)) {
      st->print_cr(INTPTR_FORMAT " is a weak global jni handle", p2i(addr));
      return;
    }
#ifndef PRODUCT
    // we don't keep the block list in product mode
    if (JNIHandles::is_local_handle((jobject) addr)) {
      st->print_cr(INTPTR_FORMAT " is a local jni handle", p2i(addr));
      return;
    }
#endif
  }

  // Check if addr belongs to a Java thread.
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
    // If the addr is a java thread print information about that.
    if (addr == (address)thread) {
      if (verbose) {
        thread->print_on(st);
      } else {
        st->print_cr(INTPTR_FORMAT " is a thread", p2i(addr));
      }
      return;
    }
    // If the addr is in the stack region for this thread then report that
    // and print thread info
    if (thread->on_local_stack(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into the stack for thread: "
                   INTPTR_FORMAT, p2i(addr), p2i(thread));
      if (verbose) thread->print_on(st);
      return;
    }
  }

  // Check if in metaspace and print types that have vptrs
  if (Metaspace::contains(addr)) {
    if (Klass::is_valid((Klass*)addr)) {
      st->print_cr(INTPTR_FORMAT " is a pointer to class: ", p2i(addr));
      ((Klass*)addr)->print_on(st);
    } else if (Method::is_valid_method((const Method*)addr)) {
      ((Method*)addr)->print_value_on(st);
      st->cr();
    } else {
      // Use addr->print() from the debugger instead (not here)
      st->print_cr(INTPTR_FORMAT " is pointing into metadata", p2i(addr));
    }
    return;
  }

  // Compressed klass needs to be decoded first.
#ifdef _LP64
  // Only values that fit in 32 bits can be narrow klass pointers.
  if (UseCompressedClassPointers && ((uintptr_t)addr &~ (uintptr_t)max_juint) == 0) {
    narrowKlass narrow_klass = (narrowKlass)(uintptr_t)addr;
    Klass* k = CompressedKlassPointers::decode_raw(narrow_klass);

    if (Klass::is_valid(k)) {
      st->print_cr(UINT32_FORMAT " is a compressed pointer to class: " INTPTR_FORMAT, narrow_klass, p2i((HeapWord*)k));
      k->print_on(st);
      return;
    }
  }
#endif

  // Try an OS specific find
  if (os::find(addr, st)) {
    return;
  }

  // Last resort: dump the word containing addr byte-by-byte if readable.
  if (accessible) {
    st->print(INTPTR_FORMAT " points into unknown readable memory:", p2i(addr));
    for (address p = addr; p < align_up(addr + 1, sizeof(intptr_t)); ++p) {
      st->print(" %02x", *(u1*)p);
    }
    st->cr();
    return;
  }

  st->print_cr(INTPTR_FORMAT " is an unknown value", p2i(addr));
}
1162
// Looks like all platforms can use the same function to check if C
// stack is walkable beyond current frame. The check for fp() is not
// necessary on Sparc, but it's harmless.
//
// Returns true when the sender frame of 'fr' cannot be trusted (i.e. this
// is effectively the first walkable C frame). All checks are heuristics
// against common garbage values; a 'true' only means "stop walking here".
bool os::is_first_C_frame(frame* fr) {
  // Load up sp, fp, sender sp and sender fp, check for reasonable values.
  // Check usp first, because if that's bad the other accessors may fault
  // on some architectures. Ditto ufp second, etc.
  uintptr_t fp_align_mask = (uintptr_t)(sizeof(address)-1);
  // sp on amd can be 32 bit aligned.
  uintptr_t sp_align_mask = (uintptr_t)(sizeof(int)-1);

  uintptr_t usp = (uintptr_t)fr->sp();
  if ((usp & sp_align_mask) != 0) return true;

  uintptr_t ufp = (uintptr_t)fr->fp();
  if ((ufp & fp_align_mask) != 0) return true;

  // Sender values must be aligned and not 0 or -1 (common garbage values).
  uintptr_t old_sp = (uintptr_t)fr->sender_sp();
  if ((old_sp & sp_align_mask) != 0) return true;
  if (old_sp == 0 || old_sp == (uintptr_t)-1) return true;

  uintptr_t old_fp = (uintptr_t)fr->link();
  if ((old_fp & fp_align_mask) != 0) return true;
  if (old_fp == 0 || old_fp == (uintptr_t)-1 || old_fp == ufp) return true;

  // stack grows downwards; if old_fp is below current fp or if the stack
  // frame is too large, either the stack is corrupted or fp is not saved
  // on stack (i.e. on x86, ebp may be used as general register). The stack
  // is not walkable beyond current frame.
  if (old_fp < ufp) return true;
  if (old_fp - ufp > 64 * K) return true;

  return false;
}
1197
1198
1199 // Set up the boot classpath.
1200
1201 char* os::format_boot_path(const char* format_string,
1202 const char* home,
1203 int home_len,
1204 char fileSep,
1205 char pathSep) {
1206 assert((fileSep == '/' && pathSep == ':') ||
1207 (fileSep == '\\' && pathSep == ';'), "unexpected separator chars");
1208
1209 // Scan the format string to determine the length of the actual
1210 // boot classpath, and handle platform dependencies as well.
1211 int formatted_path_len = 0;
1212 const char* p;
1213 for (p = format_string; *p != 0; ++p) {
1214 if (*p == '%') formatted_path_len += home_len - 1;
1215 ++formatted_path_len;
1216 }
1217
1218 char* formatted_path = NEW_C_HEAP_ARRAY(char, formatted_path_len + 1, mtInternal);
1219
1220 // Create boot classpath from format, substituting separator chars and
1221 // java home directory.
1222 char* q = formatted_path;
1223 for (p = format_string; *p != 0; ++p) {
1224 switch (*p) {
1225 case '%':
1226 strcpy(q, home);
1227 q += home_len;
1228 break;
1229 case '/':
1230 *q++ = fileSep;
1231 break;
1232 case ':':
1233 *q++ = pathSep;
1234 break;
1235 default:
1236 *q++ = *p;
1237 }
1238 }
1239 *q = '\0';
1240
1241 assert((q - formatted_path) == formatted_path_len, "formatted_path size botched");
1242 return formatted_path;
1243 }
1244
1245 // This function is a proxy to fopen, it tries to add a non standard flag ('e' or 'N')
1246 // that ensures automatic closing of the file on exec. If it can not find support in
1247 // the underlying c library, it will make an extra system call (fcntl) to ensure automatic
1248 // closing of the file on exec.
1249 FILE* os::fopen(const char* path, const char* mode) {
1250 char modified_mode[20];
1251 assert(strlen(mode) + 1 < sizeof(modified_mode), "mode chars plus one extra must fit in buffer");
1252 sprintf(modified_mode, "%s" LINUX_ONLY("e") BSD_ONLY("e") WINDOWS_ONLY("N"), mode);
1253 FILE* file = ::fopen(path, modified_mode);
1254
1255 #if !(defined LINUX || defined BSD || defined _WINDOWS)
1256 // assume fcntl FD_CLOEXEC support as a backup solution when 'e' or 'N'
1257 // is not supported as mode in fopen
1258 if (file != NULL) {
1259 int fd = fileno(file);
1260 if (fd != -1) {
1261 int fd_flags = fcntl(fd, F_GETFD);
1262 if (fd_flags != -1) {
1263 fcntl(fd, F_SETFD, fd_flags | FD_CLOEXEC);
1264 }
1265 }
1266 }
1267 #endif
1268
1269 return file;
1270 }
1271
// Determine and register the system boot class path: either the "modules"
// jimage of a regular modular image, or the exploded-modules directory of
// a developer build. Returns false if neither exists or allocation fails.
bool os::set_boot_path(char fileSep, char pathSep) {
  const char* home = Arguments::get_java_home();
  int home_len = (int)strlen(home);

  struct stat st;

  // modular image if "modules" jimage exists
  char* jimage = format_boot_path("%/lib/" MODULES_IMAGE_NAME, home, home_len, fileSep, pathSep);
  if (jimage == NULL) return false;
  bool has_jimage = (os::stat(jimage, &st) == 0);
  if (has_jimage) {
    // 'true' marks the sysclasspath as coming from the default image.
    Arguments::set_sysclasspath(jimage, true);
    FREE_C_HEAP_ARRAY(char, jimage);
    return true;
  }
  FREE_C_HEAP_ARRAY(char, jimage);

  // check if developer build with exploded modules
  char* base_classes = format_boot_path("%/modules/" JAVA_BASE_NAME, home, home_len, fileSep, pathSep);
  if (base_classes == NULL) return false;
  if (os::stat(base_classes, &st) == 0) {
    Arguments::set_sysclasspath(base_classes, false);
    FREE_C_HEAP_ARRAY(char, base_classes);
    return true;
  }
  FREE_C_HEAP_ARRAY(char, base_classes);

  return false;
}
1301
1302 // Splits a path, based on its separator, the number of
1303 // elements is returned back in "elements".
1304 // file_name_length is used as a modifier for each path's
1305 // length when compared to JVM_MAXPATHLEN. So if you know
1306 // each returned path will have something appended when
1307 // in use, you can pass the length of that in
1308 // file_name_length, to ensure we detect if any path
1309 // exceeds the maximum path length once prepended onto
1310 // the sub-path/file name.
1311 // It is the callers responsibility to:
1312 // a> check the value of "elements", which may be 0.
1313 // b> ignore any empty path elements
1314 // c> free up the data.
1315 char** os::split_path(const char* path, size_t* elements, size_t file_name_length) {
1316 *elements = (size_t)0;
1317 if (path == NULL || strlen(path) == 0 || file_name_length == (size_t)NULL) {
1318 return NULL;
1319 }
1320 const char psepchar = *os::path_separator();
1321 char* inpath = NEW_C_HEAP_ARRAY(char, strlen(path) + 1, mtInternal);
1322 strcpy(inpath, path);
1323 size_t count = 1;
1324 char* p = strchr(inpath, psepchar);
1325 // Get a count of elements to allocate memory
1326 while (p != NULL) {
1327 count++;
1328 p++;
1329 p = strchr(p, psepchar);
1330 }
1331
1332 char** opath = NEW_C_HEAP_ARRAY(char*, count, mtInternal);
1333
1334 // do the actual splitting
1335 p = inpath;
1336 for (size_t i = 0 ; i < count ; i++) {
1337 size_t len = strcspn(p, os::path_separator());
1338 if (len + file_name_length > JVM_MAXPATHLEN) {
1339 // release allocated storage before exiting the vm
1340 free_array_of_char_arrays(opath, i++);
1341 vm_exit_during_initialization("The VM tried to use a path that exceeds the maximum path length for "
1342 "this system. Review path-containing parameters and properties, such as "
1343 "sun.boot.library.path, to identify potential sources for this path.");
1344 }
1345 // allocate the string and add terminator storage
1346 char* s = NEW_C_HEAP_ARRAY(char, len + 1, mtInternal);
1347 strncpy(s, p, len);
1348 s[len] = '\0';
1349 opath[i] = s;
1350 p += len + 1;
1351 }
1352 FREE_C_HEAP_ARRAY(char, inpath);
1353 *elements = count;
1354 return opath;
1355 }
1356
// Returns true if the current stack pointer is above the stack shadow
// pages, false otherwise.
bool os::stack_shadow_pages_available(Thread *thread, const methodHandle& method, address sp) {
  // Only Java threads have guard/shadow zones.
  if (!thread->is_Java_thread()) return false;
  // Check if we have StackShadowPages above the yellow zone. This parameter
  // is dependent on the depth of the maximum VM call stack possible from
  // the handler for stack overflow. 'instanceof' in the stack overflow
  // handler or a println uses at least 8k stack of VM and native code
  // respectively.
  const int framesize_in_bytes =
    Interpreter::size_top_interpreter_activation(method()) * wordSize;

  // Lowest address that still leaves the guard and shadow zones intact.
  address limit = ((JavaThread*)thread)->stack_end() +
                  (JavaThread::stack_guard_zone_size() + JavaThread::stack_shadow_zone_size());

  return sp > (limit + framesize_in_bytes);
}
1374
1375 size_t os::page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned) {
1376 assert(min_pages > 0, "sanity");
1377 if (UseLargePages) {
1378 const size_t max_page_size = region_size / min_pages;
1379
1380 for (size_t i = 0; _page_sizes[i] != 0; ++i) {
1381 const size_t page_size = _page_sizes[i];
1382 if (page_size <= max_page_size) {
1383 if (!must_be_aligned || is_aligned(region_size, page_size)) {
1384 return page_size;
1385 }
1386 }
1387 }
1388 }
1389
1390 return vm_page_size();
1391 }
1392
// Page size for a region that must be an exact multiple of the page size.
size_t os::page_size_for_region_aligned(size_t region_size, size_t min_pages) {
  return page_size_for_region(region_size, min_pages, true);
}
1396
// Page size for a region that need not be a multiple of the page size.
size_t os::page_size_for_region_unaligned(size_t region_size, size_t min_pages) {
  return page_size_for_region(region_size, min_pages, false);
}
1400
// Map an errno value to either its symbolic name (short_text == true) or
// a descriptive message. Unknown values yield "Unknown errno"/"Unknown
// error". Lookup is a linear scan over a static table, so it is safe to
// call from a signal/crash handler (no allocation, no locale).
static const char* errno_to_string (int e, bool short_text) {
  #define ALL_SHARED_ENUMS(X) \
    X(E2BIG, "Argument list too long") \
    X(EACCES, "Permission denied") \
    X(EADDRINUSE, "Address in use") \
    X(EADDRNOTAVAIL, "Address not available") \
    X(EAFNOSUPPORT, "Address family not supported") \
    X(EAGAIN, "Resource unavailable, try again") \
    X(EALREADY, "Connection already in progress") \
    X(EBADF, "Bad file descriptor") \
    X(EBADMSG, "Bad message") \
    X(EBUSY, "Device or resource busy") \
    X(ECANCELED, "Operation canceled") \
    X(ECHILD, "No child processes") \
    X(ECONNABORTED, "Connection aborted") \
    X(ECONNREFUSED, "Connection refused") \
    X(ECONNRESET, "Connection reset") \
    X(EDEADLK, "Resource deadlock would occur") \
    X(EDESTADDRREQ, "Destination address required") \
    X(EDOM, "Mathematics argument out of domain of function") \
    X(EEXIST, "File exists") \
    X(EFAULT, "Bad address") \
    X(EFBIG, "File too large") \
    X(EHOSTUNREACH, "Host is unreachable") \
    X(EIDRM, "Identifier removed") \
    X(EILSEQ, "Illegal byte sequence") \
    X(EINPROGRESS, "Operation in progress") \
    X(EINTR, "Interrupted function") \
    X(EINVAL, "Invalid argument") \
    X(EIO, "I/O error") \
    X(EISCONN, "Socket is connected") \
    X(EISDIR, "Is a directory") \
    X(ELOOP, "Too many levels of symbolic links") \
    X(EMFILE, "Too many open files") \
    X(EMLINK, "Too many links") \
    X(EMSGSIZE, "Message too large") \
    X(ENAMETOOLONG, "Filename too long") \
    X(ENETDOWN, "Network is down") \
    X(ENETRESET, "Connection aborted by network") \
    X(ENETUNREACH, "Network unreachable") \
    X(ENFILE, "Too many files open in system") \
    X(ENOBUFS, "No buffer space available") \
    X(ENODATA, "No message is available on the STREAM head read queue") \
    X(ENODEV, "No such device") \
    X(ENOENT, "No such file or directory") \
    X(ENOEXEC, "Executable file format error") \
    X(ENOLCK, "No locks available") \
    X(ENOLINK, "Reserved") \
    X(ENOMEM, "Not enough space") \
    X(ENOMSG, "No message of the desired type") \
    X(ENOPROTOOPT, "Protocol not available") \
    X(ENOSPC, "No space left on device") \
    X(ENOSR, "No STREAM resources") \
    X(ENOSTR, "Not a STREAM") \
    X(ENOSYS, "Function not supported") \
    X(ENOTCONN, "The socket is not connected") \
    X(ENOTDIR, "Not a directory") \
    X(ENOTEMPTY, "Directory not empty") \
    X(ENOTSOCK, "Not a socket") \
    X(ENOTSUP, "Not supported") \
    X(ENOTTY, "Inappropriate I/O control operation") \
    X(ENXIO, "No such device or address") \
    X(EOPNOTSUPP, "Operation not supported on socket") \
    X(EOVERFLOW, "Value too large to be stored in data type") \
    X(EPERM, "Operation not permitted") \
    X(EPIPE, "Broken pipe") \
    X(EPROTO, "Protocol error") \
    X(EPROTONOSUPPORT, "Protocol not supported") \
    X(EPROTOTYPE, "Protocol wrong type for socket") \
    X(ERANGE, "Result too large") \
    X(EROFS, "Read-only file system") \
    X(ESPIPE, "Invalid seek") \
    X(ESRCH, "No such process") \
    X(ETIME, "Stream ioctl() timeout") \
    X(ETIMEDOUT, "Connection timed out") \
    X(ETXTBSY, "Text file busy") \
    X(EWOULDBLOCK, "Operation would block") \
    X(EXDEV, "Cross-device link")

  #define DEFINE_ENTRY(e, text) { e, #e, text },

  static const struct {
    int v;
    const char* short_text;
    const char* long_text;
  } table [] = {

    ALL_SHARED_ENUMS(DEFINE_ENTRY)

    // The following enums are not defined on all platforms.
    #ifdef ESTALE
    DEFINE_ENTRY(ESTALE, "Reserved")
    #endif
    #ifdef EDQUOT
    DEFINE_ENTRY(EDQUOT, "Reserved")
    #endif
    #ifdef EMULTIHOP
    DEFINE_ENTRY(EMULTIHOP, "Reserved")
    #endif

    // End marker.
    { -1, "Unknown errno", "Unknown error" }

  };

  #undef DEFINE_ENTRY
  // Was '#undef ALL_FLAGS', which names a macro that was never defined;
  // undef the macro actually defined above so it does not leak out.
  #undef ALL_SHARED_ENUMS

  // Linear search; the v == -1 end marker doubles as the "unknown" entry.
  int i = 0;
  while (table[i].v != -1 && table[i].v != e) {
    i ++;
  }

  return short_text ? table[i].short_text : table[i].long_text;

}
1517
// Descriptive message for an errno value (e.g. "Permission denied").
// Unlike ::strerror, this is reentrant and crash-handler safe.
const char* os::strerror(int e) {
  return errno_to_string(e, false);
}
1521
// Symbolic name for an errno value (e.g. "EACCES").
const char* os::errno_name(int e) {
  return errno_to_string(e, true);
}
1525
// Log the given list of page sizes on the 'pagesize' tag at Info level.
void os::trace_page_sizes(const char* str, const size_t* page_sizes, int count) {
  LogTarget(Info, pagesize) log;
  if (log.is_enabled()) {
    LogStream out(log);

    out.print("%s: ", str);
    for (int i = 0; i < count; ++i) {
      out.print(" " SIZE_FORMAT, page_sizes[i]);
    }
    out.cr();
  }
}
1538
// Expands a byte size into the (value, unit-string) argument pair expected
// by a SIZE_FORMAT "%s" format specifier.
#define trace_page_size_params(size) byte_size_in_exact_unit(size), exact_unit_for_byte_size(size)

// Log a reserved region's min/max sizes, base address, page size and
// actual size on the 'pagesize' tag.
void os::trace_page_sizes(const char* str,
                          const size_t region_min_size,
                          const size_t region_max_size,
                          const size_t page_size,
                          const char* base,
                          const size_t size) {

  log_info(pagesize)("%s: "
                     " min=" SIZE_FORMAT "%s"
                     " max=" SIZE_FORMAT "%s"
                     " base=" PTR_FORMAT
                     " page_size=" SIZE_FORMAT "%s"
                     " size=" SIZE_FORMAT "%s",
                     str,
                     trace_page_size_params(region_min_size),
                     trace_page_size_params(region_max_size),
                     p2i(base),
                     trace_page_size_params(page_size),
                     trace_page_size_params(size));
}
1561
// Log the requested size, granted page size, alignment, base address and
// actual size of an allocation on the 'pagesize' tag.
void os::trace_page_sizes_for_requested_size(const char* str,
                                             const size_t requested_size,
                                             const size_t page_size,
                                             const size_t alignment,
                                             const char* base,
                                             const size_t size) {

  log_info(pagesize)("%s:"
                     " req_size=" SIZE_FORMAT "%s"
                     " base=" PTR_FORMAT
                     " page_size=" SIZE_FORMAT "%s"
                     " alignment=" SIZE_FORMAT "%s"
                     " size=" SIZE_FORMAT "%s",
                     str,
                     trace_page_size_params(requested_size),
                     p2i(base),
                     trace_page_size_params(page_size),
                     trace_page_size_params(alignment),
                     trace_page_size_params(size));
}
1582
1583
1584 // This is the working definition of a server class machine:
1585 // >= 2 physical CPU's and >=2GB of memory, with some fuzz
1586 // because the graphics memory (?) sometimes masks physical memory.
1587 // If you want to change the definition of a server class machine
1588 // on some OS or platform, e.g., >=4GB on Windows platforms,
1589 // then you'll have to parameterize this method based on that state,
1590 // as was done for logical processors here, or replicate and
1591 // specialize this method for each platform. (Or fix os to have
1592 // some inheritance structure and use subclassing. Sigh.)
1593 // If you want some platform to always or never behave as a server
1594 // class machine, change the setting of AlwaysActAsServerClassMachine
1595 // and NeverActAsServerClassMachine in globals*.hpp.
1596 bool os::is_server_class_machine() {
1597 // First check for the early returns
1598 if (NeverActAsServerClassMachine) {
1599 return false;
1600 }
1601 if (AlwaysActAsServerClassMachine) {
1602 return true;
1603 }
1604 // Then actually look at the machine
1605 bool result = false;
1606 const unsigned int server_processors = 2;
1607 const julong server_memory = 2UL * G;
1608 // We seem not to get our full complement of memory.
1609 // We allow some part (1/8?) of the memory to be "missing",
1610 // based on the sizes of DIMMs, and maybe graphics cards.
1611 const julong missing_memory = 256UL * M;
1612
1613 /* Is this a server class machine? */
1614 if ((os::active_processor_count() >= (int)server_processors) &&
1615 (os::physical_memory() >= (server_memory - missing_memory))) {
1616 const unsigned int logical_processors =
1617 VM_Version::logical_processors_per_package();
1618 if (logical_processors > 1) {
1619 const unsigned int physical_packages =
1620 os::active_processor_count() / logical_processors;
1621 if (physical_packages >= server_processors) {
1622 result = true;
1623 }
1624 } else {
1625 result = true;
1626 }
1627 }
1628 return result;
1629 }
1630
// Capture the active processor count at VM startup; may only be set once.
// The saved value is used later in contexts (e.g. crash reporting) where
// re-querying the OS is unsafe.
void os::initialize_initial_active_processor_count() {
  assert(_initial_active_processor_count == 0, "Initial active processor count already set.");
  _initial_active_processor_count = active_processor_count();
  log_debug(os)("Initial active processor count set to %d" , _initial_active_processor_count);
}
1636
// Execute the task body and mark the task completed.
void os::SuspendedThreadTask::run() {
  internal_do_task();
  _done = true;
}
1641
// Create guard pages for a thread stack; platform-specific.
bool os::create_stack_guard_pages(char* addr, size_t bytes) {
  return os::pd_create_stack_guard_pages(addr, bytes);
}
1645
// Reserve 'bytes' of virtual address space, optionally backed by the file
// 'file_desc' (-1 for anonymous memory), and register the result with NMT.
// Returns NULL on failure.
char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint, int file_desc) {
  char* result = NULL;

  if (file_desc != -1) {
    // Could have called pd_reserve_memory() followed by replace_existing_mapping_with_file_mapping(),
    // but AIX may use SHM in which case its more trouble to detach the segment and remap memory to the file.
    result = os::map_memory_to_file(addr, bytes, file_desc);
    if (result != NULL) {
      // A file-backed mapping counts as reserved AND committed for NMT.
      MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
    }
  } else {
    result = pd_reserve_memory(bytes, addr, alignment_hint);
    if (result != NULL) {
      MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
    }
  }

  return result;
}
1665
// Reserve 'bytes' of anonymous virtual address space and tag the region
// with the given NMT memory type. Returns NULL on failure.
char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
   MEMFLAGS flags) {
  char* result = pd_reserve_memory(bytes, addr, alignment_hint);
  if (result != NULL) {
    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
    MemTracker::record_virtual_memory_type((address)result, flags);
  }

  return result;
}
1676
// Try to reserve 'bytes' at exactly 'addr' (optionally file-backed) and
// record the outcome with NMT. Returns NULL if the address is unavailable.
char* os::attempt_reserve_memory_at(size_t bytes, char* addr, int file_desc) {
  char* result = NULL;
  if (file_desc != -1) {
    result = pd_attempt_reserve_memory_at(bytes, addr, file_desc);
    if (result != NULL) {
      // File-backed mappings are committed as part of the reservation.
      MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
    }
  } else {
    result = pd_attempt_reserve_memory_at(bytes, addr);
    if (result != NULL) {
      MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
    }
  }
  return result;
}
1692
// Split an existing reservation at offset 'split'; platform-specific.
void os::split_reserved_memory(char *base, size_t size,
                               size_t split, bool realloc) {
  pd_split_reserved_memory(base, size, split, realloc);
}
1697
// Commit 'bytes' at 'addr'; the commit is recorded with NMT only when the
// platform operation succeeded.
bool os::commit_memory(char* addr, size_t bytes, bool executable) {
  bool res = pd_commit_memory(addr, bytes, executable);
  if (res) {
    MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
  }
  return res;
}
1705
// Commit with an alignment hint (e.g. for large-page placement); recorded
// with NMT only on success.
bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
                       bool executable) {
  bool res = os::pd_commit_memory(addr, size, alignment_hint, executable);
  if (res) {
    MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
  }
  return res;
}
1714
// Commit 'bytes' at 'addr' or abort the VM with message 'mesg'. On return
// the commit is guaranteed to have succeeded, so it is recorded with NMT
// unconditionally.
void os::commit_memory_or_exit(char* addr, size_t bytes, bool executable,
                               const char* mesg) {
  pd_commit_memory_or_exit(addr, bytes, executable, mesg);
  MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
}
1720
// As above, with an alignment hint for the platform layer.
void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
                               bool executable, const char* mesg) {
  os::pd_commit_memory_or_exit(addr, size, alignment_hint, executable, mesg);
  MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
}
1726
// Uncommit 'bytes' at 'addr'. When NMT tracks beyond the minimal level,
// a Tracker is constructed before the operation and records the range
// only if the uncommit succeeded.
bool os::uncommit_memory(char* addr, size_t bytes) {
  bool res;
  if (MemTracker::tracking_level() > NMT_minimal) {
    Tracker tkr(Tracker::uncommit);
    res = pd_uncommit_memory(addr, bytes);
    if (res) {
      tkr.record((address)addr, bytes);
    }
  } else {
    // NMT not interested; skip the Tracker entirely.
    res = pd_uncommit_memory(addr, bytes);
  }
  return res;
}
1740
// Release (unreserve) 'bytes' at 'addr'. Mirrors uncommit_memory: the NMT
// Tracker records the range only if the platform release succeeded.
bool os::release_memory(char* addr, size_t bytes) {
  bool res;
  if (MemTracker::tracking_level() > NMT_minimal) {
    Tracker tkr(Tracker::release);
    res = pd_release_memory(addr, bytes);
    if (res) {
      tkr.record((address)addr, bytes);
    }
  } else {
    res = pd_release_memory(addr, bytes);
  }
  return res;
}
1754
1755 void os::pretouch_memory(void* start, void* end, size_t page_size) {
1756 for (volatile char *p = (char*)start; p < (char*)end; p += page_size) {
1757 *p = 0;
1758 }
1759 }
1760
// Map a region of the given file into memory; on success the mapping is
// recorded with NMT as reserved and committed. Returns NULL on failure.
char* os::map_memory(int fd, const char* file_name, size_t file_offset,
                     char *addr, size_t bytes, bool read_only,
                     bool allow_exec) {
  char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
  if (result != NULL) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
  }
  return result;
}
1770
// Remap an existing file mapping at 'addr'; no NMT recording here since
// the region was already tracked when first mapped.
char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
                       char *addr, size_t bytes, bool read_only,
                       bool allow_exec) {
  return pd_remap_memory(fd, file_name, file_offset, addr, bytes,
                         read_only, allow_exec);
}
1777
// Unmap a file mapping. Mirrors release_memory: the NMT Tracker records
// the range (as a release) only if the platform unmap succeeded.
bool os::unmap_memory(char *addr, size_t bytes) {
  bool result;
  if (MemTracker::tracking_level() > NMT_minimal) {
    Tracker tkr(Tracker::release);
    result = pd_unmap_memory(addr, bytes);
    if (result) {
      tkr.record((address)addr, bytes);
    }
  } else {
    result = pd_unmap_memory(addr, bytes);
  }
  return result;
}
1791
// Platform-specific hint that the committed range may release its backing
// store (the reservation itself stays intact).
void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) {
  pd_free_memory(addr, bytes, alignment_hint);
}
1795
// Platform-specific hint to re-establish the preferred page alignment
// (e.g. large pages) for the given range.
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  pd_realign_memory(addr, bytes, alignment_hint);
}
1799
#ifndef _WINDOWS
/* try to switch state from state "from" to state "to"
 * returns the state set after the method is complete
 */
os::SuspendResume::State os::SuspendResume::switch_state(os::SuspendResume::State from,
                                                         os::SuspendResume::State to)
{
  // Single CAS attempt: on success the state is now 'to'; on failure
  // 'result' is whatever state another thread installed concurrently,
  // and the caller decides how to react.
  os::SuspendResume::State result = Atomic::cmpxchg(to, &_state, from);
  if (result == from) {
    // success
    return to;
  }
  return result;
}
#endif
1815
1816 // Convenience wrapper around naked_short_sleep to allow for longer sleep
1817 // times. Only for use by non-JavaThreads.
1818 void os::naked_sleep(jlong millis) {
1819 assert(!Thread::current()->is_Java_thread(), "not for use by JavaThreads");
1820 const jlong limit = 999;
1821 while (millis > limit) {
1822 naked_short_sleep(limit);
1823 millis -= limit;
1824 }
1825 naked_short_sleep(millis);
1826 }
--- EOF ---