/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
#define _WIN32_WINNT 0x0600

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_windows.h"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "oops/oop.inline.hpp"
#include "os_share_windows.hpp"
#include "os_windows.inline.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "semaphore_windows.hpp"
#include "services/attachListener.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "utilities/align.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
#include "windbghelp.hpp"


#ifdef _DEBUG
#include <crtdbg.h>
#endif


#include <windows.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/timeb.h>
#include <objidl.h>
#include <shlobj.h>

#include <malloc.h>
#include <signal.h>
#include <direct.h>
#include <errno.h>
#include <fcntl.h>
#include <io.h>
#include <process.h>              // For _beginthreadex(), _endthreadex()
#include <imagehlp.h>             // For os::dll_address_to_function_name
// for enumerating dll libraries
#include <vdmdbg.h>

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(-1)

// For DLL loading/load error detection
// Values of PE COFF
#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
#define IMAGE_FILE_SIGNATURE_LENGTH 4

static HANDLE main_process;
static HANDLE main_thread;
static int    main_thread_id;

static FILETIME process_creation_time;
static FILETIME process_exit_time;
static FILETIME process_user_time;
static FILETIME process_kernel_time;

#ifdef _M_AMD64
  #define __CPU__ amd64
#else
  #define __CPU__ i486
#endif

// save DLL module handle, used by GetModuleFileName

HINSTANCE vm_lib_handle;

BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
  switch (reason) {
  case DLL_PROCESS_ATTACH:
    vm_lib_handle = hinst;
    if (ForceTimeHighResolution) {
      timeBeginPeriod(1L);
    }
    break;
  case DLL_PROCESS_DETACH:
    if (ForceTimeHighResolution) {
      timeEndPeriod(1L);
    }
    break;
  default:
    break;
  }
  return true;
}

static inline double fileTimeAsDouble(FILETIME* time) {
  const double high  = (double) ((unsigned int) ~0);
  const double split = 10000000.0;
  double result = (time->dwLowDateTime / split) +
                   time->dwHighDateTime * (high/split);
  return result;
}

// Implementation of os

bool os::unsetenv(const char* name) {
  assert(name != NULL, "Null pointer");
  return (SetEnvironmentVariable(name, NULL) == TRUE);
}

// No setuid programs under Windows.
bool os::have_special_privileges() {
  return false;
}


// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI; we can add any periodic checks here.
// For Windows it does nothing at the moment.
void os::run_periodic_checks() {
  return;
}

// previous UnhandledExceptionFilter, if there is one
static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;

LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);

void os::init_system_properties_values() {
  // sysclasspath, java_home, dll_dir
  {
    char *home_path;
    char *dll_path;
    char *pslash;
    char *bin = "\\bin";
    char home_dir[MAX_PATH + 1];
    char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");

    if (alt_home_dir != NULL) {
      strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
      home_dir[MAX_PATH] = '\0';
    } else {
      os::jvm_path(home_dir, sizeof(home_dir));
      // Found the full path to jvm.dll.
      // Now cut the path to <java_home>/jre if we can.
      *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
      pslash = strrchr(home_dir, '\\');
      if (pslash != NULL) {
        *pslash = '\0';                   // get rid of \{client|server}
        pslash = strrchr(home_dir, '\\');
        if (pslash != NULL) {
          *pslash = '\0';                 // get rid of \bin
        }
      }
    }

    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
    if (home_path == NULL) {
      return;
    }
    strcpy(home_path, home_dir);
    Arguments::set_java_home(home_path);
    FREE_C_HEAP_ARRAY(char, home_path);

    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
                                mtInternal);
    if (dll_path == NULL) {
      return;
    }
    strcpy(dll_path, home_dir);
    strcat(dll_path, bin);
    Arguments::set_dll_dir(dll_path);
    FREE_C_HEAP_ARRAY(char, dll_path);

    if (!set_boot_path('\\', ';')) {
      return;
    }
  }

// library_path
#define EXT_DIR "\\lib\\ext"
#define BIN_DIR "\\bin"
#define PACKAGE_DIR "\\Sun\\Java"
  {
    // Win32 library search order (See the documentation for LoadLibrary):
    //
    // 1. The directory from which application is loaded.
    // 2. The system wide Java Extensions directory (Java only)
    // 3. System directory (GetSystemDirectory)
    // 4. Windows directory (GetWindowsDirectory)
    // 5. The PATH environment variable
    // 6. The current directory

    char *library_path;
    char tmp[MAX_PATH];
    char *path_str = ::getenv("PATH");

    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
                                    sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);

    library_path[0] = '\0';

    GetModuleFileName(NULL, tmp, sizeof(tmp));
    *(strrchr(tmp, '\\')) = '\0';
    strcat(library_path, tmp);

    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);
    strcat(library_path, PACKAGE_DIR BIN_DIR);

    GetSystemDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    if (path_str) {
      strcat(library_path, ";");
      strcat(library_path, path_str);
    }

    strcat(library_path, ";.");

    Arguments::set_library_path(library_path);
    FREE_C_HEAP_ARRAY(char, library_path);
  }

  // Default extensions directory
  {
    char path[MAX_PATH];
    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
    GetWindowsDirectory(path, MAX_PATH);
    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
            path, PACKAGE_DIR, EXT_DIR);
    Arguments::set_ext_dirs(buf);
  }
#undef EXT_DIR
#undef BIN_DIR
#undef PACKAGE_DIR

#ifndef _WIN64
  // set our UnhandledExceptionFilter and save any previous one
  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif

  // Done
  return;
}

void os::breakpoint() {
  DebugBreak();
}

// Invoked from the BREAKPOINT Macro
extern "C" void breakpoint() {
  os::breakpoint();
}

// RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
// So far, this method is only used by Native Memory Tracking, which is
// only supported on Windows XP or later.
//
int os::get_native_stack(address* stack, int frames, int toSkip) {
  int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
  for (int index = captured; index < frames; index ++) {
    stack[index] = NULL;
  }
  return captured;
}


// os::current_stack_base()
//
// Returns the base of the stack, which is the stack's
// starting address.  This function must be called
// while running on the stack of the thread being queried.

address os::current_stack_base() {
  MEMORY_BASIC_INFORMATION minfo;
  address stack_bottom;
  size_t stack_size;

  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  stack_bottom = (address)minfo.AllocationBase;
  stack_size = minfo.RegionSize;

  // Add up the sizes of all the regions with the same
  // AllocationBase.
  while (1) {
    VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
    if (stack_bottom == (address)minfo.AllocationBase) {
      stack_size += minfo.RegionSize;
    } else {
      break;
    }
  }
  return stack_bottom + stack_size;
}

size_t os::current_stack_size() {
  size_t sz;
  MEMORY_BASIC_INFORMATION minfo;
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
  return sz;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  const struct tm* time_struct_ptr = localtime(clock);
  if (time_struct_ptr != NULL) {
    *res = *time_struct_ptr;
    return res;
  }
  return NULL;
}

struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
  const struct tm* time_struct_ptr = gmtime(clock);
  if (time_struct_ptr != NULL) {
    *res = *time_struct_ptr;
    return res;
  }
  return NULL;
}

LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);

// Thread start routine for all newly created threads
static unsigned __stdcall thread_native_entry(Thread* thread) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  _alloca(((pid ^ counter++) & 7) * 128);

  thread->initialize_thread_current();

  OSThread* osthr = thread->osthread();
  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Diagnostic code to investigate JDK-6573254
  int res = 30115;  // non-java thread
  if (thread->is_Java_thread()) {
    res = 20115;    // java thread
  }

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());

  // Install a win32 structured exception handler around every thread created
  // by VM, so VM can generate error dump when an exception occurred in non-
  // Java thread (e.g. VM thread).
  __try {
    thread->run();
  } __except(topLevelExceptionFilter(
                                     (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::add code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
  }

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  // Thread must not return from exit_process_or_thread(), but if it does,
  // let it proceed to exit normally
  return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
}

static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
                                  int thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Initialize support for Java interrupts
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return NULL;
  }
  osthread->set_interrupt_event(interrupt_event);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}


bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  HANDLE thread_h;
  if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
                       &thread_h, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  OSThread* osthread = create_os_thread(thread, thread_h,
                                        (int)current_thread_id());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
                       os::current_thread_id());

  return true;
}

bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);
  return true;
}

// Helper function to trace _beginthreadex attributes,
// similar to os::Posix::describe_pthread_attr()
static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
                                               size_t stacksize, unsigned initflag) {
  stringStream ss(buf, buflen);
  if (stacksize == 0) {
    ss.print("stacksize: default, ");
  } else {
    ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
  }
  ss.print("flags: ");
  #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
  #define ALL(X) \
    X(CREATE_SUSPENDED) \
    X(STACK_SIZE_PARAM_IS_A_RESERVATION)
  ALL(PRINT_FLAG)
  #undef ALL
  #undef PRINT_FLAG
  return buf;
}

// Allocate and initialize a new OSThread
bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t stack_size) {
  unsigned thread_id;

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Initialize support for Java interrupts
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return false;
  }
  osthread->set_interrupt_event(interrupt_event);
  osthread->set_interrupted(false);

  thread->set_osthread(osthread);

  if (stack_size == 0) {
    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize, whose default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) {
        stack_size = JavaThread::stack_size_at_create();
      }
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }

  // Create the Win32 thread
  //
  // Contrary to what MSDN document says, "stack_size" in _beginthreadex()
  // does not specify stack size. Instead, it specifies the size of
  // initially committed space. The stack size is determined by
  // PE header in the executable. If the committed "stack_size" is larger
  // than default value in the PE header, the stack is rounded up to the
  // nearest multiple of 1MB. For example if the launcher has default
  // stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all, it only affects the initial
  // commitment. On the other hand, specifying 'stack_size' larger than
  // default value may cause significant increase in memory usage, because
  // not only the stack space will be rounded up to MB, but also the
  // entire space is committed upfront.
  //
  // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
  // for CreateThread() that can treat 'stack_size' as stack size. However we
  // are not supposed to call CreateThread() directly according to MSDN
  // document because JVM uses C runtime library. The good news is that the
  // flag appears to work with _beginthreadex() as well.
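  //
  // Illustrative sketch (the numbers are hypothetical, not from MSDN): with
  // the reservation flag set, a call such as
  //   _beginthreadex(NULL, 512 * K, entry, arg,
  //                  STACK_SIZE_PARAM_IS_A_RESERVATION, &tid);
  // reserves a 512k stack regardless of the PE-header default, whereas the
  // same call without the flag only changes the initial commit and leaves
  // the reserve size to the PE header (rounded up as described above).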

  const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
  HANDLE thread_handle =
    (HANDLE)_beginthreadex(NULL,
                           (unsigned)stack_size,
                           (unsigned (__stdcall *)(void*)) thread_native_entry,
                           thread,
                           initflag,
                           &thread_id);

  char buf[64];
  if (thread_handle != NULL) {
    log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
                         thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
  } else {
    log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
                            os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
  }

  if (thread_handle == NULL) {
    // Need to clean up stuff we've allocated so far
    CloseHandle(osthread->interrupt_event());
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}


// Free Win32 resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  assert(Thread::current()->osthread() == osthread,
         "os::free_thread but not current thread");

  CloseHandle(osthread->thread_handle());
  CloseHandle(osthread->interrupt_event());
  delete osthread;
}

static jlong first_filetime;
static jlong initial_performance_count;
static jlong performance_frequency;


jlong as_long(LARGE_INTEGER x) {
  jlong result = 0;  // initialization to avoid warning
  set_high(&result, x.HighPart);
  set_low(&result, x.LowPart);
  return result;
}


jlong os::elapsed_counter() {
  LARGE_INTEGER count;
  QueryPerformanceCounter(&count);
  return as_long(count) - initial_performance_count;
}


jlong os::elapsed_frequency() {
  return performance_frequency;
}


julong os::available_memory() {
  return win32::available_memory();
}

julong os::win32::available_memory() {
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);

  return (julong)ms.ullAvailPhys;
}

julong os::physical_memory() {
  return win32::physical_memory();
}

bool os::has_allocatable_memory_limit(julong* limit) {
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);
#ifdef _LP64
  *limit = (julong)ms.ullAvailVirtual;
  return true;
#else
  // Limit to 1400m because of the 2gb address space wall
  *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
  return true;
#endif
}

int os::active_processor_count() {
  DWORD_PTR lpProcessAffinityMask = 0;
  DWORD_PTR lpSystemAffinityMask = 0;
  int proc_count = processor_count();
  if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
      GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // Number of active processors is the number of bits in the process affinity mask
    int bitcount = 0;
    while (lpProcessAffinityMask != 0) {
      lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
      bitcount++;
    }
    return bitcount;
  } else {
    return proc_count;
  }
}

void os::set_native_thread_name(const char *name) {

  // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
  //
  // Note that unfortunately this only works if the process
  // is already attached to a debugger; debugger must observe
  // the exception below to show the correct name.

  // If there is no debugger attached skip raising the exception
  if (!IsDebuggerPresent()) {
    return;
  }

  const DWORD MS_VC_EXCEPTION = 0x406D1388;
  struct {
    DWORD dwType;     // must be 0x1000
    LPCSTR szName;    // pointer to name (in user addr space)
    DWORD dwThreadID; // thread ID (-1=caller thread)
    DWORD dwFlags;    // reserved for future use, must be zero
  } info;

  info.dwType = 0x1000;
  info.szName = name;
  info.dwThreadID = -1;
  info.dwFlags = 0;

  __try {
    RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
  } __except(EXCEPTION_EXECUTE_HANDLER) {}
}

bool os::distribute_processes(uint length, uint* distribution) {
  // Not yet implemented.
  return false;
}

bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}

void os::win32::initialize_performance_counter() {
  LARGE_INTEGER count;
  QueryPerformanceFrequency(&count);
  performance_frequency = as_long(count);
  QueryPerformanceCounter(&count);
  initial_performance_count = as_long(count);
}


double os::elapsedTime() {
  return (double) elapsed_counter() / (double) elapsed_frequency();
}


// Windows format:
//   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
// Java format:
//   Java standards require the number of milliseconds since 1/1/1970

// Constant offset - calculated using offset()
static jlong _offset   = 116444736000000000;
// Fake time counter for reproducible results when debugging
static jlong fake_time = 0;

#ifdef ASSERT
// Just to be safe, recalculate the offset in debug mode
static jlong _calculated_offset = 0;
static int   _has_calculated_offset = 0;

jlong offset() {
  if (_has_calculated_offset) return _calculated_offset;
  SYSTEMTIME java_origin;
  java_origin.wYear          = 1970;
  java_origin.wMonth         = 1;
  java_origin.wDayOfWeek     = 0; // ignored
  java_origin.wDay           = 1;
  java_origin.wHour          = 0;
  java_origin.wMinute        = 0;
  java_origin.wSecond        = 0;
  java_origin.wMilliseconds  = 0;
  FILETIME jot;
  if (!SystemTimeToFileTime(&java_origin, &jot)) {
    fatal("Error = %d\nWindows error", GetLastError());
  }
  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
  _has_calculated_offset = 1;
  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
  return _calculated_offset;
}
#else
jlong offset() {
  return _offset;
}
#endif

jlong windows_to_java_time(FILETIME wt) {
  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  return (a - offset()) / 10000;
}

// Returns time ticks in (10th of micro seconds)
jlong windows_to_time_ticks(FILETIME wt) {
  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  return (a - offset());
}

FILETIME java_to_windows_time(jlong l) {
  jlong a = (l * 10000) + offset();
  FILETIME result;
  result.dwHighDateTime = high(a);
  result.dwLowDateTime  = low(a);
  return result;
}

bool os::supports_vtime() { return true; }
bool os::enable_vtime() { return false; }
bool os::vtime_enabled() { return false; }

double os::elapsedVTime() {
  FILETIME created;
  FILETIME exited;
  FILETIME kernel;
  FILETIME user;
  if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
    // the resolution of windows_to_java_time() should be sufficient (ms)
    return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
  } else {
    return elapsedTime();
  }
}

jlong os::javaTimeMillis() {
  if (UseFakeTimers) {
    return fake_time++;
  } else {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    return windows_to_java_time(wt);
  }
}

void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  FILETIME wt;
  GetSystemTimeAsFileTime(&wt);
  jlong ticks = windows_to_time_ticks(wt); // 10th of micros
  jlong secs = jlong(ticks / 10000000); // 10000 * 1000
  seconds = secs;
  nanos = jlong(ticks - (secs*10000000)) * 100;
}

jlong os::javaTimeNanos() {
  LARGE_INTEGER current_count;
  QueryPerformanceCounter(&current_count);
  double current = as_long(current_count);
  double freq = performance_frequency;
  jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
  return time;
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  jlong freq = performance_frequency;
  if (freq < NANOSECS_PER_SEC) {
    // the performance counter is 64 bits and we will
    // be multiplying it -- so no wrap in 64 bits
    info_ptr->max_value = ALL_64_BITS;
  } else if (freq > NANOSECS_PER_SEC) {
    // use the max value the counter can reach to
    // determine the max value which could be returned
    julong max_counter = (julong)ALL_64_BITS;
    info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
  } else {
    // the performance counter is 64 bits and we will
    // be using it directly -- so no wrap in 64 bits
    info_ptr->max_value = ALL_64_BITS;
  }

  // using a counter, so no skipping
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;

  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}

char* os::local_time_string(char *buf, size_t buflen) {
  SYSTEMTIME st;
  GetLocalTime(&st);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
  return buf;
}

bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  HANDLE h_process = GetCurrentProcess();
  FILETIME create_time, exit_time, kernel_time, user_time;
  BOOL result = GetProcessTimes(h_process,
                                &create_time,
                                &exit_time,
                                &kernel_time,
                                &user_time);
  if (result != 0) {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    jlong rtc_millis = windows_to_java_time(wt);
    *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
    *process_user_time =
      (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
    *process_system_time =
      (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
    return true;
  } else {
    return false;
  }
}

void os::shutdown() {
  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}


static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
                                         PMINIDUMP_EXCEPTION_INFORMATION,
                                         PMINIDUMP_USER_STREAM_INFORMATION,
                                         PMINIDUMP_CALLBACK_INFORMATION);

static HANDLE dumpFile = NULL;

// Check if dump file can be created.
void os::check_dump_limit(char* buffer, size_t buffsz) {
  bool status = true;
  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
    jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
    status = false;
  }

#ifndef ASSERT
  if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
    jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
    status = false;
  }
#endif

  if (status) {
    const char* cwd = get_current_directory(NULL, 0);
    int pid = current_process_id();
    if (cwd != NULL) {
      jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
    } else {
      jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
    }

    if (dumpFile == NULL &&
        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
        == INVALID_HANDLE_VALUE) {
      jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
      status = false;
    }
  }
  VMError::record_coredump_status(buffer, status);
}

void os::abort(bool dump_core, void* siginfo, const void* context) {
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  MINIDUMP_TYPE dumpType;

  shutdown();
  if (!dump_core || dumpFile == NULL) {
    if (dumpFile != NULL) {
      CloseHandle(dumpFile);
    }
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
                             MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);

  if (siginfo != NULL && context != NULL) {
    ep.ContextRecord = (PCONTEXT) context;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }

  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If the first call fails, let's fall back to just using MiniDumpWithFullMemory.
  if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
      !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
    jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
  }
  CloseHandle(dumpFile);
  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
//  * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

// Caller must have already run dirname through JVM_NativePath, which removes
// duplicate slashes and converts all instances of '/' into '\\'.
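// (Illustrative example, not taken from the original sources: a path such as
// "C:/foo//bar" is expected to reach these routines already normalized by
// JVM_NativePath to "C:\foo\bar".)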

DIR * os::opendir(const char *dirname) {
  assert(dirname != NULL, "just checking");   // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
  DWORD fattr;                                // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == 0) {
    errno = ENOMEM;
    return 0;
  }

  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
  // as a directory in FindFirstFile().  We detect this case here and
  // prepend the current drive name.
  //
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    alt_dirname[0] = _getdrive() + 'A' - 1;
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dirp->path == 0) {
    free(dirp);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dirp->path, dirname);

  fattr = GetFileAttributes(dirp->path);
  if (fattr == 0xffffffff) {
    free(dirp->path);
    free(dirp);
    errno = ENOENT;
    return 0;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path);
    free(dirp);
    errno = ENOTDIR;
    return 0;
  }

  // Append "*.*", or possibly "\\*.*", to path
  if (dirp->path[1] == ':' &&
      (dirp->path[2] == '\0' ||
       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    // No '\\' needed for cases like "Z:" or "Z:\"
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path);
      free(dirp);
      errno = EACCES;
      return 0;
    }
  }
  return dirp;
}

// parameter dbuf unused on Windows
struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    return 0;
  }

  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return 0;
    }
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}

int os::closedir(DIR *dirp) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path);
  free(dirp);
  return 0;
}

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf) > 0) {
    return path_buf;
  } else {
    path_buf[0] = '\0';
    return path_buf;
  }
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
const char* os::get_current_directory(char *buf, size_t buflen) {
  int n = static_cast<int>(buflen);
  if (buflen > INT_MAX)  n = INT_MAX;
  return _getcwd(buf, n);
}

//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if the address is
// within the NTDLL address space.
//
static bool _addr_in_ntdll(address addr) {
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if (hmod == NULL) return false;
  if (!GetModuleInformation(GetCurrentProcess(), hmod,
                            &minfo, sizeof(MODULEINFO))) {
    return false;
  }

  if ((addr >= minfo.lpBaseOfDll) &&
      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
    return true;
  } else {
    return false;
  }
}
#endif

struct _modinfo {
  address addr;
  char*   full_path;  // point to a char buffer
  int     buflen;     // size of the buffer
  address base_addr;
};

static int _locate_module_by_addr(const char * mod_fname, address base_addr,
                                  address top_address, void * param) {
  struct _modinfo *pmod = (struct _modinfo *)param;
  if (!pmod) return -1;

  if (base_addr <= pmod->addr &&
      top_address > pmod->addr) {
    // if a buffer is provided, copy path name to the buffer
    if (pmod->full_path) {
      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
    }
    pmod->base_addr = base_addr;
    return 1;
  }
  return 0;
}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
  //       return the full path to the DLL file, sometimes it returns path
  //       to the corresponding PDB file (debug info); sometimes it only
  //       returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr      = addr;
  mi.full_path = buf;
  mi.buflen    = buflen;
  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
    return true;
  }
  if (offset != NULL)  *offset  = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
static int _locate_jvm_dll(const char* mod_fname, address base_addr,
                           address top_address, void * param) {
  if (!param) return -1;

  if (base_addr <= (address)_locate_jvm_dll &&
      top_address > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = top_address;
    return 1;
  }
  return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
bool os::address_is_in_vm(address addr) {
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(const char* fname, address base_address,
                         address top_address, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
  return 0;
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
  void * result = LoadLibrary(name);
  if (result != NULL) {
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
    ebuf[ebuflen - 1] = '\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read the dll-info and find that the dll was built
  // for an architecture other than the one Hotspot is running on,
  // then print to buffer "DLL was built for a different architecture";
  // else call os::lasterror to obtain the system error message.

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen - 1] = '\0';
  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
  if (fd < 0) {
    return NULL;
  }

  uint32_t signature_offset;
  uint16_t lib_arch = 0;
  bool failed_to_get_lib_arch =
    ( // Go to position 3c in the dll
      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
      ||
      // Read location of signature
      (sizeof(signature_offset) !=
       (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
      ||
      // Go to COFF File Header in dll
      // that is located after "signature" (4 bytes long)
      (os::seek_to_file_offset(fd,
                               signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
      ||
      // Read field that contains code of architecture
      // that dll was built for
      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
    );

  ::close(fd);
  if (failed_to_get_lib_arch) {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  typedef struct {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[] = {
    {IMAGE_FILE_MACHINE_I386,  (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"}
  };
#if (defined _M_AMD64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
         is defined :_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain what platform this .dll was built for
  // running_arch_str shall contain what platform Hotspot was built for
  char *running_arch_str = NULL, *lib_arch_str = NULL;
  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (lib_arch == arch_array[i].arch_code) {
      lib_arch_str = arch_array[i].arch_name;
    }
    if (running_arch == arch_array[i].arch_code) {
      running_arch_str = arch_array[i].arch_name;
    }
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch) {
    return NULL;
  }

  if (lib_arch_str != NULL) {
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str, running_arch_str);
  } else {
    // don't know what architecture this dll was built for
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch, running_arch_str);
  }

  return NULL;
}

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  get_loaded_modules_info(_print_module, (void *)st);
}

int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  HANDLE   hProcess;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[MAX_PATH];
  int         result = 0;

  int pid = os::current_process_id();
  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = callback(filename, (address)modinfo.lpBaseOfDll,
                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
}

bool os::get_host_name(char* buf, size_t buflen) {
  DWORD size = (DWORD)buflen;
  return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
}

void os::get_summary_os_info(char* buf, size_t buflen) {
  stringStream sst(buf, buflen);
  os::win32::print_windows_version(&sst);
  // chop off newline character
  char* nl = strchr(buf, '\n');
  if (nl != NULL) *nl = '\0';
}

int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
  int ret = vsnprintf(buf, len, fmt, args);
  // Get the correct buffer size if buf is too small
  if (ret < 0) {
    return _vscprintf(fmt, args);
  }
  return ret;
}

static inline time_t get_mtime(const char* filename) {
  struct stat st;
  int ret = os::stat(filename, &st);
  assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
  return st.st_mtime;
}

int os::compare_file_modified_times(const char* file1, const char* file2) {
  time_t t1 = get_mtime(file1);
  time_t t2 = get_mtime(file2);
  return t1 - t2;
}

void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}

void os::print_os_info(outputStream* st) {
#ifdef ASSERT
  char buffer[1024];
  st->print("HostName: ");
  if (get_host_name(buffer, sizeof(buffer))) {
    st->print("%s ", buffer);
  } else {
    st->print("N/A ");
  }
#endif
  st->print("OS:");
  os::win32::print_windows_version(st);
}

void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  VS_FIXEDFILEINFO *file_info;
  TCHAR kernel32_path[MAX_PATH];
  UINT len, ret;

  // Use the GetVersionEx information to see if we're on a server or
  // workstation edition of Windows. Starting with Windows 8.1 we can't
  // trust the OS version information returned by this API.
  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("Call to GetVersionEx failed");
    return;
  }
  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);

  // Get the full path to \Windows\System32\kernel32.dll and use that for
  // determining what version of Windows we're running on.
  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
  ret = GetSystemDirectory(kernel32_path, len);
  if (ret == 0 || ret > len) {
    st->print_cr("Call to GetSystemDirectory failed");
    return;
  }
  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);

  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
  if (version_size == 0) {
    st->print_cr("Call to GetFileVersionInfoSize failed");
    return;
  }

  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
  if (version_info == NULL) {
    st->print_cr("Failed to allocate version_info");
    return;
  }

  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
    os::free(version_info);
    st->print_cr("Call to GetFileVersionInfo failed");
    return;
  }

  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
    os::free(version_info);
    st->print_cr("Call to VerQueryValue failed");
    return;
  }

  int major_version = HIWORD(file_info->dwProductVersionMS);
  int minor_version = LOWORD(file_info->dwProductVersionMS);
  int build_number = HIWORD(file_info->dwProductVersionLS);
  int build_minor = LOWORD(file_info->dwProductVersionLS);
  int os_vers = major_version * 1000 + minor_version;
  os::free(version_info);

  st->print(" Windows ");
  switch (os_vers) {

  case 6000:
    if (is_workstation) {
      st->print("Vista");
    } else {
      st->print("Server 2008");
    }
    break;

  case 6001:
    if (is_workstation) {
      st->print("7");
    } else {
      st->print("Server 2008 R2");
    }
    break;

  case 6002:
    if (is_workstation) {
      st->print("8");
    } else {
      st->print("Server 2012");
    }
    break;

  case 6003:
    if (is_workstation) {
      st->print("8.1");
    } else {
      st->print("Server 2012 R2");
    }
    break;

  case 10000:
    if (is_workstation) {
      st->print("10");
    } else {
      st->print("Server 2016");
    }
    break;

  default:
    // Unrecognized windows, print out its major and minor versions
    st->print("%d.%d", major_version, minor_version);
    break;
  }

  // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
  // find out whether we are running on 64 bit processor or not
  SYSTEM_INFO si;
  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  GetNativeSystemInfo(&si);
  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
    st->print(" , 64 bit");
  }

  st->print(" Build %d", build_number);
  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
  st->cr();
}

void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  // Nothing to do for now.
}

void os::get_summary_cpu_info(char* buf, size_t buflen) {
  HKEY key;
  DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
                            "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
  if (status == ERROR_SUCCESS) {
    DWORD size = (DWORD)buflen;
    status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
    if (status != ERROR_SUCCESS) {
      strncpy(buf, "## __CPU__", buflen);
    }
    RegCloseKey(key);
  } else {
    // Put generic cpu info to return
    strncpy(buf, "## __CPU__", buflen);
  }
}

void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);

  st->print(", physical %uk", os::physical_memory() >> 10);
  st->print("(%uk free)", os::available_memory() >> 10);

  st->print(", swap %uk", ms.ullTotalPageFile >> 10);
  st->print("(%uk free)", ms.ullAvailPageFile >> 10);
  st->cr();
}

void os::print_siginfo(outputStream *st, const void* siginfo) {
  const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
  st->print("siginfo:");

  char tmp[64];
  if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
    strcpy(tmp, "EXCEPTION_??");
  }
  st->print(" %s (0x%x)", tmp, er->ExceptionCode);

  if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
       er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
      er->NumberParameters >= 2) {
    switch (er->ExceptionInformation[0]) {
    case 0: st->print(", reading address"); break;
    case 1: st->print(", writing address"); break;
    case 8: st->print(", data execution prevention violation at address"); break;
    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                       er->ExceptionInformation[0]);
    }
    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else {
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}

static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like jvm.dll is installed there (append a fake suffix
    // hotspot/jvm.dll).
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {
      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      if (0 != _access(buf, 0)) {
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  if (buf[0] == '\0') {
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  strncpy(saved_jvm_path, buf, MAX_PATH);
  saved_jvm_path[MAX_PATH - 1] = '\0';
}


void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}


void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("@%d", args_size * sizeof(int));
#endif
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c

size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
                                     FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
                                     NULL,
                                     errval,
                                     0,
                                     buf,
                                     (DWORD)len,
                                     NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = os::strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}

int os::get_last_error() {
  DWORD error = GetLastError();
  if (error == 0) {
    error = errno;
  }
  return (int)error;
}

WindowsSemaphore::WindowsSemaphore(uint value) {
  _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL);

  guarantee(_semaphore != NULL, "CreateSemaphore failed with error code: %lu", GetLastError());
}

WindowsSemaphore::~WindowsSemaphore() {
  ::CloseHandle(_semaphore);
}

void WindowsSemaphore::signal(uint count) {
  if (count > 0) {
    BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL);

    assert(ret != 0, "ReleaseSemaphore failed with error code: %lu", GetLastError());
  }
}

void WindowsSemaphore::wait() {
  DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE);
  assert(ret != WAIT_FAILED, "WaitForSingleObject failed with error code: %lu", GetLastError());
  assert(ret == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", ret);
}

bool WindowsSemaphore::trywait() {
  DWORD ret = ::WaitForSingleObject(_semaphore, 0);
  assert(ret != WAIT_FAILED, "WaitForSingleObject failed with error code: %lu", GetLastError());
  return ret == WAIT_OBJECT_0;
}

// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
CTRL_CLOSE_EVENT. 1869 // See bug 4416763. 1870 static void (*sigbreakHandler)(int) = NULL; 1871 1872 static void UserHandler(int sig, void *siginfo, void *context) { 1873 os::signal_notify(sig); 1874 // We need to reinstate the signal handler each time... 1875 os::signal(sig, (void*)UserHandler); 1876 } 1877 1878 void* os::user_handler() { 1879 return (void*) UserHandler; 1880 } 1881 1882 void* os::signal(int signal_number, void* handler) { 1883 if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) { 1884 void (*oldHandler)(int) = sigbreakHandler; 1885 sigbreakHandler = (void (*)(int)) handler; 1886 return (void*) oldHandler; 1887 } else { 1888 return (void*)::signal(signal_number, (void (*)(int))handler); 1889 } 1890 } 1891 1892 void os::signal_raise(int signal_number) { 1893 raise(signal_number); 1894 } 1895 1896 // The Win32 C runtime library maps all console control events other than ^C 1897 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close, 1898 // logoff, and shutdown events. We therefore install our own console handler 1899 // that raises SIGTERM for the latter cases. 1900 // 1901 static BOOL WINAPI consoleHandler(DWORD event) { 1902 switch (event) { 1903 case CTRL_C_EVENT: 1904 if (VMError::is_error_reported()) { 1905 // Ctrl-C is pressed during error reporting, likely because the error 1906 // handler fails to abort. Let VM die immediately. 1907 os::die(); 1908 } 1909 1910 os::signal_raise(SIGINT); 1911 return TRUE; 1912 break; 1913 case CTRL_BREAK_EVENT: 1914 if (sigbreakHandler != NULL) { 1915 (*sigbreakHandler)(SIGBREAK); 1916 } 1917 return TRUE; 1918 break; 1919 case CTRL_LOGOFF_EVENT: { 1920 // Don't terminate JVM if it is running in a non-interactive session, 1921 // such as a service process. 1922 USEROBJECTFLAGS flags; 1923 HANDLE handle = GetProcessWindowStation(); 1924 if (handle != NULL && 1925 GetUserObjectInformation(handle, UOI_FLAGS, &flags, 1926 sizeof(USEROBJECTFLAGS), NULL)) { 1927 // If it is a non-interactive session, let next handler to deal 1928 // with it. 1929 if ((flags.dwFlags & WSF_VISIBLE) == 0) { 1930 return FALSE; 1931 } 1932 } 1933 } 1934 case CTRL_CLOSE_EVENT: 1935 case CTRL_SHUTDOWN_EVENT: 1936 os::signal_raise(SIGTERM); 1937 return TRUE; 1938 break; 1939 default: 1940 break; 1941 } 1942 return FALSE; 1943 } 1944 1945 // The following code is moved from os.cpp for making this 1946 // code platform specific, which it is by its very nature. 1947 1948 // Return maximum OS signal used + 1 for internal use only 1949 // Used as exit signal for signal_thread 1950 int os::sigexitnum_pd() { 1951 return NSIG; 1952 } 1953 1954 // a counter for each possible signal value, including signal_thread exit signal 1955 static volatile jint pending_signals[NSIG+1] = { 0 }; 1956 static HANDLE sig_sem = NULL; 1957 1958 void os::signal_init_pd() { 1959 // Initialize signal structures 1960 memset((void*)pending_signals, 0, sizeof(pending_signals)); 1961 1962 sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL); 1963 1964 // Programs embedding the VM do not want it to attempt to receive 1965 // events like CTRL_LOGOFF_EVENT, which are used to implement the 1966 // shutdown hooks mechanism introduced in 1.3. 
For example, when 1967 // the VM is run as part of a Windows NT service (i.e., a servlet 1968 // engine in a web server), the correct behavior is for any console 1969 // control handler to return FALSE, not TRUE, because the OS's 1970 // "final" handler for such events allows the process to continue if 1971 // it is a service (while terminating it if it is not a service). 1972 // To make this behavior uniform and the mechanism simpler, we 1973 // completely disable the VM's usage of these console events if -Xrs 1974 // (=ReduceSignalUsage) is specified. This means, for example, that 1975 // the CTRL-BREAK thread dump mechanism is also disabled in this 1976 // case. See bugs 4323062, 4345157, and related bugs. 1977 1978 if (!ReduceSignalUsage) { 1979 // Add a CTRL-C handler 1980 SetConsoleCtrlHandler(consoleHandler, TRUE); 1981 } 1982 } 1983 1984 void os::signal_notify(int signal_number) { 1985 BOOL ret; 1986 if (sig_sem != NULL) { 1987 Atomic::inc(&pending_signals[signal_number]); 1988 ret = ::ReleaseSemaphore(sig_sem, 1, NULL); 1989 assert(ret != 0, "ReleaseSemaphore() failed"); 1990 } 1991 } 1992 1993 static int check_pending_signals(bool wait_for_signal) { 1994 DWORD ret; 1995 while (true) { 1996 for (int i = 0; i < NSIG + 1; i++) { 1997 jint n = pending_signals[i]; 1998 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) { 1999 return i; 2000 } 2001 } 2002 if (!wait_for_signal) { 2003 return -1; 2004 } 2005 2006 JavaThread *thread = JavaThread::current(); 2007 2008 ThreadBlockInVM tbivm(thread); 2009 2010 bool threadIsSuspended; 2011 do { 2012 thread->set_suspend_equivalent(); 2013 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 2014 ret = ::WaitForSingleObject(sig_sem, INFINITE); 2015 assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed"); 2016 2017 // were we externally suspended while we were waiting? 2018 threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); 2019 if (threadIsSuspended) { 2020 // The semaphore has been incremented, but while we were waiting 2021 // another thread suspended us. We don't want to continue running 2022 // while suspended because that would surprise the thread that 2023 // suspended us. 2024 ret = ::ReleaseSemaphore(sig_sem, 1, NULL); 2025 assert(ret != 0, "ReleaseSemaphore() failed"); 2026 2027 thread->java_suspend_self(); 2028 } 2029 } while (threadIsSuspended); 2030 } 2031 } 2032 2033 int os::signal_lookup() { 2034 return check_pending_signals(false); 2035 } 2036 2037 int os::signal_wait() { 2038 return check_pending_signals(true); 2039 } 2040 2041 // Implicit OS exception handling 2042 2043 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, 2044 address handler) { 2045 JavaThread* thread = (JavaThread*) Thread::current_or_null(); 2046 // Save pc in thread 2047 #ifdef _M_AMD64 2048 // Do not blow up if no thread info available. 2049 if (thread) { 2050 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip); 2051 } 2052 // Set pc to handler 2053 exceptionInfo->ContextRecord->Rip = (DWORD64)handler; 2054 #else 2055 // Do not blow up if no thread info available. 
2056   if (thread) {
2057     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2058   }
2059   // Set pc to handler
2060   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2061 #endif
2062
2063   // Continue the execution
2064   return EXCEPTION_CONTINUE_EXECUTION;
2065 }
2066
2067
2068 // Used for PostMortemDump
2069 extern "C" void safepoints();
2070 extern "C" void find(int x);
2071 extern "C" void events();
2072
2073 // According to Windows API documentation, an illegal instruction sequence should generate
2074 // the 0xC000001C exception code. However, real world experience shows that occasionally
2075 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2076 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2077
2078 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2079
2080 // From "Execution Protection in the Windows Operating System" draft 0.35
2081 // Once a system header becomes available, the "real" define should be
2082 // included or copied here.
2083 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2084
2085 // Windows Vista/2008 heap corruption check
2086 #define EXCEPTION_HEAP_CORRUPTION 0xC0000374
2087
2088 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2089 // C++ compiler contain this error code. Because this is a compiler-generated
2090 // error, the code is not listed in the Win32 API header files.
2091 // The code is actually a cryptic mnemonic device, with the initial "E"
2092 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2093 // ASCII values of "msc".
2094
2095 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363
2096
2097 #define def_excpt(val) { #val, (val) }
2098
2099 static const struct { char* name; uint number; } exceptlabels[] = {
2100   def_excpt(EXCEPTION_ACCESS_VIOLATION),
2101   def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2102   def_excpt(EXCEPTION_BREAKPOINT),
2103   def_excpt(EXCEPTION_SINGLE_STEP),
2104   def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2105   def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2106   def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2107   def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2108   def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2109   def_excpt(EXCEPTION_FLT_OVERFLOW),
2110   def_excpt(EXCEPTION_FLT_STACK_CHECK),
2111   def_excpt(EXCEPTION_FLT_UNDERFLOW),
2112   def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2113   def_excpt(EXCEPTION_INT_OVERFLOW),
2114   def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2115   def_excpt(EXCEPTION_IN_PAGE_ERROR),
2116   def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2117   def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2118   def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2119   def_excpt(EXCEPTION_STACK_OVERFLOW),
2120   def_excpt(EXCEPTION_INVALID_DISPOSITION),
2121   def_excpt(EXCEPTION_GUARD_PAGE),
2122   def_excpt(EXCEPTION_INVALID_HANDLE),
2123   def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2124   def_excpt(EXCEPTION_HEAP_CORRUPTION)
2125 };
2126
2127 #undef def_excpt
2128
2129 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2130   uint code = static_cast<uint>(exception_code);
2131   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2132     if (exceptlabels[i].number == code) {
2133       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2134       return buf;
2135     }
2136   }
2137
2138   return NULL;
2139 }
2140
2141 //-----------------------------------------------------------------------------
2142 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2143   //
handle exception caused by idiv; should only happen for -MinInt/-1 2144 // (division by zero is handled explicitly) 2145 #ifdef _M_AMD64 2146 PCONTEXT ctx = exceptionInfo->ContextRecord; 2147 address pc = (address)ctx->Rip; 2148 assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode"); 2149 assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2150 if (pc[0] == 0xF7) { 2151 // set correct result values and continue after idiv instruction 2152 ctx->Rip = (DWORD64)pc + 2; // idiv reg, reg is 2 bytes 2153 } else { 2154 ctx->Rip = (DWORD64)pc + 3; // REX idiv reg, reg is 3 bytes 2155 } 2156 // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation) 2157 // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the 2158 // idiv opcode (0xF7). 2159 ctx->Rdx = (DWORD)0; // remainder 2160 // Continue the execution 2161 #else 2162 PCONTEXT ctx = exceptionInfo->ContextRecord; 2163 address pc = (address)ctx->Eip; 2164 assert(pc[0] == 0xF7, "not an idiv opcode"); 2165 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2166 assert(ctx->Eax == min_jint, "unexpected idiv exception"); 2167 // set correct result values and continue after idiv instruction 2168 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2169 ctx->Eax = (DWORD)min_jint; // result 2170 ctx->Edx = (DWORD)0; // remainder 2171 // Continue the execution 2172 #endif 2173 return EXCEPTION_CONTINUE_EXECUTION; 2174 } 2175 2176 //----------------------------------------------------------------------------- 2177 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2178 PCONTEXT ctx = exceptionInfo->ContextRecord; 2179 #ifndef _WIN64 2180 // handle exception caused by native method modifying control word 2181 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2182 2183 switch (exception_code) { 2184 case EXCEPTION_FLT_DENORMAL_OPERAND: 2185 case EXCEPTION_FLT_DIVIDE_BY_ZERO: 2186 case EXCEPTION_FLT_INEXACT_RESULT: 2187 case EXCEPTION_FLT_INVALID_OPERATION: 2188 case EXCEPTION_FLT_OVERFLOW: 2189 case EXCEPTION_FLT_STACK_CHECK: 2190 case EXCEPTION_FLT_UNDERFLOW: 2191 jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std()); 2192 if (fp_control_word != ctx->FloatSave.ControlWord) { 2193 // Restore FPCW and mask out FLT exceptions 2194 ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0; 2195 // Mask out pending FLT exceptions 2196 ctx->FloatSave.StatusWord &= 0xffffff00; 2197 return EXCEPTION_CONTINUE_EXECUTION; 2198 } 2199 } 2200 2201 if (prev_uef_handler != NULL) { 2202 // We didn't handle this exception so pass it to the previous 2203 // UnhandledExceptionFilter. 
2204 return (prev_uef_handler)(exceptionInfo); 2205 } 2206 #else // !_WIN64 2207 // On Windows, the mxcsr control bits are non-volatile across calls 2208 // See also CR 6192333 2209 // 2210 jint MxCsr = INITIAL_MXCSR; 2211 // we can't use StubRoutines::addr_mxcsr_std() 2212 // because in Win64 mxcsr is not saved there 2213 if (MxCsr != ctx->MxCsr) { 2214 ctx->MxCsr = MxCsr; 2215 return EXCEPTION_CONTINUE_EXECUTION; 2216 } 2217 #endif // !_WIN64 2218 2219 return EXCEPTION_CONTINUE_SEARCH; 2220 } 2221 2222 static inline void report_error(Thread* t, DWORD exception_code, 2223 address addr, void* siginfo, void* context) { 2224 VMError::report_and_die(t, exception_code, addr, siginfo, context); 2225 2226 // If UseOsErrorReporting, this will return here and save the error file 2227 // somewhere where we can find it in the minidump. 2228 } 2229 2230 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread, 2231 struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) { 2232 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2233 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2234 if (Interpreter::contains(pc)) { 2235 *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord); 2236 if (!fr->is_first_java_frame()) { 2237 // get_frame_at_stack_banging_point() is only called when we 2238 // have well defined stacks so java_sender() calls do not need 2239 // to assert safe_for_sender() first. 2240 *fr = fr->java_sender(); 2241 } 2242 } else { 2243 // more complex code with compiled code 2244 assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above"); 2245 CodeBlob* cb = CodeCache::find_blob(pc); 2246 if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) { 2247 // Not sure where the pc points to, fallback to default 2248 // stack overflow handling 2249 return false; 2250 } else { 2251 *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord); 2252 // in compiled code, the stack banging is performed just after the return pc 2253 // has been pushed on the stack 2254 *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp())); 2255 if (!fr->is_java_frame()) { 2256 // See java_sender() comment above. 2257 *fr = fr->java_sender(); 2258 } 2259 } 2260 } 2261 assert(fr->is_java_frame(), "Safety check"); 2262 return true; 2263 } 2264 2265 //----------------------------------------------------------------------------- 2266 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { 2267 if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH; 2268 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2269 #ifdef _M_AMD64 2270 address pc = (address) exceptionInfo->ContextRecord->Rip; 2271 #else 2272 address pc = (address) exceptionInfo->ContextRecord->Eip; 2273 #endif 2274 Thread* t = Thread::current_or_null_safe(); 2275 2276 // Handle SafeFetch32 and SafeFetchN exceptions. 2277 if (StubRoutines::is_safefetch_fault(pc)) { 2278 return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc)); 2279 } 2280 2281 #ifndef _WIN64 2282 // Execution protection violation - win32 running on AMD64 only 2283 // Handled first to avoid misdiagnosis as a "normal" access violation; 2284 // This is safe to do because we have a new/unique ExceptionInformation 2285 // code for this condition. 
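  // (That unique code is EXCEPTION_INFO_EXEC_VIOLATION, i.e. ExceptionInformation[0] == 8,
  // the same value print_siginfo() above reports as a DEP violation at the faulting address.)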
2286 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2287 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2288 int exception_subcode = (int) exceptionRecord->ExceptionInformation[0]; 2289 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2290 2291 if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) { 2292 int page_size = os::vm_page_size(); 2293 2294 // Make sure the pc and the faulting address are sane. 2295 // 2296 // If an instruction spans a page boundary, and the page containing 2297 // the beginning of the instruction is executable but the following 2298 // page is not, the pc and the faulting address might be slightly 2299 // different - we still want to unguard the 2nd page in this case. 2300 // 2301 // 15 bytes seems to be a (very) safe value for max instruction size. 2302 bool pc_is_near_addr = 2303 (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15); 2304 bool instr_spans_page_boundary = 2305 (align_down((intptr_t) pc ^ (intptr_t) addr, 2306 (intptr_t) page_size) > 0); 2307 2308 if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) { 2309 static volatile address last_addr = 2310 (address) os::non_memory_address_word(); 2311 2312 // In conservative mode, don't unguard unless the address is in the VM 2313 if (UnguardOnExecutionViolation > 0 && addr != last_addr && 2314 (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) { 2315 2316 // Set memory to RWX and retry 2317 address page_start = align_down(addr, page_size); 2318 bool res = os::protect_memory((char*) page_start, page_size, 2319 os::MEM_PROT_RWX); 2320 2321 log_debug(os)("Execution protection violation " 2322 "at " INTPTR_FORMAT 2323 ", unguarding " INTPTR_FORMAT ": %s", p2i(addr), 2324 p2i(page_start), (res ? "success" : os::strerror(errno))); 2325 2326 // Set last_addr so if we fault again at the same address, we don't 2327 // end up in an endless loop. 2328 // 2329 // There are two potential complications here. Two threads trapping 2330 // at the same address at the same time could cause one of the 2331 // threads to think it already unguarded, and abort the VM. Likely 2332 // very rare. 2333 // 2334 // The other race involves two threads alternately trapping at 2335 // different addresses and failing to unguard the page, resulting in 2336 // an endless loop. This condition is probably even more unlikely 2337 // than the first. 2338 // 2339 // Although both cases could be avoided by using locks or thread 2340 // local last_addr, these solutions are unnecessary complication: 2341 // this handler is a best-effort safety net, not a complete solution. 2342 // It is disabled by default and should only be used as a workaround 2343 // in case we missed any no-execute-unsafe VM code. 2344 2345 last_addr = addr; 2346 2347 return EXCEPTION_CONTINUE_EXECUTION; 2348 } 2349 } 2350 2351 // Last unguard failed or not unguarding 2352 tty->print_raw_cr("Execution protection violation"); 2353 report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord, 2354 exceptionInfo->ContextRecord); 2355 return EXCEPTION_CONTINUE_SEARCH; 2356 } 2357 } 2358 #endif // _WIN64 2359 2360 // Check to see if we caught the safepoint code in the 2361 // process of write protecting the memory serialization page. 2362 // It write enables the page immediately after protecting it 2363 // so just return. 
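  // (os::is_memory_serialize_page() below identifies such faults; the thread simply
  // blocks in os::block_on_serialize_page_trap() until the page is writable again and
  // then the faulting access is retried via EXCEPTION_CONTINUE_EXECUTION.)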
2364 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2365 if (t != NULL && t->is_Java_thread()) { 2366 JavaThread* thread = (JavaThread*) t; 2367 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2368 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2369 if (os::is_memory_serialize_page(thread, addr)) { 2370 // Block current thread until the memory serialize page permission restored. 2371 os::block_on_serialize_page_trap(); 2372 return EXCEPTION_CONTINUE_EXECUTION; 2373 } 2374 } 2375 } 2376 2377 if ((exception_code == EXCEPTION_ACCESS_VIOLATION) && 2378 VM_Version::is_cpuinfo_segv_addr(pc)) { 2379 // Verify that OS save/restore AVX registers. 2380 return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr()); 2381 } 2382 2383 if (t != NULL && t->is_Java_thread()) { 2384 JavaThread* thread = (JavaThread*) t; 2385 bool in_java = thread->thread_state() == _thread_in_Java; 2386 2387 // Handle potential stack overflows up front. 2388 if (exception_code == EXCEPTION_STACK_OVERFLOW) { 2389 if (thread->stack_guards_enabled()) { 2390 if (in_java) { 2391 frame fr; 2392 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2393 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2394 if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) { 2395 assert(fr.is_java_frame(), "Must be a Java frame"); 2396 SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr); 2397 } 2398 } 2399 // Yellow zone violation. The o/s has unprotected the first yellow 2400 // zone page for us. Note: must call disable_stack_yellow_zone to 2401 // update the enabled status, even if the zone contains only one page. 2402 assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages"); 2403 thread->disable_stack_yellow_reserved_zone(); 2404 // If not in java code, return and hope for the best. 2405 return in_java 2406 ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)) 2407 : EXCEPTION_CONTINUE_EXECUTION; 2408 } else { 2409 // Fatal red zone violation. 2410 thread->disable_stack_red_zone(); 2411 tty->print_raw_cr("An unrecoverable stack overflow has occurred."); 2412 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2413 exceptionInfo->ContextRecord); 2414 return EXCEPTION_CONTINUE_SEARCH; 2415 } 2416 } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2417 // Either stack overflow or null pointer exception. 2418 if (in_java) { 2419 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2420 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2421 address stack_end = thread->stack_end(); 2422 if (addr < stack_end && addr >= stack_end - os::vm_page_size()) { 2423 // Stack overflow. 2424 assert(!os::uses_stack_guard_pages(), 2425 "should be caught by red zone code above."); 2426 return Handle_Exception(exceptionInfo, 2427 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2428 } 2429 // Check for safepoint polling and implicit null 2430 // We only expect null pointers in the stubs (vtable) 2431 // the rest are checked explicitly now. 
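      // (A safepoint poll surfaces here as an access to the polling page: if the
      // faulting address matches os::is_poll_address() below, we branch to the poll
      // stub instead of treating the fault as an implicit null check.)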
2432 CodeBlob* cb = CodeCache::find_blob(pc); 2433 if (cb != NULL) { 2434 if (os::is_poll_address(addr)) { 2435 address stub = SharedRuntime::get_poll_stub(pc); 2436 return Handle_Exception(exceptionInfo, stub); 2437 } 2438 } 2439 { 2440 #ifdef _WIN64 2441 // If it's a legal stack address map the entire region in 2442 // 2443 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2444 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2445 if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) { 2446 addr = (address)((uintptr_t)addr & 2447 (~((uintptr_t)os::vm_page_size() - (uintptr_t)1))); 2448 os::commit_memory((char *)addr, thread->stack_base() - addr, 2449 !ExecMem); 2450 return EXCEPTION_CONTINUE_EXECUTION; 2451 } else 2452 #endif 2453 { 2454 // Null pointer exception. 2455 if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) { 2456 address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); 2457 if (stub != NULL) return Handle_Exception(exceptionInfo, stub); 2458 } 2459 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2460 exceptionInfo->ContextRecord); 2461 return EXCEPTION_CONTINUE_SEARCH; 2462 } 2463 } 2464 } 2465 2466 #ifdef _WIN64 2467 // Special care for fast JNI field accessors. 2468 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks 2469 // in and the heap gets shrunk before the field access. 2470 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2471 address addr = JNI_FastGetField::find_slowcase_pc(pc); 2472 if (addr != (address)-1) { 2473 return Handle_Exception(exceptionInfo, addr); 2474 } 2475 } 2476 #endif 2477 2478 // Stack overflow or null pointer exception in native code. 2479 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2480 exceptionInfo->ContextRecord); 2481 return EXCEPTION_CONTINUE_SEARCH; 2482 } // /EXCEPTION_ACCESS_VIOLATION 2483 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 2484 2485 if (in_java) { 2486 switch (exception_code) { 2487 case EXCEPTION_INT_DIVIDE_BY_ZERO: 2488 return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO)); 2489 2490 case EXCEPTION_INT_OVERFLOW: 2491 return Handle_IDiv_Exception(exceptionInfo); 2492 2493 } // switch 2494 } 2495 if (((thread->thread_state() == _thread_in_Java) || 2496 (thread->thread_state() == _thread_in_native)) && 2497 exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) { 2498 LONG result=Handle_FLT_Exception(exceptionInfo); 2499 if (result==EXCEPTION_CONTINUE_EXECUTION) return result; 2500 } 2501 } 2502 2503 if (exception_code != EXCEPTION_BREAKPOINT) { 2504 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2505 exceptionInfo->ContextRecord); 2506 } 2507 return EXCEPTION_CONTINUE_SEARCH; 2508 } 2509 2510 #ifndef _WIN64 2511 // Special care for fast JNI accessors. 2512 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and 2513 // the heap gets shrunk before the field access. 2514 // Need to install our own structured exception handler since native code may 2515 // install its own. 
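// As a sketch of what the DEFINE_FAST_GETFIELD macro below expands to (shown here
// for the jboolean case; the other primitive types follow the same pattern):
//
//   jboolean JNICALL jni_fast_GetBooleanField_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) {
//     __try {
//       return (*JNI_FastGetField::jni_fast_GetBooleanField_fp)(env, obj, fieldID);
//     } __except (fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) {
//     }
//     return 0;
//   }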
2516 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { 2517 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2518 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2519 address pc = (address) exceptionInfo->ContextRecord->Eip; 2520 address addr = JNI_FastGetField::find_slowcase_pc(pc); 2521 if (addr != (address)-1) { 2522 return Handle_Exception(exceptionInfo, addr); 2523 } 2524 } 2525 return EXCEPTION_CONTINUE_SEARCH; 2526 } 2527 2528 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result) \ 2529 Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, \ 2530 jobject obj, \ 2531 jfieldID fieldID) { \ 2532 __try { \ 2533 return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, \ 2534 obj, \ 2535 fieldID); \ 2536 } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*) \ 2537 _exception_info())) { \ 2538 } \ 2539 return 0; \ 2540 } 2541 2542 DEFINE_FAST_GETFIELD(jboolean, bool, Boolean) 2543 DEFINE_FAST_GETFIELD(jbyte, byte, Byte) 2544 DEFINE_FAST_GETFIELD(jchar, char, Char) 2545 DEFINE_FAST_GETFIELD(jshort, short, Short) 2546 DEFINE_FAST_GETFIELD(jint, int, Int) 2547 DEFINE_FAST_GETFIELD(jlong, long, Long) 2548 DEFINE_FAST_GETFIELD(jfloat, float, Float) 2549 DEFINE_FAST_GETFIELD(jdouble, double, Double) 2550 2551 address os::win32::fast_jni_accessor_wrapper(BasicType type) { 2552 switch (type) { 2553 case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper; 2554 case T_BYTE: return (address)jni_fast_GetByteField_wrapper; 2555 case T_CHAR: return (address)jni_fast_GetCharField_wrapper; 2556 case T_SHORT: return (address)jni_fast_GetShortField_wrapper; 2557 case T_INT: return (address)jni_fast_GetIntField_wrapper; 2558 case T_LONG: return (address)jni_fast_GetLongField_wrapper; 2559 case T_FLOAT: return (address)jni_fast_GetFloatField_wrapper; 2560 case T_DOUBLE: return (address)jni_fast_GetDoubleField_wrapper; 2561 default: ShouldNotReachHere(); 2562 } 2563 return (address)-1; 2564 } 2565 #endif 2566 2567 // Virtual Memory 2568 2569 int os::vm_page_size() { return os::win32::vm_page_size(); } 2570 int os::vm_allocation_granularity() { 2571 return os::win32::vm_allocation_granularity(); 2572 } 2573 2574 // Windows large page support is available on Windows 2003. In order to use 2575 // large page memory, the administrator must first assign additional privilege 2576 // to the user: 2577 // + select Control Panel -> Administrative Tools -> Local Security Policy 2578 // + select Local Policies -> User Rights Assignment 2579 // + double click "Lock pages in memory", add users and/or groups 2580 // + reboot 2581 // Note the above steps are needed for administrator as well, as administrators 2582 // by default do not have the privilege to lock pages in memory. 2583 // 2584 // Note about Windows 2003: although the API supports committing large page 2585 // memory on a page-by-page basis and VirtualAlloc() returns success under this 2586 // scenario, I found through experiment it only uses large page if the entire 2587 // memory region is reserved and committed in a single VirtualAlloc() call. 2588 // This makes Windows large page support more or less like Solaris ISM, in 2589 // that the entire heap must be committed upfront. This probably will change 2590 // in the future, if so the code below needs to be revisited. 
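// Concretely, when the whole region is reserved and committed at once, the call made
// by os::reserve_memory_special() below is roughly:
//
//   VirtualAlloc(addr, bytes, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
//                exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE);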
2591 2592 #ifndef MEM_LARGE_PAGES 2593 #define MEM_LARGE_PAGES 0x20000000 2594 #endif 2595 2596 static HANDLE _hProcess; 2597 static HANDLE _hToken; 2598 2599 // Container for NUMA node list info 2600 class NUMANodeListHolder { 2601 private: 2602 int *_numa_used_node_list; // allocated below 2603 int _numa_used_node_count; 2604 2605 void free_node_list() { 2606 if (_numa_used_node_list != NULL) { 2607 FREE_C_HEAP_ARRAY(int, _numa_used_node_list); 2608 } 2609 } 2610 2611 public: 2612 NUMANodeListHolder() { 2613 _numa_used_node_count = 0; 2614 _numa_used_node_list = NULL; 2615 // do rest of initialization in build routine (after function pointers are set up) 2616 } 2617 2618 ~NUMANodeListHolder() { 2619 free_node_list(); 2620 } 2621 2622 bool build() { 2623 DWORD_PTR proc_aff_mask; 2624 DWORD_PTR sys_aff_mask; 2625 if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false; 2626 ULONG highest_node_number; 2627 if (!GetNumaHighestNodeNumber(&highest_node_number)) return false; 2628 free_node_list(); 2629 _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal); 2630 for (unsigned int i = 0; i <= highest_node_number; i++) { 2631 ULONGLONG proc_mask_numa_node; 2632 if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false; 2633 if ((proc_aff_mask & proc_mask_numa_node)!=0) { 2634 _numa_used_node_list[_numa_used_node_count++] = i; 2635 } 2636 } 2637 return (_numa_used_node_count > 1); 2638 } 2639 2640 int get_count() { return _numa_used_node_count; } 2641 int get_node_list_entry(int n) { 2642 // for indexes out of range, returns -1 2643 return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1); 2644 } 2645 2646 } numa_node_list_holder; 2647 2648 2649 2650 static size_t _large_page_size = 0; 2651 2652 static bool request_lock_memory_privilege() { 2653 _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, 2654 os::current_process_id()); 2655 2656 LUID luid; 2657 if (_hProcess != NULL && 2658 OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) && 2659 LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) { 2660 2661 TOKEN_PRIVILEGES tp; 2662 tp.PrivilegeCount = 1; 2663 tp.Privileges[0].Luid = luid; 2664 tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; 2665 2666 // AdjustTokenPrivileges() may return TRUE even when it couldn't change the 2667 // privilege. Check GetLastError() too. See MSDN document. 2668 if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) && 2669 (GetLastError() == ERROR_SUCCESS)) { 2670 return true; 2671 } 2672 } 2673 2674 return false; 2675 } 2676 2677 static void cleanup_after_large_page_init() { 2678 if (_hProcess) CloseHandle(_hProcess); 2679 _hProcess = NULL; 2680 if (_hToken) CloseHandle(_hToken); 2681 _hToken = NULL; 2682 } 2683 2684 static bool numa_interleaving_init() { 2685 bool success = false; 2686 bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving); 2687 2688 // print a warning if UseNUMAInterleaving flag is specified on command line 2689 bool warn_on_failure = use_numa_interleaving_specified; 2690 #define WARN(msg) if (warn_on_failure) { warning(msg); } 2691 2692 // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages) 2693 size_t min_interleave_granularity = UseLargePages ? 
_large_page_size : os::vm_allocation_granularity(); 2694 NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity); 2695 2696 if (numa_node_list_holder.build()) { 2697 if (log_is_enabled(Debug, os, cpu)) { 2698 Log(os, cpu) log; 2699 log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count()); 2700 for (int i = 0; i < numa_node_list_holder.get_count(); i++) { 2701 log.debug(" %d ", numa_node_list_holder.get_node_list_entry(i)); 2702 } 2703 } 2704 success = true; 2705 } else { 2706 WARN("Process does not cover multiple NUMA nodes."); 2707 } 2708 if (!success) { 2709 if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag."); 2710 } 2711 return success; 2712 #undef WARN 2713 } 2714 2715 // this routine is used whenever we need to reserve a contiguous VA range 2716 // but we need to make separate VirtualAlloc calls for each piece of the range 2717 // Reasons for doing this: 2718 // * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise) 2719 // * UseNUMAInterleaving requires a separate node for each piece 2720 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, 2721 DWORD prot, 2722 bool should_inject_error = false) { 2723 char * p_buf; 2724 // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size 2725 size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity(); 2726 size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size; 2727 2728 // first reserve enough address space in advance since we want to be 2729 // able to break a single contiguous virtual address range into multiple 2730 // large page commits but WS2003 does not allow reserving large page space 2731 // so we just use 4K pages for reserve, this gives us a legal contiguous 2732 // address space. then we will deallocate that reservation, and re alloc 2733 // using large pages 2734 const size_t size_of_reserve = bytes + chunk_size; 2735 if (bytes > size_of_reserve) { 2736 // Overflowed. 2737 return NULL; 2738 } 2739 p_buf = (char *) VirtualAlloc(addr, 2740 size_of_reserve, // size of Reserve 2741 MEM_RESERVE, 2742 PAGE_READWRITE); 2743 // If reservation failed, return NULL 2744 if (p_buf == NULL) return NULL; 2745 MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC); 2746 os::release_memory(p_buf, bytes + chunk_size); 2747 2748 // we still need to round up to a page boundary (in case we are using large pages) 2749 // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size) 2750 // instead we handle this in the bytes_to_rq computation below 2751 p_buf = align_up(p_buf, page_size); 2752 2753 // now go through and allocate one chunk at a time until all bytes are 2754 // allocated 2755 size_t bytes_remaining = bytes; 2756 // An overflow of align_up() would have been caught above 2757 // in the calculation of size_of_reserve. 
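  // In the loop below each request is capped so that it ends exactly on a chunk_size
  // boundary. For example (numbers are hypothetical): with chunk_size = 2M and
  // next_alloc_addr already 1.5M into a chunk, only 0.5M is requested, so the next
  // allocation starts on a fresh chunk (and, under UseNUMAInterleaving, on the next
  // node in the used-node list).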
2758 char * next_alloc_addr = p_buf; 2759 HANDLE hProc = GetCurrentProcess(); 2760 2761 #ifdef ASSERT 2762 // Variable for the failure injection 2763 int ran_num = os::random(); 2764 size_t fail_after = ran_num % bytes; 2765 #endif 2766 2767 int count=0; 2768 while (bytes_remaining) { 2769 // select bytes_to_rq to get to the next chunk_size boundary 2770 2771 size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size)); 2772 // Note allocate and commit 2773 char * p_new; 2774 2775 #ifdef ASSERT 2776 bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after); 2777 #else 2778 const bool inject_error_now = false; 2779 #endif 2780 2781 if (inject_error_now) { 2782 p_new = NULL; 2783 } else { 2784 if (!UseNUMAInterleaving) { 2785 p_new = (char *) VirtualAlloc(next_alloc_addr, 2786 bytes_to_rq, 2787 flags, 2788 prot); 2789 } else { 2790 // get the next node to use from the used_node_list 2791 assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected"); 2792 DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count()); 2793 p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node); 2794 } 2795 } 2796 2797 if (p_new == NULL) { 2798 // Free any allocated pages 2799 if (next_alloc_addr > p_buf) { 2800 // Some memory was committed so release it. 2801 size_t bytes_to_release = bytes - bytes_remaining; 2802 // NMT has yet to record any individual blocks, so it 2803 // need to create a dummy 'reserve' record to match 2804 // the release. 2805 MemTracker::record_virtual_memory_reserve((address)p_buf, 2806 bytes_to_release, CALLER_PC); 2807 os::release_memory(p_buf, bytes_to_release); 2808 } 2809 #ifdef ASSERT 2810 if (should_inject_error) { 2811 log_develop_debug(pagesize)("Reserving pages individually failed."); 2812 } 2813 #endif 2814 return NULL; 2815 } 2816 2817 bytes_remaining -= bytes_to_rq; 2818 next_alloc_addr += bytes_to_rq; 2819 count++; 2820 } 2821 // Although the memory is allocated individually, it is returned as one. 2822 // NMT records it as one block. 
2823 if ((flags & MEM_COMMIT) != 0) { 2824 MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC); 2825 } else { 2826 MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC); 2827 } 2828 2829 // made it this far, success 2830 return p_buf; 2831 } 2832 2833 2834 2835 void os::large_page_init() { 2836 if (!UseLargePages) return; 2837 2838 // print a warning if any large page related flag is specified on command line 2839 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) || 2840 !FLAG_IS_DEFAULT(LargePageSizeInBytes); 2841 bool success = false; 2842 2843 #define WARN(msg) if (warn_on_failure) { warning(msg); } 2844 if (request_lock_memory_privilege()) { 2845 size_t s = GetLargePageMinimum(); 2846 if (s) { 2847 #if defined(IA32) || defined(AMD64) 2848 if (s > 4*M || LargePageSizeInBytes > 4*M) { 2849 WARN("JVM cannot use large pages bigger than 4mb."); 2850 } else { 2851 #endif 2852 if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) { 2853 _large_page_size = LargePageSizeInBytes; 2854 } else { 2855 _large_page_size = s; 2856 } 2857 success = true; 2858 #if defined(IA32) || defined(AMD64) 2859 } 2860 #endif 2861 } else { 2862 WARN("Large page is not supported by the processor."); 2863 } 2864 } else { 2865 WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory."); 2866 } 2867 #undef WARN 2868 2869 const size_t default_page_size = (size_t) vm_page_size(); 2870 if (success && _large_page_size > default_page_size) { 2871 _page_sizes[0] = _large_page_size; 2872 _page_sizes[1] = default_page_size; 2873 _page_sizes[2] = 0; 2874 } 2875 2876 cleanup_after_large_page_init(); 2877 UseLargePages = success; 2878 } 2879 2880 // On win32, one cannot release just a part of reserved memory, it's an 2881 // all or nothing deal. When we split a reservation, we must break the 2882 // reservation into two reservations. 2883 void os::pd_split_reserved_memory(char *base, size_t size, size_t split, 2884 bool realloc) { 2885 if (size > 0) { 2886 release_memory(base, size); 2887 if (realloc) { 2888 reserve_memory(split, base); 2889 } 2890 if (size != split) { 2891 reserve_memory(size - split, base + split); 2892 } 2893 } 2894 } 2895 2896 // Multiple threads can race in this code but it's not possible to unmap small sections of 2897 // virtual space to get requested alignment, like posix-like os's. 2898 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe. 
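// The approach, in outline (sizes are illustrative):
//   1. reserve size + alignment bytes anywhere            (e.g. 1M + 64K)
//   2. compute aligned_base = align_up(extra_base, alignment)
//   3. release the whole over-sized reservation
//   4. re-reserve exactly [aligned_base, aligned_base + size)
//   5. if another thread grabbed that range in the meantime, retry from step 1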
2899 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
2900   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
2901          "Alignment must be a multiple of allocation granularity (page size)");
2902   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
2903
2904   size_t extra_size = size + alignment;
2905   assert(extra_size >= size, "overflow, size is too large to allow alignment");
2906
2907   char* aligned_base = NULL;
2908
2909   do {
2910     char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
2911     if (extra_base == NULL) {
2912       return NULL;
2913     }
2914     // Do manual alignment
2915     aligned_base = align_up(extra_base, alignment);
2916
2917     os::release_memory(extra_base, extra_size);
2918
2919     aligned_base = os::reserve_memory(size, aligned_base);
2920
2921   } while (aligned_base == NULL);
2922
2923   return aligned_base;
2924 }
2925
2926 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
2927   assert((size_t)addr % os::vm_allocation_granularity() == 0,
2928          "reserve alignment");
2929   assert(bytes % os::vm_page_size() == 0, "reserve page size");
2930   char* res;
2931   // note that if UseLargePages is on, all the areas that require interleaving
2932   // will go thru reserve_memory_special rather than thru here.
2933   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
2934   if (!use_individual) {
2935     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
2936   } else {
2937     elapsedTimer reserveTimer;
2938     if (Verbose && PrintMiscellaneous) reserveTimer.start();
2939     // in numa interleaving, we have to allocate pages individually
2940     // (well really chunks of NUMAInterleaveGranularity size)
2941     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
2942     if (res == NULL) {
2943       warning("NUMA page allocation failed");
2944     }
2945     if (Verbose && PrintMiscellaneous) {
2946       reserveTimer.stop();
2947       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
2948                     reserveTimer.milliseconds(), reserveTimer.ticks());
2949     }
2950   }
2951   assert(res == NULL || addr == NULL || addr == res,
2952          "Unexpected address from reserve.");
2953
2954   return res;
2955 }
2956
2957 // Reserve memory at an arbitrary address, only if that area is
2958 // available (and not reserved for something else).
2959 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2960   // Windows os::reserve_memory() fails if the requested address range is
2961   // not available.
2962   return reserve_memory(bytes, requested_addr);
2963 }
2964
2965 size_t os::large_page_size() {
2966   return _large_page_size;
2967 }
2968
2969 bool os::can_commit_large_page_memory() {
2970   // Windows only uses large page memory when the entire region is reserved
2971   // and committed in a single VirtualAlloc() call. This may change in the
2972   // future, but with Windows 2003 it's not possible to commit on demand.
2973   return false;
2974 }
2975
2976 bool os::can_execute_large_page_memory() {
2977   return true;
2978 }
2979
2980 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
2981                                  bool exec) {
2982   assert(UseLargePages, "only for large pages");
2983
2984   if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
2985     return NULL; // Fallback to small pages.
2986   }
2987
2988   const DWORD prot = exec ?
PAGE_EXECUTE_READWRITE : PAGE_READWRITE; 2989 const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 2990 2991 // with large pages, there are two cases where we need to use Individual Allocation 2992 // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003) 2993 // 2) NUMA Interleaving is enabled, in which case we use a different node for each page 2994 if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) { 2995 log_debug(pagesize)("Reserving large pages individually."); 2996 2997 char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError); 2998 if (p_buf == NULL) { 2999 // give an appropriate warning message 3000 if (UseNUMAInterleaving) { 3001 warning("NUMA large page allocation failed, UseLargePages flag ignored"); 3002 } 3003 if (UseLargePagesIndividualAllocation) { 3004 warning("Individually allocated large pages failed, " 3005 "use -XX:-UseLargePagesIndividualAllocation to turn off"); 3006 } 3007 return NULL; 3008 } 3009 3010 return p_buf; 3011 3012 } else { 3013 log_debug(pagesize)("Reserving large pages in a single large chunk."); 3014 3015 // normal policy just allocate it all at once 3016 DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 3017 char * res = (char *)VirtualAlloc(addr, bytes, flag, prot); 3018 if (res != NULL) { 3019 MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC); 3020 } 3021 3022 return res; 3023 } 3024 } 3025 3026 bool os::release_memory_special(char* base, size_t bytes) { 3027 assert(base != NULL, "Sanity check"); 3028 return release_memory(base, bytes); 3029 } 3030 3031 void os::print_statistics() { 3032 } 3033 3034 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) { 3035 int err = os::get_last_error(); 3036 char buf[256]; 3037 size_t buf_len = os::lasterror(buf, sizeof(buf)); 3038 warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT 3039 ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes, 3040 exec, buf_len != 0 ? buf : "<no_error_string>", err); 3041 } 3042 3043 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) { 3044 if (bytes == 0) { 3045 // Don't bother the OS with noops. 3046 return true; 3047 } 3048 assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries"); 3049 assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks"); 3050 // Don't attempt to print anything if the OS call fails. We're 3051 // probably low on resources, so the print itself may cause crashes. 3052 3053 // unless we have NUMAInterleaving enabled, the range of a commit 3054 // is always within a reserve covered by a single VirtualAlloc 3055 // in that case we can just do a single commit for the requested size 3056 if (!UseNUMAInterleaving) { 3057 if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) { 3058 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 3059 return false; 3060 } 3061 if (exec) { 3062 DWORD oldprot; 3063 // Windows doc says to use VirtualProtect to get execute permissions 3064 if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) { 3065 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 3066 return false; 3067 } 3068 } 3069 return true; 3070 } else { 3071 3072 // when NUMAInterleaving is enabled, the commit might cover a range that 3073 // came from multiple VirtualAlloc reserves (using allocate_pages_individually). 3074 // VirtualQuery can help us determine that. 
The RegionSize that VirtualQuery 3075 // returns represents the number of bytes that can be committed in one step. 3076 size_t bytes_remaining = bytes; 3077 char * next_alloc_addr = addr; 3078 while (bytes_remaining > 0) { 3079 MEMORY_BASIC_INFORMATION alloc_info; 3080 VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info)); 3081 size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3082 if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, 3083 PAGE_READWRITE) == NULL) { 3084 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 3085 exec);) 3086 return false; 3087 } 3088 if (exec) { 3089 DWORD oldprot; 3090 if (!VirtualProtect(next_alloc_addr, bytes_to_rq, 3091 PAGE_EXECUTE_READWRITE, &oldprot)) { 3092 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 3093 exec);) 3094 return false; 3095 } 3096 } 3097 bytes_remaining -= bytes_to_rq; 3098 next_alloc_addr += bytes_to_rq; 3099 } 3100 } 3101 // if we made it this far, return true 3102 return true; 3103 } 3104 3105 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, 3106 bool exec) { 3107 // alignment_hint is ignored on this OS 3108 return pd_commit_memory(addr, size, exec); 3109 } 3110 3111 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec, 3112 const char* mesg) { 3113 assert(mesg != NULL, "mesg must be specified"); 3114 if (!pd_commit_memory(addr, size, exec)) { 3115 warn_fail_commit_memory(addr, size, exec); 3116 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg); 3117 } 3118 } 3119 3120 void os::pd_commit_memory_or_exit(char* addr, size_t size, 3121 size_t alignment_hint, bool exec, 3122 const char* mesg) { 3123 // alignment_hint is ignored on this OS 3124 pd_commit_memory_or_exit(addr, size, exec, mesg); 3125 } 3126 3127 bool os::pd_uncommit_memory(char* addr, size_t bytes) { 3128 if (bytes == 0) { 3129 // Don't bother the OS with noops. 3130 return true; 3131 } 3132 assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries"); 3133 assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks"); 3134 return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0); 3135 } 3136 3137 bool os::pd_release_memory(char* addr, size_t bytes) { 3138 return VirtualFree(addr, 0, MEM_RELEASE) != 0; 3139 } 3140 3141 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 3142 return os::commit_memory(addr, size, !ExecMem); 3143 } 3144 3145 bool os::remove_stack_guard_pages(char* addr, size_t size) { 3146 return os::uncommit_memory(addr, size); 3147 } 3148 3149 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) { 3150 uint count = 0; 3151 bool ret = false; 3152 size_t bytes_remaining = bytes; 3153 char * next_protect_addr = addr; 3154 3155 // Use VirtualQuery() to get the chunk size. 3156 while (bytes_remaining) { 3157 MEMORY_BASIC_INFORMATION alloc_info; 3158 if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) { 3159 return false; 3160 } 3161 3162 size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3163 // We used different API at allocate_pages_individually() based on UseNUMAInterleaving, 3164 // but we don't distinguish here as both cases are protected by same API. 
3165     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3166     if (!ret) {
3167       warning("Failed protecting pages individually for chunk #%u", count);
3168       return false;
3169     }
3170
3171     bytes_remaining -= bytes_to_protect;
3172     next_protect_addr += bytes_to_protect;
3173     count++;
3174   }
3175   return ret;
3176 }
3177
3178 // Set protections specified
3179 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3180                         bool is_committed) {
3181   unsigned int p = 0;
3182   switch (prot) {
3183   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3184   case MEM_PROT_READ: p = PAGE_READONLY; break;
3185   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3186   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3187   default:
3188     ShouldNotReachHere();
3189   }
3190
3191   DWORD old_status;
3192
3193   // Oddly enough, on Win32 one can change protection only for committed
3194   // memory; not a big deal anyway, as bytes is at most 64K here
3195   if (!is_committed) {
3196     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3197                           "cannot commit protection page");
3198   }
3199   // One cannot use os::guard_memory() here, as on Win32 guard pages
3200   // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3201   //
3202   // Pages in the region become guard pages. Any attempt to access a guard page
3203   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3204   // the guard page status. Guard pages thus act as a one-time access alarm.
3205   bool ret;
3206   if (UseNUMAInterleaving) {
3207     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3208     // so we must protect the chunks individually.
3209     ret = protect_pages_individually(addr, bytes, p, &old_status);
3210   } else {
3211     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3212   }
3213 #ifdef ASSERT
3214   if (!ret) {
3215     int err = os::get_last_error();
3216     char buf[256];
3217     size_t buf_len = os::lasterror(buf, sizeof(buf));
3218     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3219             ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3220             buf_len != 0 ? buf : "<no_error_string>", err);
3221   }
3222 #endif
3223   return ret;
3224 }
3225
3226 bool os::guard_memory(char* addr, size_t bytes) {
3227   DWORD old_status;
3228   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3229 }
3230
3231 bool os::unguard_memory(char* addr, size_t bytes) {
3232   DWORD old_status;
3233   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3234 }
3235
3236 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3237 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3238 void os::numa_make_global(char *addr, size_t bytes) { }
3239 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
3240 bool os::numa_topology_changed() { return false; }
3241 size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
3242 int os::numa_get_group_id() { return 0; }
3243 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3244   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3245     // Provide an answer for UMA systems
3246     ids[0] = 0;
3247     return 1;
3248   } else {
3249     // check for size bigger than actual groups_num
3250     size = MIN2(size, numa_get_groups_num());
3251     for (int i = 0; i < (int)size; i++) {
3252       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3253     }
3254     return size;
3255   }
3256 }
3257
3258 bool os::get_page_info(char *start, page_info* info) {
3259   return false;
3260 }
3261
3262 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3263                      page_info* page_found) {
3264   return end;
3265 }
3266
3267 char* os::non_memory_address_word() {
3268   // Must never look like an address returned by reserve_memory,
3269   // even in its subfields (as defined by the CPU immediate fields,
3270   // if the CPU splits constants across multiple instructions).
3271   return (char*)-1;
3272 }
3273
3274 #define MAX_ERROR_COUNT 100
3275 #define SYS_THREAD_ERROR 0xffffffffUL
3276
3277 void os::pd_start_thread(Thread* thread) {
3278   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3279   // Returns previous suspend state:
3280   // 0: Thread was not suspended
3281   // 1: Thread is running now
3282   // >1: Thread is still suspended.
3283   assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
3284 }
3285
3286 class HighResolutionInterval : public CHeapObj<mtThread> {
3287   // The default timer resolution seems to be 10 milliseconds.
3288   // (Where is this written down?)
3289   // If someone wants to sleep for only a fraction of the default,
3290   // then we set the timer resolution down to 1 millisecond for
3291   // the duration of their interval.
3292   // We carefully set the resolution back, since otherwise we
3293   // seem to incur an overhead (3%?) that we don't need.
3294   // CONSIDER: if ms is small, say 3, then we should run with a high resolution timer.
3295   // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
3296   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
3297   // timeBeginPeriod() if the relative error exceeded some threshold.
3298   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
3299   // to decreased efficiency related to increased timer "tick" rates. We want to minimize
3300   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
3301   // resolution timers running.
3302 private: 3303 jlong resolution; 3304 public: 3305 HighResolutionInterval(jlong ms) { 3306 resolution = ms % 10L; 3307 if (resolution != 0) { 3308 MMRESULT result = timeBeginPeriod(1L); 3309 } 3310 } 3311 ~HighResolutionInterval() { 3312 if (resolution != 0) { 3313 MMRESULT result = timeEndPeriod(1L); 3314 } 3315 resolution = 0L; 3316 } 3317 }; 3318 3319 int os::sleep(Thread* thread, jlong ms, bool interruptable) { 3320 jlong limit = (jlong) MAXDWORD; 3321 3322 while (ms > limit) { 3323 int res; 3324 if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) { 3325 return res; 3326 } 3327 ms -= limit; 3328 } 3329 3330 assert(thread == Thread::current(), "thread consistency check"); 3331 OSThread* osthread = thread->osthread(); 3332 OSThreadWaitState osts(osthread, false /* not Object.wait() */); 3333 int result; 3334 if (interruptable) { 3335 assert(thread->is_Java_thread(), "must be java thread"); 3336 JavaThread *jt = (JavaThread *) thread; 3337 ThreadBlockInVM tbivm(jt); 3338 3339 jt->set_suspend_equivalent(); 3340 // cleared by handle_special_suspend_equivalent_condition() or 3341 // java_suspend_self() via check_and_wait_while_suspended() 3342 3343 HANDLE events[1]; 3344 events[0] = osthread->interrupt_event(); 3345 HighResolutionInterval *phri=NULL; 3346 if (!ForceTimeHighResolution) { 3347 phri = new HighResolutionInterval(ms); 3348 } 3349 if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) { 3350 result = OS_TIMEOUT; 3351 } else { 3352 ResetEvent(osthread->interrupt_event()); 3353 osthread->set_interrupted(false); 3354 result = OS_INTRPT; 3355 } 3356 delete phri; //if it is NULL, harmless 3357 3358 // were we externally suspended while we were waiting? 3359 jt->check_and_wait_while_suspended(); 3360 } else { 3361 assert(!thread->is_Java_thread(), "must not be java thread"); 3362 Sleep((long) ms); 3363 result = OS_TIMEOUT; 3364 } 3365 return result; 3366 } 3367 3368 // Short sleep, direct OS call. 3369 // 3370 // ms = 0, means allow others (if any) to run. 3371 // 3372 void os::naked_short_sleep(jlong ms) { 3373 assert(ms < 1000, "Un-interruptable sleep, short time use only"); 3374 Sleep(ms); 3375 } 3376 3377 // Sleep forever; naked call to OS-specific sleep; use with CAUTION 3378 void os::infinite_sleep() { 3379 while (true) { // sleep forever ... 3380 Sleep(100000); // ... 100 seconds at a time 3381 } 3382 } 3383 3384 typedef BOOL (WINAPI * STTSignature)(void); 3385 3386 void os::naked_yield() { 3387 // Consider passing back the return value from SwitchToThread(). 3388 SwitchToThread(); 3389 } 3390 3391 // Win32 only gives you access to seven real priorities at a time, 3392 // so we compress Java's ten down to seven. It would be better 3393 // if we dynamically adjusted relative priorities. 
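// For example, Java priorities 5 (NormPriority) and 6 both map to
// THREAD_PRIORITY_NORMAL in the default table below; running with
// -XX:ThreadPriorityPolicy=1 switches to the prio_policy1 table instead (see
// prio_init()), which spreads the upper Java priorities over more distinct
// Win32 levels, up to THREAD_PRIORITY_TIME_CRITICAL.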
3394 3395 int os::java_to_os_priority[CriticalPriority + 1] = { 3396 THREAD_PRIORITY_IDLE, // 0 Entry should never be used 3397 THREAD_PRIORITY_LOWEST, // 1 MinPriority 3398 THREAD_PRIORITY_LOWEST, // 2 3399 THREAD_PRIORITY_BELOW_NORMAL, // 3 3400 THREAD_PRIORITY_BELOW_NORMAL, // 4 3401 THREAD_PRIORITY_NORMAL, // 5 NormPriority 3402 THREAD_PRIORITY_NORMAL, // 6 3403 THREAD_PRIORITY_ABOVE_NORMAL, // 7 3404 THREAD_PRIORITY_ABOVE_NORMAL, // 8 3405 THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority 3406 THREAD_PRIORITY_HIGHEST, // 10 MaxPriority 3407 THREAD_PRIORITY_HIGHEST // 11 CriticalPriority 3408 }; 3409 3410 int prio_policy1[CriticalPriority + 1] = { 3411 THREAD_PRIORITY_IDLE, // 0 Entry should never be used 3412 THREAD_PRIORITY_LOWEST, // 1 MinPriority 3413 THREAD_PRIORITY_LOWEST, // 2 3414 THREAD_PRIORITY_BELOW_NORMAL, // 3 3415 THREAD_PRIORITY_BELOW_NORMAL, // 4 3416 THREAD_PRIORITY_NORMAL, // 5 NormPriority 3417 THREAD_PRIORITY_ABOVE_NORMAL, // 6 3418 THREAD_PRIORITY_ABOVE_NORMAL, // 7 3419 THREAD_PRIORITY_HIGHEST, // 8 3420 THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority 3421 THREAD_PRIORITY_TIME_CRITICAL, // 10 MaxPriority 3422 THREAD_PRIORITY_TIME_CRITICAL // 11 CriticalPriority 3423 }; 3424 3425 static int prio_init() { 3426 // If ThreadPriorityPolicy is 1, switch tables 3427 if (ThreadPriorityPolicy == 1) { 3428 int i; 3429 for (i = 0; i < CriticalPriority + 1; i++) { 3430 os::java_to_os_priority[i] = prio_policy1[i]; 3431 } 3432 } 3433 if (UseCriticalJavaThreadPriority) { 3434 os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority]; 3435 } 3436 return 0; 3437 } 3438 3439 OSReturn os::set_native_priority(Thread* thread, int priority) { 3440 if (!UseThreadPriorities) return OS_OK; 3441 bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0; 3442 return ret ? OS_OK : OS_ERR; 3443 } 3444 3445 OSReturn os::get_native_priority(const Thread* const thread, 3446 int* priority_ptr) { 3447 if (!UseThreadPriorities) { 3448 *priority_ptr = java_to_os_priority[NormPriority]; 3449 return OS_OK; 3450 } 3451 int os_prio = GetThreadPriority(thread->osthread()->thread_handle()); 3452 if (os_prio == THREAD_PRIORITY_ERROR_RETURN) { 3453 assert(false, "GetThreadPriority failed"); 3454 return OS_ERR; 3455 } 3456 *priority_ptr = os_prio; 3457 return OS_OK; 3458 } 3459 3460 3461 // Hint to the underlying OS that a task switch would not be good. 3462 // Void return because it's a hint and can fail. 3463 void os::hint_no_preempt() {} 3464 3465 void os::interrupt(Thread* thread) { 3466 assert(!thread->is_Java_thread() || Thread::current() == thread || 3467 Threads_lock->owned_by_self(), 3468 "possibility of dangling Thread pointer"); 3469 3470 OSThread* osthread = thread->osthread(); 3471 osthread->set_interrupted(true); 3472 // More than one thread can get here with the same value of osthread, 3473 // resulting in multiple notifications. We do, however, want the store 3474 // to interrupted() to be visible to other threads before we post 3475 // the interrupt event. 
3476 OrderAccess::release(); 3477 SetEvent(osthread->interrupt_event()); 3478 // For JSR166: unpark after setting status 3479 if (thread->is_Java_thread()) { 3480 ((JavaThread*)thread)->parker()->unpark(); 3481 } 3482 3483 ParkEvent * ev = thread->_ParkEvent; 3484 if (ev != NULL) ev->unpark(); 3485 } 3486 3487 3488 bool os::is_interrupted(Thread* thread, bool clear_interrupted) { 3489 assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(), 3490 "possibility of dangling Thread pointer"); 3491 3492 OSThread* osthread = thread->osthread(); 3493 // There is no synchronization between the setting of the interrupt 3494 // and it being cleared here. It is critical - see 6535709 - that 3495 // we only clear the interrupt state, and reset the interrupt event, 3496 // if we are going to report that we were indeed interrupted - else 3497 // an interrupt can be "lost", leading to spurious wakeups or lost wakeups 3498 // depending on the timing. Checking the thread's interrupt event confirms 3499 // that a real interrupt occurred, which prevents spurious wakeups. 3500 bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0); 3501 if (interrupted && clear_interrupted) { 3502 osthread->set_interrupted(false); 3503 ResetEvent(osthread->interrupt_event()); 3504 } // Otherwise leave the interrupted state alone 3505 3506 return interrupted; 3507 } 3508 3509 // Gets a pc (hint) for a running thread. Currently used only for profiling. 3510 ExtendedPC os::get_thread_pc(Thread* thread) { 3511 CONTEXT context; 3512 context.ContextFlags = CONTEXT_CONTROL; 3513 HANDLE handle = thread->osthread()->thread_handle(); 3514 if (GetThreadContext(handle, &context)) { 3515 #ifdef _M_AMD64 3516 return ExtendedPC((address) context.Rip); 3517 #else 3518 return ExtendedPC((address) context.Eip); 3519 #endif 3520 } else { 3521 return ExtendedPC(NULL); 3522 } 3523 } 3524 3525 // GetCurrentThreadId() returns DWORD 3526 intx os::current_thread_id() { return GetCurrentThreadId(); } 3527 3528 static int _initial_pid = 0; 3529 3530 int os::current_process_id() { 3531 return (_initial_pid ?
_initial_pid : _getpid()); 3532 } 3533 3534 int os::win32::_vm_page_size = 0; 3535 int os::win32::_vm_allocation_granularity = 0; 3536 int os::win32::_processor_type = 0; 3537 // Processor level is not available on non-NT systems, use vm_version instead 3538 int os::win32::_processor_level = 0; 3539 julong os::win32::_physical_memory = 0; 3540 size_t os::win32::_default_stack_size = 0; 3541 3542 intx os::win32::_os_thread_limit = 0; 3543 volatile intx os::win32::_os_thread_count = 0; 3544 3545 bool os::win32::_is_windows_server = false; 3546 3547 // 6573254 3548 // Currently, the bug is observed across all the supported Windows releases, 3549 // including the latest one (as of this writing - Windows Server 2012 R2) 3550 bool os::win32::_has_exit_bug = true; 3551 3552 void os::win32::initialize_system_info() { 3553 SYSTEM_INFO si; 3554 GetSystemInfo(&si); 3555 _vm_page_size = si.dwPageSize; 3556 _vm_allocation_granularity = si.dwAllocationGranularity; 3557 _processor_type = si.dwProcessorType; 3558 _processor_level = si.wProcessorLevel; 3559 set_processor_count(si.dwNumberOfProcessors); 3560 3561 MEMORYSTATUSEX ms; 3562 ms.dwLength = sizeof(ms); 3563 3564 // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual, 3565 // dwMemoryLoad (% of memory in use) 3566 GlobalMemoryStatusEx(&ms); 3567 _physical_memory = ms.ullTotalPhys; 3568 3569 if (FLAG_IS_DEFAULT(MaxRAM)) { 3570 // Adjust MaxRAM according to the maximum virtual address space available. 3571 FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual)); 3572 } 3573 3574 OSVERSIONINFOEX oi; 3575 oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); 3576 GetVersionEx((OSVERSIONINFO*)&oi); 3577 switch (oi.dwPlatformId) { 3578 case VER_PLATFORM_WIN32_NT: 3579 { 3580 int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion; 3581 if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER || 3582 oi.wProductType == VER_NT_SERVER) { 3583 _is_windows_server = true; 3584 } 3585 } 3586 break; 3587 default: fatal("Unknown platform"); 3588 } 3589 3590 _default_stack_size = os::current_stack_size(); 3591 assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size"); 3592 assert((_default_stack_size & (_vm_page_size - 1)) == 0, 3593 "stack size not a multiple of page size"); 3594 3595 initialize_performance_counter(); 3596 } 3597 3598 3599 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, 3600 int ebuflen) { 3601 char path[MAX_PATH]; 3602 DWORD size; 3603 DWORD pathLen = (DWORD)sizeof(path); 3604 HINSTANCE result = NULL; 3605 3606 // only allow library name without path component 3607 assert(strchr(name, '\\') == NULL, "path not allowed"); 3608 assert(strchr(name, ':') == NULL, "path not allowed"); 3609 if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) { 3610 jio_snprintf(ebuf, ebuflen, 3611 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name); 3612 return NULL; 3613 } 3614 3615 // search system directory 3616 if ((size = GetSystemDirectory(path, pathLen)) > 0) { 3617 if (size >= pathLen) { 3618 return NULL; // truncated 3619 } 3620 if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) { 3621 return NULL; // truncated 3622 } 3623 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3624 return result; 3625 } 3626 } 3627 3628 // try Windows directory 3629 if ((size = GetWindowsDirectory(path, pathLen)) > 0) { 3630 if (size >= pathLen) { 3631 return NULL; // truncated 3632 } 3633 if (jio_snprintf(path + size, 
pathLen - size, "\\%s", name) == -1) { 3634 return NULL; // truncated 3635 } 3636 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3637 return result; 3638 } 3639 } 3640 3641 jio_snprintf(ebuf, ebuflen, 3642 "os::win32::load_windows_dll() cannot load %s from system directories.", name); 3643 return NULL; 3644 } 3645 3646 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS) 3647 #define EXIT_TIMEOUT 300000 /* 5 minutes */ 3648 3649 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) { 3650 InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect); 3651 return TRUE; 3652 } 3653 3654 int os::win32::exit_process_or_thread(Ept what, int exit_code) { 3655 // Basic approach: 3656 // - Each exiting thread registers its intent to exit and then does so. 3657 // - A thread trying to terminate the process must wait for all 3658 // threads currently exiting to complete their exit. 3659 3660 if (os::win32::has_exit_bug()) { 3661 // The array holds handles of the threads that have started exiting by calling 3662 // _endthreadex(). 3663 // Should be large enough to avoid blocking the exiting thread due to lack of 3664 // a free slot. 3665 static HANDLE handles[MAXIMUM_THREADS_TO_KEEP]; 3666 static int handle_count = 0; 3667 3668 static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT; 3669 static CRITICAL_SECTION crit_sect; 3670 static volatile jint process_exiting = 0; 3671 int i, j; 3672 DWORD res; 3673 HANDLE hproc, hthr; 3674 3675 // We only attempt to register threads until a process exiting 3676 // thread manages to set the process_exiting flag. Any threads 3677 // that come through here after the process_exiting flag is set 3678 // are unregistered and will be caught in the SuspendThread() 3679 // infinite loop below. 3680 bool registered = false; 3681 3682 // The first thread that reaches this point initializes the critical section. 3683 if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) { 3684 warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__); 3685 } else if (OrderAccess::load_acquire(&process_exiting) == 0) { 3686 if (what != EPT_THREAD) { 3687 // Atomically set process_exiting before the critical section 3688 // to increase the visibility between racing threads. 3689 Atomic::cmpxchg((jint)GetCurrentThreadId(), &process_exiting, 0); 3690 } 3691 EnterCriticalSection(&crit_sect); 3692 3693 if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) { 3694 // Remove from the array those handles of the threads that have completed exiting. 3695 for (i = 0, j = 0; i < handle_count; ++i) { 3696 res = WaitForSingleObject(handles[i], 0 /* don't wait */); 3697 if (res == WAIT_TIMEOUT) { 3698 handles[j++] = handles[i]; 3699 } else { 3700 if (res == WAIT_FAILED) { 3701 warning("WaitForSingleObject failed (%u) in %s: %d\n", 3702 GetLastError(), __FILE__, __LINE__); 3703 } 3704 // Don't keep the handle if we failed waiting for it. 3705 CloseHandle(handles[i]); 3706 } 3707 } 3708 3709 // If there's no free slot in the array of the kept handles, we'll have to 3710 // wait until at least one thread completes exiting. 3711 if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) { 3712 // Raise the priority of the oldest exiting thread to increase its chances 3713 // to complete sooner.
3714 SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL); 3715 res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT); 3716 if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) { 3717 i = (res - WAIT_OBJECT_0); 3718 handle_count = MAXIMUM_THREADS_TO_KEEP - 1; 3719 for (; i < handle_count; ++i) { 3720 handles[i] = handles[i + 1]; 3721 } 3722 } else { 3723 warning("WaitForMultipleObjects %s (%u) in %s: %d\n", 3724 (res == WAIT_FAILED ? "failed" : "timed out"), 3725 GetLastError(), __FILE__, __LINE__); 3726 // Don't keep handles, if we failed waiting for them. 3727 for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) { 3728 CloseHandle(handles[i]); 3729 } 3730 handle_count = 0; 3731 } 3732 } 3733 3734 // Store a duplicate of the current thread handle in the array of handles. 3735 hproc = GetCurrentProcess(); 3736 hthr = GetCurrentThread(); 3737 if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count], 3738 0, FALSE, DUPLICATE_SAME_ACCESS)) { 3739 warning("DuplicateHandle failed (%u) in %s: %d\n", 3740 GetLastError(), __FILE__, __LINE__); 3741 3742 // We can't register this thread (no more handles) so this thread 3743 // may be racing with a thread that is calling exit(). If the thread 3744 // that is calling exit() has managed to set the process_exiting 3745 // flag, then this thread will be caught in the SuspendThread() 3746 // infinite loop below which closes that race. A small timing 3747 // window remains before the process_exiting flag is set, but it 3748 // is only exposed when we are out of handles. 3749 } else { 3750 ++handle_count; 3751 registered = true; 3752 3753 // The current exiting thread has stored its handle in the array, and now 3754 // should leave the critical section before calling _endthreadex(). 3755 } 3756 3757 } else if (what != EPT_THREAD && handle_count > 0) { 3758 jlong start_time, finish_time, timeout_left; 3759 // Before ending the process, make sure all the threads that had called 3760 // _endthreadex() completed. 3761 3762 // Set the priority level of the current thread to the same value as 3763 // the priority level of exiting threads. 3764 // This is to ensure it will be given a fair chance to execute if 3765 // the timeout expires. 3766 hthr = GetCurrentThread(); 3767 SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL); 3768 start_time = os::javaTimeNanos(); 3769 finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L); 3770 for (i = 0; ; ) { 3771 int portion_count = handle_count - i; 3772 if (portion_count > MAXIMUM_WAIT_OBJECTS) { 3773 portion_count = MAXIMUM_WAIT_OBJECTS; 3774 } 3775 for (j = 0; j < portion_count; ++j) { 3776 SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL); 3777 } 3778 timeout_left = (finish_time - start_time) / 1000000L; 3779 if (timeout_left < 0) { 3780 timeout_left = 0; 3781 } 3782 res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left); 3783 if (res == WAIT_FAILED || res == WAIT_TIMEOUT) { 3784 warning("WaitForMultipleObjects %s (%u) in %s: %d\n", 3785 (res == WAIT_FAILED ? "failed" : "timed out"), 3786 GetLastError(), __FILE__, __LINE__); 3787 // Reset portion_count so we close the remaining 3788 // handles due to this error. 
3789 portion_count = handle_count - i; 3790 } 3791 for (j = 0; j < portion_count; ++j) { 3792 CloseHandle(handles[i + j]); 3793 } 3794 if ((i += portion_count) >= handle_count) { 3795 break; 3796 } 3797 start_time = os::javaTimeNanos(); 3798 } 3799 handle_count = 0; 3800 } 3801 3802 LeaveCriticalSection(&crit_sect); 3803 } 3804 3805 if (!registered && 3806 OrderAccess::load_acquire(&process_exiting) != 0 && 3807 process_exiting != (jint)GetCurrentThreadId()) { 3808 // Some other thread is about to call exit(), so we don't let 3809 // the current unregistered thread proceed to exit() or _endthreadex() 3810 while (true) { 3811 SuspendThread(GetCurrentThread()); 3812 // Avoid busy-wait loop, if SuspendThread() failed. 3813 Sleep(EXIT_TIMEOUT); 3814 } 3815 } 3816 } 3817 3818 // We are here if either 3819 // - there's no 'race at exit' bug on this OS release; 3820 // - initialization of the critical section failed (unlikely); 3821 // - the current thread has registered itself and left the critical section; 3822 // - the process-exiting thread has raised the flag and left the critical section. 3823 if (what == EPT_THREAD) { 3824 _endthreadex((unsigned)exit_code); 3825 } else if (what == EPT_PROCESS) { 3826 ::exit(exit_code); 3827 } else { 3828 _exit(exit_code); 3829 } 3830 3831 // Should not reach here 3832 return exit_code; 3833 } 3834 3835 #undef EXIT_TIMEOUT 3836 3837 void os::win32::setmode_streams() { 3838 _setmode(_fileno(stdin), _O_BINARY); 3839 _setmode(_fileno(stdout), _O_BINARY); 3840 _setmode(_fileno(stderr), _O_BINARY); 3841 } 3842 3843 3844 bool os::is_debugger_attached() { 3845 return IsDebuggerPresent() ? true : false; 3846 } 3847 3848 3849 void os::wait_for_keypress_at_exit(void) { 3850 if (PauseAtExit) { 3851 fprintf(stderr, "Press any key to continue...\n"); 3852 fgetc(stdin); 3853 } 3854 } 3855 3856 3857 bool os::message_box(const char* title, const char* message) { 3858 int result = MessageBox(NULL, message, title, 3859 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY); 3860 return result == IDYES; 3861 } 3862 3863 #ifndef PRODUCT 3864 #ifndef _WIN64 3865 // Helpers to check whether NX protection is enabled 3866 int nx_exception_filter(_EXCEPTION_POINTERS *pex) { 3867 if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 3868 pex->ExceptionRecord->NumberParameters > 0 && 3869 pex->ExceptionRecord->ExceptionInformation[0] == 3870 EXCEPTION_INFO_EXEC_VIOLATION) { 3871 return EXCEPTION_EXECUTE_HANDLER; 3872 } 3873 return EXCEPTION_CONTINUE_SEARCH; 3874 } 3875 3876 void nx_check_protection() { 3877 // If NX is enabled we'll get an exception calling into code on the stack 3878 char code[] = { (char)0xC3 }; // ret 3879 void *code_ptr = (void *)code; 3880 __try { 3881 __asm call code_ptr 3882 } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) { 3883 tty->print_raw_cr("NX protection detected."); 3884 } 3885 } 3886 #endif // _WIN64 3887 #endif // PRODUCT 3888 3889 // This is called _before_ the global arguments have been parsed 3890 void os::init(void) { 3891 _initial_pid = _getpid(); 3892 3893 init_random(1234567); 3894 3895 win32::initialize_system_info(); 3896 win32::setmode_streams(); 3897 init_page_sizes((size_t) win32::vm_page_size()); 3898 3899 // This may be overridden later when argument processing is done. 
3900 FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false); 3901 3902 // Initialize main_process and main_thread 3903 main_process = GetCurrentProcess(); // Remember main_process is a pseudo handle 3904 if (!DuplicateHandle(main_process, GetCurrentThread(), main_process, 3905 &main_thread, THREAD_ALL_ACCESS, false, 0)) { 3906 fatal("DuplicateHandle failed\n"); 3907 } 3908 main_thread_id = (int) GetCurrentThreadId(); 3909 3910 // initialize fast thread access - only used for 32-bit 3911 win32::initialize_thread_ptr_offset(); 3912 } 3913 3914 // To install functions for atexit processing 3915 extern "C" { 3916 static void perfMemory_exit_helper() { 3917 perfMemory_exit(); 3918 } 3919 } 3920 3921 static jint initSock(); 3922 3923 // this is called _after_ the global arguments have been parsed 3924 jint os::init_2(void) { 3925 // Allocate a single page and mark it as readable for safepoint polling 3926 address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY); 3927 guarantee(polling_page != NULL, "Reserve Failed for polling page"); 3928 3929 address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY); 3930 guarantee(return_page != NULL, "Commit Failed for polling page"); 3931 3932 os::set_polling_page(polling_page); 3933 log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page)); 3934 3935 if (!UseMembar) { 3936 address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE); 3937 guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page"); 3938 3939 return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE); 3940 guarantee(return_page != NULL, "Commit Failed for memory serialize page"); 3941 3942 os::set_memory_serialize_page(mem_serialize_page); 3943 log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page)); 3944 } 3945 3946 // Setup Windows Exceptions 3947 3948 // for debugging float code generation bugs 3949 if (ForceFloatExceptions) { 3950 #ifndef _WIN64 3951 static long fp_control_word = 0; 3952 __asm { fstcw fp_control_word } 3953 // see Intel PPro Manual, Vol. 2, p 7-16 3954 const long precision = 0x20; 3955 const long underflow = 0x10; 3956 const long overflow = 0x08; 3957 const long zero_div = 0x04; 3958 const long denorm = 0x02; 3959 const long invalid = 0x01; 3960 fp_control_word |= invalid; 3961 __asm { fldcw fp_control_word } 3962 #endif 3963 } 3964 3965 // If stack_commit_size is 0, windows will reserve the default size, 3966 // but only commit a small portion of it. 3967 size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size()); 3968 size_t default_reserve_size = os::win32::default_stack_size(); 3969 size_t actual_reserve_size = stack_commit_size; 3970 if (stack_commit_size < default_reserve_size) { 3971 // If stack_commit_size == 0, we want this too 3972 actual_reserve_size = default_reserve_size; 3973 } 3974 3975 // Check minimum allowable stack size for thread creation and to initialize 3976 // the java system classes, including StackOverflowError - depends on page 3977 // size. Add two 4K pages for compiler2 recursion in main thread. 3978 // Add in 4*BytesPerWord 4K pages to account for VM stack during 3979 // class initialization depending on 32 or 64 bit VM. 
3980 size_t min_stack_allowed = 3981 (size_t)(JavaThread::stack_guard_zone_size() + 3982 JavaThread::stack_shadow_zone_size() + 3983 (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K); 3984 3985 min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size()); 3986 3987 if (actual_reserve_size < min_stack_allowed) { 3988 tty->print_cr("\nThe Java thread stack size specified is too small. " 3989 "Specify at least %dk", 3990 min_stack_allowed / K); 3991 return JNI_ERR; 3992 } 3993 3994 JavaThread::set_stack_size_at_create(stack_commit_size); 3995 3996 // Calculate theoretical max. size of Threads to guard against artificial 3997 // out-of-memory situations, where all available address-space has been 3998 // reserved by thread stacks. 3999 assert(actual_reserve_size != 0, "Must have a stack"); 4000 4001 // Calculate the thread limit when we should start doing Virtual Memory 4002 // banging. Currently this is when the threads will have used all but 200Mb of space. 4003 // 4004 // TODO: consider performing a similar calculation for commit size instead 4005 // of reserve size, since on a 64-bit platform we'll run into that more 4006 // often than running out of virtual memory space. We can use the 4007 // lower value of the two calculations as the os_thread_limit. 4008 size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K); 4009 win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size); 4010 4011 // atexit functions are called in the reverse order of their registration. 4012 // there is no limit to the number of functions registered. atexit does 4013 // not set errno. 4014 4015 if (PerfAllowAtExitRegistration) { 4016 // only register atexit functions if PerfAllowAtExitRegistration is set. 4017 // atexit functions can be delayed until process exit time, which 4018 // can be problematic for embedded VM situations. Embedded VMs should 4019 // call DestroyJavaVM() to assure that VM resources are released. 4020 4021 // note: perfMemory_exit_helper atexit function may be removed in 4022 // the future if the appropriate cleanup code can be added to the 4023 // VM_Exit VMOperation's doit method.
4024 if (atexit(perfMemory_exit_helper) != 0) { 4025 warning("os::init_2 atexit(perfMemory_exit_helper) failed"); 4026 } 4027 } 4028 4029 #ifndef _WIN64 4030 // Print something if NX is enabled (win32 on AMD64) 4031 NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection()); 4032 #endif 4033 4034 // initialize thread priority policy 4035 prio_init(); 4036 4037 if (UseNUMA && !ForceNUMA) { 4038 UseNUMA = false; // We don't fully support this yet 4039 } 4040 4041 if (UseNUMAInterleaving) { 4042 // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag 4043 bool success = numa_interleaving_init(); 4044 if (!success) UseNUMAInterleaving = false; 4045 } 4046 4047 if (initSock() != JNI_OK) { 4048 return JNI_ERR; 4049 } 4050 4051 return JNI_OK; 4052 } 4053 4054 // Mark the polling page as unreadable 4055 void os::make_polling_page_unreadable(void) { 4056 DWORD old_status; 4057 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), 4058 PAGE_NOACCESS, &old_status)) { 4059 fatal("Could not disable polling page"); 4060 } 4061 } 4062 4063 // Mark the polling page as readable 4064 void os::make_polling_page_readable(void) { 4065 DWORD old_status; 4066 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), 4067 PAGE_READONLY, &old_status)) { 4068 fatal("Could not enable polling page"); 4069 } 4070 } 4071 4072 4073 int os::stat(const char *path, struct stat *sbuf) { 4074 char pathbuf[MAX_PATH]; 4075 if (strlen(path) > MAX_PATH - 1) { 4076 errno = ENAMETOOLONG; 4077 return -1; 4078 } 4079 os::native_path(strcpy(pathbuf, path)); 4080 int ret = ::stat(pathbuf, sbuf); 4081 if (sbuf != NULL && UseUTCFileTimestamp) { 4082 // Fix for 6539723. st_mtime returned from stat() is dependent on 4083 // the system timezone and so can return different values for the 4084 // same file if/when daylight savings time changes. This adjustment 4085 // makes sure the same timestamp is returned regardless of the TZ. 4086 // 4087 // See: 4088 // http://msdn.microsoft.com/library/ 4089 // default.asp?url=/library/en-us/sysinfo/base/ 4090 // time_zone_information_str.asp 4091 // and 4092 // http://msdn.microsoft.com/library/default.asp?url= 4093 // /library/en-us/sysinfo/base/settimezoneinformation.asp 4094 // 4095 // NOTE: there is an insidious bug here: If the timezone is changed 4096 // after the call to stat() but before 'GetTimeZoneInformation()', then 4097 // the adjustment we do here will be wrong and we'll return the wrong 4098 // value (which will likely end up creating an invalid class data 4099 // archive). Absent a better API for this, or some time zone locking 4100 // mechanism, we'll have to live with this risk. 4101 TIME_ZONE_INFORMATION tz; 4102 DWORD tzid = GetTimeZoneInformation(&tz); 4103 int daylightBias = 4104 (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias; 4105 sbuf->st_mtime += (tz.Bias + daylightBias) * 60; 4106 } 4107 return ret; 4108 } 4109 4110 4111 #define FT2INT64(ft) \ 4112 ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime)) 4113 4114 4115 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) 4116 // are used by JVM M&M and JVMTI to get user+sys or user CPU time 4117 // of a thread. 4118 // 4119 // current_thread_cpu_time() and thread_cpu_time(Thread*) return 4120 // the fast estimate available on the platform.
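// GetThreadTimes() reports FILETIME values in 100 ns units, so the
// implementations below multiply by 100 to return nanoseconds.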
4121 4122 // current_thread_cpu_time() is not optimized for Windows yet 4123 jlong os::current_thread_cpu_time() { 4124 // return user + sys since the cost is the same 4125 return os::thread_cpu_time(Thread::current(), true /* user+sys */); 4126 } 4127 4128 jlong os::thread_cpu_time(Thread* thread) { 4129 // consistent with what current_thread_cpu_time() returns. 4130 return os::thread_cpu_time(thread, true /* user+sys */); 4131 } 4132 4133 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 4134 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 4135 } 4136 4137 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) { 4138 // This code is copied from the classic VM -> hpi::sysThreadCPUTime 4139 // If this function changes, os::is_thread_cpu_time_supported() should too 4140 FILETIME CreationTime; 4141 FILETIME ExitTime; 4142 FILETIME KernelTime; 4143 FILETIME UserTime; 4144 4145 if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime, 4146 &ExitTime, &KernelTime, &UserTime) == 0) { 4147 return -1; 4148 } else if (user_sys_cpu_time) { 4149 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100; 4150 } else { 4151 return FT2INT64(UserTime) * 100; 4152 } 4153 } 4154 4155 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4156 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4157 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4158 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4159 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4160 } 4161 4162 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4163 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4164 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4165 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4166 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4167 } 4168 4169 bool os::is_thread_cpu_time_supported() { 4170 // see os::thread_cpu_time 4171 FILETIME CreationTime; 4172 FILETIME ExitTime; 4173 FILETIME KernelTime; 4174 FILETIME UserTime; 4175 4176 if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime, 4177 &KernelTime, &UserTime) == 0) { 4178 return false; 4179 } else { 4180 return true; 4181 } 4182 } 4183 4184 // Windows doesn't provide a loadavg primitive so this is stubbed out for now. 4185 // It does have primitives (PDH API) to get CPU usage and run queue length. 4186 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length" 4187 // If we wanted to implement loadavg on Windows, we have a few options: 4188 // 4189 // a) Query CPU usage and run queue length and "fake" an answer by 4190 // returning the CPU usage if it's under 100%, and the run queue 4191 // length otherwise. It turns out that querying is pretty slow 4192 // on Windows, on the order of 200 microseconds on a fast machine. 4193 // Note that on Windows the CPU usage value is the % usage 4194 // since the last time the API was called (and the first call 4195 // returns 100%), so we'd have to deal with that as well. 4196 // 4197 // b) Sample the "fake" answer using a sampling thread and store 4198 // the answer in a global variable. The call to loadavg would 4199 // just return the value of the global, avoiding the slow query. 4200 // 4201 // c) Sample a better answer using exponential decay to smooth the 4202 // value. This is basically the algorithm used by UNIX kernels.
4203 // 4204 // Note that sampling thread starvation could affect both (b) and (c). 4205 int os::loadavg(double loadavg[], int nelem) { 4206 return -1; 4207 } 4208 4209 4210 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield() 4211 bool os::dont_yield() { 4212 return DontYieldALot; 4213 } 4214 4215 // This method is a slightly reworked copy of JDK's sysOpen 4216 // from src/windows/hpi/src/sys_api_md.c 4217 4218 int os::open(const char *path, int oflag, int mode) { 4219 char pathbuf[MAX_PATH]; 4220 4221 if (strlen(path) > MAX_PATH - 1) { 4222 errno = ENAMETOOLONG; 4223 return -1; 4224 } 4225 os::native_path(strcpy(pathbuf, path)); 4226 return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); 4227 } 4228 4229 FILE* os::open(int fd, const char* mode) { 4230 return ::_fdopen(fd, mode); 4231 } 4232 4233 // Is a (classpath) directory empty? 4234 bool os::dir_is_empty(const char* path) { 4235 WIN32_FIND_DATA fd; 4236 HANDLE f = FindFirstFile(path, &fd); 4237 if (f == INVALID_HANDLE_VALUE) { 4238 return true; 4239 } 4240 FindClose(f); 4241 return false; 4242 } 4243 4244 // create binary file, rewriting existing file if required 4245 int os::create_binary_file(const char* path, bool rewrite_existing) { 4246 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY; 4247 if (!rewrite_existing) { 4248 oflags |= _O_EXCL; 4249 } 4250 return ::open(path, oflags, _S_IREAD | _S_IWRITE); 4251 } 4252 4253 // return current position of file pointer 4254 jlong os::current_file_offset(int fd) { 4255 return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR); 4256 } 4257 4258 // move file pointer to the specified offset 4259 jlong os::seek_to_file_offset(int fd, jlong offset) { 4260 return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET); 4261 } 4262 4263 4264 jlong os::lseek(int fd, jlong offset, int whence) { 4265 return (jlong) ::_lseeki64(fd, offset, whence); 4266 } 4267 4268 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) { 4269 OVERLAPPED ov; 4270 DWORD nread; 4271 BOOL result; 4272 4273 ZeroMemory(&ov, sizeof(ov)); 4274 ov.Offset = (DWORD)offset; 4275 ov.OffsetHigh = (DWORD)(offset >> 32); 4276 4277 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4278 4279 result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov); 4280 4281 return result ? nread : 0; 4282 } 4283 4284 4285 // This method is a slightly reworked copy of JDK's sysNativePath 4286 // from src/windows/hpi/src/path_md.c 4287 4288 // Convert a pathname to native format. On win32, this involves forcing all 4289 // separators to be '\\' rather than '/' (both are legal inputs, but Win95 4290 // sometimes rejects '/') and removing redundant separators. The input path is 4291 // assumed to have been converted into the character encoding used by the local 4292 // system. Because this might be a double-byte encoding, care is taken to 4293 // treat double-byte lead characters correctly. 4294 // 4295 // This procedure modifies the given path in place, as the result is never 4296 // longer than the original. There is no error return; this operation always 4297 // succeeds. 
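// For example, "a//b/" becomes "a\b" and "//c:/path" becomes "c:\path".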
4298 char * os::native_path(char *path) { 4299 char *src = path, *dst = path, *end = path; 4300 char *colon = NULL; // If a drive specifier is found, this will 4301 // point to the colon following the drive letter 4302 4303 // Assumption: '/', '\\', ':', and drive letters are never lead bytes 4304 assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\')) 4305 && (!::IsDBCSLeadByte(':'))), "Illegal lead byte"); 4306 4307 // Check for leading separators 4308 #define isfilesep(c) ((c) == '/' || (c) == '\\') 4309 while (isfilesep(*src)) { 4310 src++; 4311 } 4312 4313 if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') { 4314 // Remove leading separators if followed by drive specifier. This 4315 // hack is necessary to support file URLs containing drive 4316 // specifiers (e.g., "file://c:/path"). As a side effect, 4317 // "/c:/path" can be used as an alternative to "c:/path". 4318 *dst++ = *src++; 4319 colon = dst; 4320 *dst++ = ':'; 4321 src++; 4322 } else { 4323 src = path; 4324 if (isfilesep(src[0]) && isfilesep(src[1])) { 4325 // UNC pathname: Retain first separator; leave src pointed at 4326 // second separator so that further separators will be collapsed 4327 // into the second separator. The result will be a pathname 4328 // beginning with "\\\\" followed (most likely) by a host name. 4329 src = dst = path + 1; 4330 path[0] = '\\'; // Force first separator to '\\' 4331 } 4332 } 4333 4334 end = dst; 4335 4336 // Remove redundant separators from remainder of path, forcing all 4337 // separators to be '\\' rather than '/'. Also, single byte space 4338 // characters are removed from the end of the path because those 4339 // are not legal ending characters on this operating system. 4340 // 4341 while (*src != '\0') { 4342 if (isfilesep(*src)) { 4343 *dst++ = '\\'; src++; 4344 while (isfilesep(*src)) src++; 4345 if (*src == '\0') { 4346 // Check for trailing separator 4347 end = dst; 4348 if (colon == dst - 2) break; // "z:\\" 4349 if (dst == path + 1) break; // "\\" 4350 if (dst == path + 2 && isfilesep(path[0])) { 4351 // "\\\\" is not collapsed to "\\" because "\\\\" marks the 4352 // beginning of a UNC pathname. Even though it is not, by 4353 // itself, a valid UNC pathname, we leave it as is in order 4354 // to be consistent with the path canonicalizer as well 4355 // as the win32 APIs, which treat this case as an invalid 4356 // UNC pathname rather than as an alias for the root 4357 // directory of the current drive. 4358 break; 4359 } 4360 end = --dst; // Path does not denote a root directory, so 4361 // remove trailing separator 4362 break; 4363 } 4364 end = dst; 4365 } else { 4366 if (::IsDBCSLeadByte(*src)) { // Copy a double-byte character 4367 *dst++ = *src++; 4368 if (*src) *dst++ = *src++; 4369 end = dst; 4370 } else { // Copy a single-byte character 4371 char c = *src++; 4372 *dst++ = c; 4373 // Space is not a legal ending character 4374 if (c != ' ') end = dst; 4375 } 4376 } 4377 } 4378 4379 *end = '\0'; 4380 4381 // For "z:", add "." 
to work around a bug in the C runtime library 4382 if (colon == dst - 1) { 4383 path[2] = '.'; 4384 path[3] = '\0'; 4385 } 4386 4387 return path; 4388 } 4389 4390 // This code is a copy of JDK's sysSetLength 4391 // from src/windows/hpi/src/sys_api_md.c 4392 4393 int os::ftruncate(int fd, jlong length) { 4394 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4395 long high = (long)(length >> 32); 4396 DWORD ret; 4397 4398 if (h == (HANDLE)(-1)) { 4399 return -1; 4400 } 4401 4402 ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN); 4403 if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) { 4404 return -1; 4405 } 4406 4407 if (::SetEndOfFile(h) == FALSE) { 4408 return -1; 4409 } 4410 4411 return 0; 4412 } 4413 4414 int os::get_fileno(FILE* fp) { 4415 return _fileno(fp); 4416 } 4417 4418 // This code is a copy of JDK's sysSync 4419 // from src/windows/hpi/src/sys_api_md.c 4420 // except for the legacy workaround for a bug in Win 98 4421 4422 int os::fsync(int fd) { 4423 HANDLE handle = (HANDLE)::_get_osfhandle(fd); 4424 4425 if ((!::FlushFileBuffers(handle)) && 4426 (GetLastError() != ERROR_ACCESS_DENIED)) { 4427 // from winerror.h 4428 return -1; 4429 } 4430 return 0; 4431 } 4432 4433 static int nonSeekAvailable(int, long *); 4434 static int stdinAvailable(int, long *); 4435 4436 #define S_ISCHR(mode) (((mode) & _S_IFCHR) == _S_IFCHR) 4437 #define S_ISFIFO(mode) (((mode) & _S_IFIFO) == _S_IFIFO) 4438 4439 // This code is a copy of JDK's sysAvailable 4440 // from src/windows/hpi/src/sys_api_md.c 4441 4442 int os::available(int fd, jlong *bytes) { 4443 jlong cur, end; 4444 struct _stati64 stbuf64; 4445 4446 if (::_fstati64(fd, &stbuf64) >= 0) { 4447 int mode = stbuf64.st_mode; 4448 if (S_ISCHR(mode) || S_ISFIFO(mode)) { 4449 int ret; 4450 long lpbytes; 4451 if (fd == 0) { 4452 ret = stdinAvailable(fd, &lpbytes); 4453 } else { 4454 ret = nonSeekAvailable(fd, &lpbytes); 4455 } 4456 (*bytes) = (jlong)(lpbytes); 4457 return ret; 4458 } 4459 if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) { 4460 return FALSE; 4461 } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) { 4462 return FALSE; 4463 } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) { 4464 return FALSE; 4465 } 4466 *bytes = end - cur; 4467 return TRUE; 4468 } else { 4469 return FALSE; 4470 } 4471 } 4472 4473 void os::flockfile(FILE* fp) { 4474 _lock_file(fp); 4475 } 4476 4477 void os::funlockfile(FILE* fp) { 4478 _unlock_file(fp); 4479 } 4480 4481 // This code is a copy of JDK's nonSeekAvailable 4482 // from src/windows/hpi/src/sys_api_md.c 4483 4484 static int nonSeekAvailable(int fd, long *pbytes) { 4485 // This is used for available on non-seekable devices 4486 // (like both named and anonymous pipes, such as pipes 4487 // connected to an exec'd process). 4488 // Standard Input is a special case. 4489 HANDLE han; 4490 4491 if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) { 4492 return FALSE; 4493 } 4494 4495 if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) { 4496 // PeekNamedPipe fails when at EOF. In that case we 4497 // simply make *pbytes = 0 which is consistent with the 4498 // behavior we get on Solaris when an fd is at EOF. 4499 // The only alternative is to raise an Exception, 4500 // which isn't really warranted. 
4501 // 4502 if (::GetLastError() != ERROR_BROKEN_PIPE) { 4503 return FALSE; 4504 } 4505 *pbytes = 0; 4506 } 4507 return TRUE; 4508 } 4509 4510 #define MAX_INPUT_EVENTS 2000 4511 4512 // This code is a copy of JDK's stdinAvailable 4513 // from src/windows/hpi/src/sys_api_md.c 4514 4515 static int stdinAvailable(int fd, long *pbytes) { 4516 HANDLE han; 4517 DWORD numEventsRead = 0; // Number of events read from buffer 4518 DWORD numEvents = 0; // Number of events in buffer 4519 DWORD i = 0; // Loop index 4520 DWORD curLength = 0; // Position marker 4521 DWORD actualLength = 0; // Number of bytes readable 4522 BOOL error = FALSE; // Error holder 4523 INPUT_RECORD *lpBuffer; // Pointer to records of input events 4524 4525 if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) { 4526 return FALSE; 4527 } 4528 4529 // Construct an array of input records in the console buffer 4530 error = ::GetNumberOfConsoleInputEvents(han, &numEvents); 4531 if (error == 0) { 4532 return nonSeekAvailable(fd, pbytes); 4533 } 4534 4535 // lpBuffer must fit into 64K or else PeekConsoleInput fails 4536 if (numEvents > MAX_INPUT_EVENTS) { 4537 numEvents = MAX_INPUT_EVENTS; 4538 } 4539 4540 lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal); 4541 if (lpBuffer == NULL) { 4542 return FALSE; 4543 } 4544 4545 error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead); 4546 if (error == 0) { 4547 os::free(lpBuffer); 4548 return FALSE; 4549 } 4550 4551 // Examine input records for the number of bytes available 4552 for (i=0; i<numEvents; i++) { 4553 if (lpBuffer[i].EventType == KEY_EVENT) { 4554 4555 KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *) 4556 &(lpBuffer[i].Event); 4557 if (keyRecord->bKeyDown == TRUE) { 4558 CHAR *keyPressed = (CHAR *) &(keyRecord->uChar); 4559 curLength++; 4560 if (*keyPressed == '\r') { 4561 actualLength = curLength; 4562 } 4563 } 4564 } 4565 } 4566 4567 if (lpBuffer != NULL) { 4568 os::free(lpBuffer); 4569 } 4570 4571 *pbytes = (long) actualLength; 4572 return TRUE; 4573 } 4574 4575 // Map a block of memory. 4576 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 4577 char *addr, size_t bytes, bool read_only, 4578 bool allow_exec) { 4579 HANDLE hFile; 4580 char* base; 4581 4582 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL, 4583 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); 4584 if (hFile == NULL) { 4585 log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError()); 4586 return NULL; 4587 } 4588 4589 if (allow_exec) { 4590 // CreateFileMapping/MapViewOfFileEx can't map executable memory 4591 // unless it comes from a PE image (which the shared archive is not.) 4592 // Even VirtualProtect refuses to give execute access to mapped memory 4593 // that was not previously executable. 4594 // 4595 // Instead, stick the executable region in anonymous memory. Yuck. 4596 // Penalty is that ~4 pages will not be shareable - in the future 4597 // we might consider DLLizing the shared archive with a proper PE 4598 // header so that mapping executable + sharing is possible. 
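// Reserve and commit anonymous read/write memory at the requested address,
// copy the file contents into it with ReadFile(), and rely on the
// VirtualProtect() call further below to make the region executable.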
4599 4600 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE, 4601 PAGE_READWRITE); 4602 if (base == NULL) { 4603 log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError()); 4604 CloseHandle(hFile); 4605 return NULL; 4606 } 4607 4608 DWORD bytes_read; 4609 OVERLAPPED overlapped; 4610 overlapped.Offset = (DWORD)file_offset; 4611 overlapped.OffsetHigh = 0; 4612 overlapped.hEvent = NULL; 4613 // ReadFile guarantees that if the return value is true, the requested 4614 // number of bytes were read before returning. 4615 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; 4616 if (!res) { 4617 log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError()); 4618 release_memory(base, bytes); 4619 CloseHandle(hFile); 4620 return NULL; 4621 } 4622 } else { 4623 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, 4624 NULL /* file_name */); 4625 if (hMap == NULL) { 4626 log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError()); 4627 CloseHandle(hFile); 4628 return NULL; 4629 } 4630 4631 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; 4632 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, 4633 (DWORD)bytes, addr); 4634 if (base == NULL) { 4635 log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError()); 4636 CloseHandle(hMap); 4637 CloseHandle(hFile); 4638 return NULL; 4639 } 4640 4641 if (CloseHandle(hMap) == 0) { 4642 log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError()); 4643 CloseHandle(hFile); 4644 return base; 4645 } 4646 } 4647 4648 if (allow_exec) { 4649 DWORD old_protect; 4650 DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; 4651 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; 4652 4653 if (!res) { 4654 log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError()); 4655 // Don't consider this a hard error, on IA32 even if the 4656 // VirtualProtect fails, we should still be able to execute 4657 CloseHandle(hFile); 4658 return base; 4659 } 4660 } 4661 4662 if (CloseHandle(hFile) == 0) { 4663 log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError()); 4664 return base; 4665 } 4666 4667 return base; 4668 } 4669 4670 4671 // Remap a block of memory. 4672 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4673 char *addr, size_t bytes, bool read_only, 4674 bool allow_exec) { 4675 // This OS does not allow existing memory maps to be remapped so we 4676 // have to unmap the memory before we remap it. 4677 if (!os::unmap_memory(addr, bytes)) { 4678 return NULL; 4679 } 4680 4681 // There is a very small theoretical window between the unmap_memory() 4682 // call above and the map_memory() call below where a thread in native 4683 // code may be able to access an address that is no longer mapped. 4684 4685 return os::map_memory(fd, file_name, file_offset, addr, bytes, 4686 read_only, allow_exec); 4687 } 4688 4689 4690 // Unmap a block of memory. 4691 // Returns true=success, otherwise false. 4692 4693 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4694 MEMORY_BASIC_INFORMATION mem_info; 4695 if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) { 4696 log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError()); 4697 return false; 4698 } 4699 4700 // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx. 4701 // Instead, executable region was allocated using VirtualAlloc(). 
See 4702 // pd_map_memory() above. 4703 // 4704 // The following flags should match the 'exec_access' flags used for 4705 // VirtualProtect() in pd_map_memory(). 4706 if (mem_info.Protect == PAGE_EXECUTE_READ || 4707 mem_info.Protect == PAGE_EXECUTE_READWRITE) { 4708 return pd_release_memory(addr, bytes); 4709 } 4710 4711 BOOL result = UnmapViewOfFile(addr); 4712 if (result == 0) { 4713 log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError()); 4714 return false; 4715 } 4716 return true; 4717 } 4718 4719 void os::pause() { 4720 char filename[MAX_PATH]; 4721 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4722 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4723 } else { 4724 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4725 } 4726 4727 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4728 if (fd != -1) { 4729 struct stat buf; 4730 ::close(fd); 4731 while (::stat(filename, &buf) == 0) { 4732 Sleep(100); 4733 } 4734 } else { 4735 jio_fprintf(stderr, 4736 "Could not open pause file '%s', continuing immediately.\n", filename); 4737 } 4738 } 4739 4740 Thread* os::ThreadCrashProtection::_protected_thread = NULL; 4741 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL; 4742 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0; 4743 4744 os::ThreadCrashProtection::ThreadCrashProtection() { 4745 } 4746 4747 // See the caveats for this class in os_windows.hpp 4748 // Protects the callback call so that a raised OS EXCEPTION causes a jump back 4749 // into this method, which then returns false. If no OS EXCEPTION was raised, returns 4750 // true. 4751 // The callback is supposed to provide the method that should be protected. 4752 // 4753 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) { 4754 4755 Thread::muxAcquire(&_crash_mux, "CrashProtection"); 4756 4757 _protected_thread = Thread::current_or_null(); 4758 assert(_protected_thread != NULL, "Cannot crash protect a NULL thread"); 4759 4760 bool success = true; 4761 __try { 4762 _crash_protection = this; 4763 cb.call(); 4764 } __except(EXCEPTION_EXECUTE_HANDLER) { 4765 // only for protection, nothing to do 4766 success = false; 4767 } 4768 _crash_protection = NULL; 4769 _protected_thread = NULL; 4770 Thread::muxRelease(&_crash_mux); 4771 return success; 4772 } 4773 4774 // An Event wraps a win32 "CreateEvent" kernel handle. 4775 // 4776 // We have a number of choices regarding "CreateEvent" win32 handle leakage: 4777 // 4778 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle 4779 // field, and call CloseHandle() on the win32 event handle. Unpark() would 4780 // need to be modified to tolerate finding a NULL (invalid) win32 event handle. 4781 // In addition, an unpark() operation might fetch the handle field, but the 4782 // event could recycle between the fetch and the SetEvent() operation. 4783 // SetEvent() would either fail because the handle was invalid, or inadvertently work, 4784 // as the win32 handle value had been recycled. In an ideal world calling SetEvent() 4785 // on a stale but recycled handle would be harmless, but in practice this might 4786 // confuse other non-Sun code, so it's not a viable approach. 4787 // 4788 // 2: Once a win32 event handle is associated with an Event, it remains associated 4789 // with the Event. The event handle is never closed.
This could be construed 4790 // as handle leakage, but only up to the maximum # of threads that have been extant 4791 // at any one time. This shouldn't be an issue, as Windows platforms typically 4792 // permit a process to have hundreds of thousands of open handles. 4793 // 4794 // 3: Same as (1), but periodically, at stop-the-world time, run down the EventFreeList 4795 // and release unused handles. 4796 // 4797 // 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle. 4798 // It's not clear, however, that we wouldn't be trading one type of leak for another. 4799 // 4800 // 5. Use an RCU-like mechanism (Read-Copy Update). 4801 // Or perhaps something similar to Maged Michael's "Hazard pointers". 4802 // 4803 // We use (2). 4804 // 4805 // TODO-FIXME: 4806 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation. 4807 // 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks 4808 // to recover from (or at least detect) the dreaded Windows 841176 bug. 4809 // 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent 4810 // into a single win32 CreateEvent() handle. 4811 // 4812 // Assumption: 4813 // Only one parker can exist on an event, which is why we allocate 4814 // them per-thread. Multiple unparkers can coexist. 4815 // 4816 // _Event transitions in park() 4817 // -1 => -1 : illegal 4818 // 1 => 0 : pass - return immediately 4819 // 0 => -1 : block; then set _Event to 0 before returning 4820 // 4821 // _Event transitions in unpark() 4822 // 0 => 1 : just return 4823 // 1 => 1 : just return 4824 // -1 => either 0 or 1; must signal target thread 4825 // That is, we can safely transition _Event from -1 to either 4826 // 0 or 1. 4827 // 4828 // _Event serves as a restricted-range semaphore. 4829 // -1 : thread is blocked, i.e. there is a waiter 4830 // 0 : neutral: thread is running or ready, 4831 // could have been signaled after a wait started 4832 // 1 : signaled - thread is running or ready 4833 // 4834 // Another possible encoding of _Event would be with 4835 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits. 4836 // 4837 4838 int os::PlatformEvent::park(jlong Millis) { 4839 // Transitions for _Event: 4840 // -1 => -1 : illegal 4841 // 1 => 0 : pass - return immediately 4842 // 0 => -1 : block; then set _Event to 0 before returning 4843 4844 guarantee(_ParkHandle != NULL , "Invariant"); 4845 guarantee(Millis > 0 , "Invariant"); 4846 4847 // CONSIDER: defer assigning a CreateEvent() handle to the Event until 4848 // the initial park() operation. 4849 // Consider: use atomic decrement instead of CAS-loop 4850 4851 int v; 4852 for (;;) { 4853 v = _Event; 4854 if (Atomic::cmpxchg(v-1, &_Event, v) == v) break; 4855 } 4856 guarantee((v == 0) || (v == 1), "invariant"); 4857 if (v != 0) return OS_OK; 4858 4859 // Do this the hard way by blocking ... 4860 // TODO: consider a brief spin here, gated on the success of recent 4861 // spin attempts by this thread. 4862 // 4863 // We decompose long timeouts into a series of shorter timed waits. 4864 // Evidently large timeout values passed in WaitForSingleObject() are problematic on some 4865 // versions of Windows. See EventWait() for details. This may be superstition. Or not. 4866 // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time 4867 // with os::javaTimeNanos().
Furthermore, we assume that spurious returns from 4868 // ::WaitForSingleObject() caused by latent ::SetEvent() operations will tend 4869 // to happen early in the wait interval. Specifically, after a spurious wakeup (rv == 4870 // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate 4871 // for the already waited time. This policy does not admit any new outcomes. 4872 // In the future, however, we might want to track the accumulated wait time and 4873 // adjust Millis accordingly if we encounter a spurious wakeup. 4874 4875 const int MAXTIMEOUT = 0x10000000; 4876 DWORD rv = WAIT_TIMEOUT; 4877 while (_Event < 0 && Millis > 0) { 4878 DWORD prd = Millis; // set prd = MIN(Millis, MAXTIMEOUT) 4879 if (Millis > MAXTIMEOUT) { 4880 prd = MAXTIMEOUT; 4881 } 4882 rv = ::WaitForSingleObject(_ParkHandle, prd); 4883 assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed"); 4884 if (rv == WAIT_TIMEOUT) { 4885 Millis -= prd; 4886 } 4887 } 4888 v = _Event; 4889 _Event = 0; 4890 // see comment at end of os::PlatformEvent::park() below: 4891 OrderAccess::fence(); 4892 // If we encounter a nearly simultaneous timeout expiry and unpark() 4893 // we return OS_OK indicating we awoke via unpark(). 4894 // Implementor's license -- returning OS_TIMEOUT would be equally valid, however. 4895 return (v >= 0) ? OS_OK : OS_TIMEOUT; 4896 } 4897 4898 void os::PlatformEvent::park() { 4899 // Transitions for _Event: 4900 // -1 => -1 : illegal 4901 // 1 => 0 : pass - return immediately 4902 // 0 => -1 : block; then set _Event to 0 before returning 4903 4904 guarantee(_ParkHandle != NULL, "Invariant"); 4905 // Invariant: Only the thread associated with the Event/PlatformEvent 4906 // may call park(). 4907 // Consider: use atomic decrement instead of CAS-loop 4908 int v; 4909 for (;;) { 4910 v = _Event; 4911 if (Atomic::cmpxchg(v-1, &_Event, v) == v) break; 4912 } 4913 guarantee((v == 0) || (v == 1), "invariant"); 4914 if (v != 0) return; 4915 4916 // Do this the hard way by blocking ... 4917 // TODO: consider a brief spin here, gated on the success of recent 4918 // spin attempts by this thread. 4919 while (_Event < 0) { 4920 DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE); 4921 assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed"); 4922 } 4923 4924 // Usually we'll find _Event == 0 at this point, but as 4925 // an optional optimization we clear it, just in case 4926 // multiple unpark() operations drove _Event up to 1. 4927 _Event = 0; 4928 OrderAccess::fence(); 4929 guarantee(_Event >= 0, "invariant"); 4930 } 4931 4932 void os::PlatformEvent::unpark() { 4933 guarantee(_ParkHandle != NULL, "Invariant"); 4934 4935 // Transitions for _Event: 4936 // 0 => 1 : just return 4937 // 1 => 1 : just return 4938 // -1 => either 0 or 1; must signal target thread 4939 // That is, we can safely transition _Event from -1 to either 4940 // 0 or 1. 4941 // See also: "Semaphores in Plan 9" by Mullender & Cox 4942 // 4943 // Note: Forcing a transition from "-1" to "1" on an unpark() means 4944 // that it will take two back-to-back park() calls for the owning 4945 // thread to block. This has the benefit of forcing a spurious return 4946 // from the first park() call after an unpark() call which will help 4947 // shake out uses of park() and unpark() without condition variables.
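// Atomically publish the signaled state (_Event := 1); only call SetEvent()
// when the previous value was -1, i.e. a thread is actually parked.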

  if (Atomic::xchg(1, &_Event) >= 0) return;

  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

// The Windows implementation of Park is very straightforward: Basic
// operations on Win32 Events turn out to have the right semantics to
// use them directly.  We opportunistically reuse the event inherited
// from Monitor.

void Parker::park(bool isAbsolute, jlong time) {
  guarantee(_ParkEvent != NULL, "invariant");
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  } else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  } else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) { // already elapsed
      return;
    }
  } else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0) { // Wait for the minimal time unit if zero
      time = 1;
    }
  }

  JavaThread* thread = JavaThread::current();

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    ResetEvent(_ParkEvent);
    return;
  } else {
    ThreadBlockInVM tbivm(thread);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    thread->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (thread->handle_special_suspend_equivalent_condition()) {
      thread->java_suspend_self();
    }
  }
}

void Parker::unpark() {
  guarantee(_ParkEvent != NULL, "invariant");
  SetEvent(_ParkEvent);
}

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't create a new process).
int os::fork_and_exec(char* cmd) {
  STARTUPINFO si;
  PROCESS_INFORMATION pi;
  DWORD exit_code;

  char * cmd_string;
  char * cmd_prefix = "cmd /C ";
  size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
  cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
  if (cmd_string == NULL) {
    return -1;
  }
  cmd_string[0] = '\0';
  strcat(cmd_string, cmd_prefix);
  strcat(cmd_string, cmd);

  // now replace all '\n' with '&'
  char * substring = cmd_string;
  while ((substring = strchr(substring, '\n')) != NULL) {
    substring[0] = '&';
    substring++;
  }
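  //
  // Worked example (illustrative only; the input string is hypothetical): for
  // cmd == "echo first\necho second" the buffer built above becomes
  //   "cmd /C echo first&echo second"
  // which cmd.exe executes as two commands in sequence.  A minimal stand-alone
  // equivalent of the launch below, without HotSpot helpers:
  //
  //   #include <windows.h>
  //   static DWORD run_via_cmd(char* cmdline) {   // e.g. "cmd /C echo first&echo second"
  //     STARTUPINFO si = {0};  si.cb = sizeof(si);
  //     PROCESS_INFORMATION pi = {0};
  //     DWORD exit_code = (DWORD)-1;
  //     if (CreateProcess(NULL, cmdline, NULL, NULL, TRUE, 0, NULL, NULL, &si, &pi)) {
  //       WaitForSingleObject(pi.hProcess, INFINITE);
  //       GetExitCodeProcess(pi.hProcess, &exit_code);
  //       CloseHandle(pi.hProcess);
  //       CloseHandle(pi.hThread);
  //     }
  //     return exit_code;
  //   }
  //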
  memset(&si, 0, sizeof(si));
  si.cb = sizeof(si);
  memset(&pi, 0, sizeof(pi));
  BOOL rslt = CreateProcess(NULL,        // executable name - use command line
                            cmd_string,  // command line
                            NULL,        // process security attribute
                            NULL,        // thread security attribute
                            TRUE,        // inherits system handles
                            0,           // no creation flags
                            NULL,        // use parent's environment block
                            NULL,        // use parent's starting directory
                            &si,         // (in) startup information
                            &pi);        // (out) process information

  if (rslt) {
    // Wait until child process exits.
    WaitForSingleObject(pi.hProcess, INFINITE);

    GetExitCodeProcess(pi.hProcess, &exit_code);

    // Close process and thread handles.
    CloseHandle(pi.hProcess);
    CloseHandle(pi.hThread);
  } else {
    exit_code = -1;
  }

  FREE_C_HEAP_ARRAY(char, cmd_string);
  return (int)exit_code;
}

bool os::find(address addr, outputStream* st) {
  int offset = -1;
  bool result = false;
  char buf[256];
  if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
    st->print(PTR_FORMAT " ", addr);
    if (strlen(buf) < sizeof(buf) - 1) {
      char* p = strrchr(buf, '\\');
      if (p) {
        st->print("%s", p + 1);
      } else {
        st->print("%s", buf);
      }
    } else {
      // The library name is probably truncated. Let's omit the library name.
      // See also JDK-8147512.
    }
    if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
      st->print("::%s + 0x%x", buf, offset);
    }
    st->cr();
    result = true;
  }
  return result;
}

LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
  DWORD exception_code = e->ExceptionRecord->ExceptionCode;

  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    JavaThread* thread = JavaThread::current();
    PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (os::is_memory_serialize_page(thread, addr)) {
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  return EXCEPTION_CONTINUE_SEARCH;
}

// We don't build a headless jre for Windows
bool os::is_headless_jre() { return false; }

static jint initSock() {
  WSADATA wsadata;

  if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
    jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
                ::GetLastError());
    return JNI_ERR;
  }
  return JNI_OK;
}

struct hostent* os::get_host_by_name(char* name) {
  return (struct hostent*)gethostbyname(name);
}

int os::socket_close(int fd) {
  return ::closesocket(fd);
}

int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}

int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  return ::connect(fd, him, len);
}

int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  return ::recv(fd, buf, (int)nBytes, flags);
}

int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(IA32)
  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif

// returns true if thread could be suspended,
// false otherwise
static bool do_suspend(HANDLE* h) {
  if (h != NULL) {
    if (SuspendThread(*h) != ~0) {
      return true;
    }
  }
  return false;
}

// resume the thread
// calling resume on an active thread is a no-op
static void do_resume(HANDLE* h) {
  if (h != NULL) {
    ResumeThread(*h);
  }
}
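//
// Illustrative only: the suspend -> GetThreadContext -> resume pattern used by
// internal_do_task() below, as a stand-alone Win32 sketch.  sample_rip() and the
// way the program counter is read are hypothetical and not part of this file.
//
//   #include <windows.h>
//
//   // Returns the sampled instruction pointer of thread 'tid', or 0 on failure.
//   static DWORD64 sample_rip(DWORD tid) {
//     HANDLE h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT |
//                           THREAD_QUERY_INFORMATION, FALSE, tid);
//     if (h == NULL) return 0;
//     DWORD64 rip = 0;
//     if (SuspendThread(h) != (DWORD)-1) {        // suspend before reading the context
//       CONTEXT ctx;
//       ctx.ContextFlags = CONTEXT_FULL | CONTEXT_FLOATING_POINT;
//       if (GetThreadContext(h, &ctx)) {
//         rip = ctx.Rip;                          // x64; use ctx.Eip on 32-bit
//       }
//       ResumeThread(h);                          // always pair suspend with resume
//     }
//     CloseHandle(h);
//     return rip;
//   }
//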
// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value.
void get_thread_handle_for_extended_context(HANDLE* h,
                                            OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

// Thread sampling implementation
//
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}

bool os::start_debugging(char *buf, int buflen) {
  int len = (int)strlen(buf);
  char *p = &buf[len];

  jio_snprintf(p, buflen-len,
               "\n\n"
               "Do you want to debug the problem?\n\n"
               "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
               "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
               "Otherwise, select 'No' to abort...",
               os::current_process_id(), os::current_thread_id());

  bool yes = os::message_box("Unexpected Error", buf);

  if (yes) {
    // os::breakpoint() calls DebugBreak(), which causes a breakpoint
    // exception. If VM is running inside a debugger, the debugger will
    // catch the exception. Otherwise, the breakpoint exception will reach
    // the default windows exception handler, which can spawn a debugger and
    // automatically attach to the dying VM.
    os::breakpoint();
    yes = false;
  }
  return yes;
}

void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//                            == false if only the base name of the library is passed in
//                                     such as "L"
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
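  // Illustrative example (hypothetical input): for sym_name == "_Agent_OnLoad@16",
  // lib_name == "C:\\a\\b\\L.dll" and is_absolute_path == true, the code above strips
  // the path and the JNI library prefix/suffix so that only the base name "L" is used,
  // and the assembly below splits at the '@' to produce "_Agent_OnLoad_L@16".
  // A stand-alone sketch of that assembly step:
  //
  //   #include <stdio.h>
  //   #include <string.h>
  //   int main() {
  //     const char* sym = "_Agent_OnLoad@16";    // __stdcall-decorated symbol
  //     const char* lib = "L";                   // base library name after stripping
  //     const char* at  = strrchr(sym, '@');
  //     char out[64];
  //     snprintf(out, sizeof(out), "%.*s_%s%s", (int)(at - sym), sym, lib, at);
  //     printf("%s\n", out);                     // prints "_Agent_OnLoad_L@16"
  //     return 0;
  //   }
  //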
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}

#ifndef PRODUCT

// test the code path in reserve_memory_special() that tries to allocate memory in a single
// contiguous memory block at a particular address.
// The test first tries to find a good approximate address to allocate at by using the same
// method to allocate some memory at any address.  The test then tries to allocate memory in
// the vicinity (not directly after it, to avoid possible by-chance use of that location).
// This is of course only a rough assumption; there is no guarantee that the vicinity of
// the previously allocated memory is available for allocation.  The only actual failure
// that is reported is when the test tries to allocate at a particular location but gets a
// different valid one.  A NULL return value at this point is not considered an error but may
// be legitimate.
// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
void TestReserveMemorySpecial_test() {
  if (!UseLargePages) {
    if (VerboseInternalVMTests) {
      tty->print("Skipping test because large pages are disabled");
    }
    return;
  }
  // save current value of globals
  bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
  bool old_use_numa_interleaving = UseNUMAInterleaving;

  // set globals to make sure we hit the correct code path
  UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;

  // do an allocation at an address selected by the OS to get a good one.
  const size_t large_allocation_size = os::large_page_size() * 4;
  char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
  if (result == NULL) {
    if (VerboseInternalVMTests) {
      tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.",
                 large_allocation_size);
    }
  } else {
    os::release_memory_special(result, large_allocation_size);

    // allocate another page within the recently allocated memory area which seems to be a good location. At least
    // we managed to get it once.
    const size_t expected_allocation_size = os::large_page_size();
    char* expected_location = result + os::large_page_size();
    char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
    if (actual_location == NULL) {
      if (VerboseInternalVMTests) {
        tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.",
                   expected_location, large_allocation_size);
      }
    } else {
      // release memory
      os::release_memory_special(actual_location, expected_allocation_size);
      // only now check, after releasing any memory to avoid any leaks.
      assert(actual_location == expected_location,
             "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
             expected_location, expected_allocation_size, actual_location);
    }
  }

  // restore globals
  UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
  UseNUMAInterleaving = old_use_numa_interleaving;
}
#endif // PRODUCT

/*
  All the defined signal names for Windows.

  NOTE that not all of these names are accepted by FindSignal!

  For various reasons some of these may be rejected at runtime.

  Here are the names currently accepted by a user of sun.misc.Signal with
  1.4.1 (ignoring potential interaction with use of chaining, etc):

  (LIST TBD)

*/
int os::get_signal_number(const char* name) {
  static const struct {
    char* name;
    int number;
  } siglabels [] =
    // derived from version 6.0 VC98/include/signal.h
  {"ABRT",  SIGABRT,  // abnormal termination triggered by abort call
   "FPE",   SIGFPE,   // floating point exception
   "SEGV",  SIGSEGV,  // segment violation
   "INT",   SIGINT,   // interrupt
   "TERM",  SIGTERM,  // software term signal from kill
   "BREAK", SIGBREAK, // Ctrl-Break sequence
   "ILL",   SIGILL};  // illegal instruction
  for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
    if (strcmp(name, siglabels[i].name) == 0) {
      return siglabels[i].number;
    }
  }
  return -1;
}

// Fast current thread access

int os::win32::_thread_ptr_offset = 0;

static void call_wrapper_dummy() {}

// We need to call the os_exception_wrapper once so that it sets
// up the offset from FS of the thread pointer.
void os::win32::initialize_thread_ptr_offset() {
  os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
                           NULL, NULL, NULL, NULL);
}
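//
// Illustrative only: how a name-to-signal lookup such as get_signal_number() above
// behaves for a couple of inputs, as a stand-alone sketch (main(), lookup() and the
// printed values are hypothetical, not part of this file).
//
//   #include <signal.h>
//   #include <stdio.h>
//   #include <string.h>
//
//   static int lookup(const char* name) {
//     static const struct { const char* name; int number; } tab[] =
//       {{"ABRT", SIGABRT}, {"FPE", SIGFPE}, {"SEGV", SIGSEGV},
//        {"INT", SIGINT},   {"TERM", SIGTERM}, {"ILL", SIGILL}};
//     for (unsigned i = 0; i < sizeof(tab)/sizeof(tab[0]); ++i) {
//       if (strcmp(name, tab[i].name) == 0) return tab[i].number;
//     }
//     return -1;   // unknown names are rejected, mirroring get_signal_number()
//   }
//
//   int main() {
//     printf("TERM -> %d\n", lookup("TERM"));   // e.g. 15 with the MSVC CRT
//     printf("HUP  -> %d\n", lookup("HUP"));    // -1: not a Windows signal name
//     return 0;
//   }
//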