rev 13529 : 8185712: [windows] Improve native symbol decoder Reviewed-by:
1 /* 2 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce 26 #define _WIN32_WINNT 0x0600 27 28 // no precompiled headers 29 #include "classfile/classLoader.hpp" 30 #include "classfile/systemDictionary.hpp" 31 #include "classfile/vmSymbols.hpp" 32 #include "code/icBuffer.hpp" 33 #include "code/vtableStubs.hpp" 34 #include "compiler/compileBroker.hpp" 35 #include "compiler/disassembler.hpp" 36 #include "interpreter/interpreter.hpp" 37 #include "jvm_windows.h" 38 #include "logging/log.hpp" 39 #include "memory/allocation.inline.hpp" 40 #include "memory/filemap.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "os_share_windows.hpp" 43 #include "os_windows.inline.hpp" 44 #include "prims/jniFastGetField.hpp" 45 #include "prims/jvm.h" 46 #include "prims/jvm_misc.hpp" 47 #include "runtime/arguments.hpp" 48 #include "runtime/atomic.hpp" 49 #include "runtime/extendedPC.hpp" 50 #include "runtime/globals.hpp" 51 #include "runtime/interfaceSupport.hpp" 52 #include "runtime/java.hpp" 53 #include "runtime/javaCalls.hpp" 54 #include "runtime/mutexLocker.hpp" 55 #include "runtime/objectMonitor.hpp" 56 #include "runtime/orderAccess.inline.hpp" 57 #include "runtime/osThread.hpp" 58 #include "runtime/perfMemory.hpp" 59 #include "runtime/sharedRuntime.hpp" 60 #include "runtime/statSampler.hpp" 61 #include "runtime/stubRoutines.hpp" 62 #include "runtime/thread.inline.hpp" 63 #include "runtime/threadCritical.hpp" 64 #include "runtime/timer.hpp" 65 #include "runtime/vm_version.hpp" 66 #include "semaphore_windows.hpp" 67 #include "services/attachListener.hpp" 68 #include "services/memTracker.hpp" 69 #include "services/runtimeService.hpp" 70 #include "utilities/align.hpp" 71 #include "utilities/decoder.hpp" 72 #include "utilities/defaultStream.hpp" 73 #include "utilities/events.hpp" 74 #include "utilities/growableArray.hpp" 75 #include "utilities/macros.hpp" 76 #include "utilities/vmError.hpp" 77 #include "symbolengine.hpp" 78 #include "windbghelp.hpp" 79 80 81 #ifdef _DEBUG 82 #include <crtdbg.h> 83 #endif 84 85 86 #include <windows.h> 87 #include <sys/types.h> 88 #include <sys/stat.h> 89 #include <sys/timeb.h> 90 #include <objidl.h> 91 #include <shlobj.h> 92 93 #include <malloc.h> 94 #include <signal.h> 95 #include <direct.h> 96 #include <errno.h> 97 #include <fcntl.h> 98 #include <io.h> 99 #include <process.h> // For _beginthreadex(), _endthreadex() 100 #include <imagehlp.h> // For 
os::dll_address_to_function_name 101 // for enumerating dll libraries 102 #include <vdmdbg.h> 103 104 // for timer info max values which include all bits 105 #define ALL_64_BITS CONST64(-1) 106 107 // For DLL loading/load error detection 108 // Values of PE COFF 109 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c 110 #define IMAGE_FILE_SIGNATURE_LENGTH 4 111 112 static HANDLE main_process; 113 static HANDLE main_thread; 114 static int main_thread_id; 115 116 static FILETIME process_creation_time; 117 static FILETIME process_exit_time; 118 static FILETIME process_user_time; 119 static FILETIME process_kernel_time; 120 121 #ifdef _M_AMD64 122 #define __CPU__ amd64 123 #else 124 #define __CPU__ i486 125 #endif 126 127 // save DLL module handle, used by GetModuleFileName 128 129 HINSTANCE vm_lib_handle; 130 131 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) { 132 switch (reason) { 133 case DLL_PROCESS_ATTACH: 134 vm_lib_handle = hinst; 135 if (ForceTimeHighResolution) { 136 timeBeginPeriod(1L); 137 } 138 WindowsDbgHelp::pre_initialize(); 139 SymbolEngine::pre_initialize(); 140 break; 141 case DLL_PROCESS_DETACH: 142 if (ForceTimeHighResolution) { 143 timeEndPeriod(1L); 144 } 145 break; 146 default: 147 break; 148 } 149 return true; 150 } 151 152 static inline double fileTimeAsDouble(FILETIME* time) { 153 const double high = (double) ((unsigned int) ~0); 154 const double split = 10000000.0; 155 double result = (time->dwLowDateTime / split) + 156 time->dwHighDateTime * (high/split); 157 return result; 158 } 159 160 // Implementation of os 161 162 bool os::unsetenv(const char* name) { 163 assert(name != NULL, "Null pointer"); 164 return (SetEnvironmentVariable(name, NULL) == TRUE); 165 } 166 167 // No setuid programs under Windows. 168 bool os::have_special_privileges() { 169 return false; 170 } 171 172 173 // This method is a periodic task to check for misbehaving JNI applications 174 // under CheckJNI, we can add any periodic checks here. 175 // For Windows at the moment does nothing 176 void os::run_periodic_checks() { 177 return; 178 } 179 180 // previous UnhandledExceptionFilter, if there is one 181 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL; 182 183 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo); 184 185 void os::init_system_properties_values() { 186 // sysclasspath, java_home, dll_dir 187 { 188 char *home_path; 189 char *dll_path; 190 char *pslash; 191 char *bin = "\\bin"; 192 char home_dir[MAX_PATH + 1]; 193 char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR"); 194 195 if (alt_home_dir != NULL) { 196 strncpy(home_dir, alt_home_dir, MAX_PATH + 1); 197 home_dir[MAX_PATH] = '\0'; 198 } else { 199 os::jvm_path(home_dir, sizeof(home_dir)); 200 // Found the full path to jvm.dll. 201 // Now cut the path to <java_home>/jre if we can. 
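      // For illustration (hypothetical layout, not taken from the original sources):
      // with jvm.dll at <java_home>\jre\bin\server\jvm.dll, the strrchr() calls
      // below trim home_dir in three steps:
      //   ...\jre\bin\server\jvm.dll -> ...\jre\bin\server   (drop \jvm.dll)
      //   ...\jre\bin\server         -> ...\jre\bin          (drop \{client|server})
      //   ...\jre\bin                -> ...\jre              (drop \bin)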
202 *(strrchr(home_dir, '\\')) = '\0'; // get rid of \jvm.dll 203 pslash = strrchr(home_dir, '\\'); 204 if (pslash != NULL) { 205 *pslash = '\0'; // get rid of \{client|server} 206 pslash = strrchr(home_dir, '\\'); 207 if (pslash != NULL) { 208 *pslash = '\0'; // get rid of \bin 209 } 210 } 211 } 212 213 home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal); 214 if (home_path == NULL) { 215 return; 216 } 217 strcpy(home_path, home_dir); 218 Arguments::set_java_home(home_path); 219 FREE_C_HEAP_ARRAY(char, home_path); 220 221 dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, 222 mtInternal); 223 if (dll_path == NULL) { 224 return; 225 } 226 strcpy(dll_path, home_dir); 227 strcat(dll_path, bin); 228 Arguments::set_dll_dir(dll_path); 229 FREE_C_HEAP_ARRAY(char, dll_path); 230 231 if (!set_boot_path('\\', ';')) { 232 return; 233 } 234 } 235 236 // library_path 237 #define EXT_DIR "\\lib\\ext" 238 #define BIN_DIR "\\bin" 239 #define PACKAGE_DIR "\\Sun\\Java" 240 { 241 // Win32 library search order (See the documentation for LoadLibrary): 242 // 243 // 1. The directory from which application is loaded. 244 // 2. The system wide Java Extensions directory (Java only) 245 // 3. System directory (GetSystemDirectory) 246 // 4. Windows directory (GetWindowsDirectory) 247 // 5. The PATH environment variable 248 // 6. The current directory 249 250 char *library_path; 251 char tmp[MAX_PATH]; 252 char *path_str = ::getenv("PATH"); 253 254 library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) + 255 sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal); 256 257 library_path[0] = '\0'; 258 259 GetModuleFileName(NULL, tmp, sizeof(tmp)); 260 *(strrchr(tmp, '\\')) = '\0'; 261 strcat(library_path, tmp); 262 263 GetWindowsDirectory(tmp, sizeof(tmp)); 264 strcat(library_path, ";"); 265 strcat(library_path, tmp); 266 strcat(library_path, PACKAGE_DIR BIN_DIR); 267 268 GetSystemDirectory(tmp, sizeof(tmp)); 269 strcat(library_path, ";"); 270 strcat(library_path, tmp); 271 272 GetWindowsDirectory(tmp, sizeof(tmp)); 273 strcat(library_path, ";"); 274 strcat(library_path, tmp); 275 276 if (path_str) { 277 strcat(library_path, ";"); 278 strcat(library_path, path_str); 279 } 280 281 strcat(library_path, ";."); 282 283 Arguments::set_library_path(library_path); 284 FREE_C_HEAP_ARRAY(char, library_path); 285 } 286 287 // Default extensions directory 288 { 289 char path[MAX_PATH]; 290 char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1]; 291 GetWindowsDirectory(path, MAX_PATH); 292 sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR, 293 path, PACKAGE_DIR, EXT_DIR); 294 Arguments::set_ext_dirs(buf); 295 } 296 #undef EXT_DIR 297 #undef BIN_DIR 298 #undef PACKAGE_DIR 299 300 #ifndef _WIN64 301 // set our UnhandledExceptionFilter and save any previous one 302 prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception); 303 #endif 304 305 // Done 306 return; 307 } 308 309 void os::breakpoint() { 310 DebugBreak(); 311 } 312 313 // Invoked from the BREAKPOINT Macro 314 extern "C" void breakpoint() { 315 os::breakpoint(); 316 } 317 318 // RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP. 319 // So far, this method is only used by Native Memory Tracking, which is 320 // only supported on Windows XP or later. 
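// Illustrative usage sketch (an assumption, not taken from the original sources):
//   address frames[8];
//   int captured = os::get_native_stack(frames, 8, 0 /* toSkip */);
// captures up to 8 return addresses of the current thread, skipping the
// get_native_stack() frame itself (hence the "toSkip + 1" below); any remaining
// slots are NULL-filled and the number of captured frames is returned.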
321 // 322 int os::get_native_stack(address* stack, int frames, int toSkip) { 323 int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL); 324 for (int index = captured; index < frames; index ++) { 325 stack[index] = NULL; 326 } 327 return captured; 328 } 329 330 331 // os::current_stack_base() 332 // 333 // Returns the base of the stack, which is the stack's 334 // starting address. This function must be called 335 // while running on the stack of the thread being queried. 336 337 address os::current_stack_base() { 338 MEMORY_BASIC_INFORMATION minfo; 339 address stack_bottom; 340 size_t stack_size; 341 342 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 343 stack_bottom = (address)minfo.AllocationBase; 344 stack_size = minfo.RegionSize; 345 346 // Add up the sizes of all the regions with the same 347 // AllocationBase. 348 while (1) { 349 VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo)); 350 if (stack_bottom == (address)minfo.AllocationBase) { 351 stack_size += minfo.RegionSize; 352 } else { 353 break; 354 } 355 } 356 return stack_bottom + stack_size; 357 } 358 359 size_t os::current_stack_size() { 360 size_t sz; 361 MEMORY_BASIC_INFORMATION minfo; 362 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 363 sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase; 364 return sz; 365 } 366 367 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 368 const struct tm* time_struct_ptr = localtime(clock); 369 if (time_struct_ptr != NULL) { 370 *res = *time_struct_ptr; 371 return res; 372 } 373 return NULL; 374 } 375 376 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) { 377 const struct tm* time_struct_ptr = gmtime(clock); 378 if (time_struct_ptr != NULL) { 379 *res = *time_struct_ptr; 380 return res; 381 } 382 return NULL; 383 } 384 385 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo); 386 387 // Thread start routine for all newly created threads 388 static unsigned __stdcall thread_native_entry(Thread* thread) { 389 // Try to randomize the cache line index of hot stack frames. 390 // This helps when threads of the same stack traces evict each other's 391 // cache lines. The threads can be either from the same JVM instance, or 392 // from different JVM instances. The benefit is especially true for 393 // processors with hyperthreading technology. 394 static int counter = 0; 395 int pid = os::current_process_id(); 396 _alloca(((pid ^ counter++) & 7) * 128); 397 398 thread->initialize_thread_current(); 399 400 OSThread* osthr = thread->osthread(); 401 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 402 403 if (UseNUMA) { 404 int lgrp_id = os::numa_get_group_id(); 405 if (lgrp_id != -1) { 406 thread->set_lgrp_id(lgrp_id); 407 } 408 } 409 410 // Diagnostic code to investigate JDK-6573254 411 int res = 30115; // non-java thread 412 if (thread->is_Java_thread()) { 413 res = 20115; // java thread 414 } 415 416 log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id()); 417 418 // Install a win32 structured exception handler around every thread created 419 // by VM, so VM can generate error dump when an exception occurred in non- 420 // Java thread (e.g. VM thread). 421 __try { 422 thread->run(); 423 } __except(topLevelExceptionFilter( 424 (_EXCEPTION_POINTERS*)_exception_info())) { 425 // Nothing to do. 
426 } 427 428 log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id()); 429 430 // One less thread is executing 431 // When the VMThread gets here, the main thread may have already exited 432 // which frees the CodeHeap containing the Atomic::add code 433 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) { 434 Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count); 435 } 436 437 // If a thread has not deleted itself ("delete this") as part of its 438 // termination sequence, we have to ensure thread-local-storage is 439 // cleared before we actually terminate. No threads should ever be 440 // deleted asynchronously with respect to their termination. 441 if (Thread::current_or_null_safe() != NULL) { 442 assert(Thread::current_or_null_safe() == thread, "current thread is wrong"); 443 thread->clear_thread_current(); 444 } 445 446 // Thread must not return from exit_process_or_thread(), but if it does, 447 // let it proceed to exit normally 448 return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res); 449 } 450 451 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, 452 int thread_id) { 453 // Allocate the OSThread object 454 OSThread* osthread = new OSThread(NULL, NULL); 455 if (osthread == NULL) return NULL; 456 457 // Initialize support for Java interrupts 458 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 459 if (interrupt_event == NULL) { 460 delete osthread; 461 return NULL; 462 } 463 osthread->set_interrupt_event(interrupt_event); 464 465 // Store info on the Win32 thread into the OSThread 466 osthread->set_thread_handle(thread_handle); 467 osthread->set_thread_id(thread_id); 468 469 if (UseNUMA) { 470 int lgrp_id = os::numa_get_group_id(); 471 if (lgrp_id != -1) { 472 thread->set_lgrp_id(lgrp_id); 473 } 474 } 475 476 // Initial thread state is INITIALIZED, not SUSPENDED 477 osthread->set_state(INITIALIZED); 478 479 return osthread; 480 } 481 482 483 bool os::create_attached_thread(JavaThread* thread) { 484 #ifdef ASSERT 485 thread->verify_not_published(); 486 #endif 487 HANDLE thread_h; 488 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(), 489 &thread_h, THREAD_ALL_ACCESS, false, 0)) { 490 fatal("DuplicateHandle failed\n"); 491 } 492 OSThread* osthread = create_os_thread(thread, thread_h, 493 (int)current_thread_id()); 494 if (osthread == NULL) { 495 return false; 496 } 497 498 // Initial thread state is RUNNABLE 499 osthread->set_state(RUNNABLE); 500 501 thread->set_osthread(osthread); 502 503 log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").", 504 os::current_thread_id()); 505 506 return true; 507 } 508 509 bool os::create_main_thread(JavaThread* thread) { 510 #ifdef ASSERT 511 thread->verify_not_published(); 512 #endif 513 if (_starting_thread == NULL) { 514 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 515 if (_starting_thread == NULL) { 516 return false; 517 } 518 } 519 520 // The primordial thread is runnable from the start) 521 _starting_thread->set_state(RUNNABLE); 522 523 thread->set_osthread(_starting_thread); 524 return true; 525 } 526 527 // Helper function to trace _beginthreadex attributes, 528 // similar to os::Posix::describe_pthread_attr() 529 static char* describe_beginthreadex_attributes(char* buf, size_t buflen, 530 size_t stacksize, unsigned initflag) { 531 stringStream ss(buf, buflen); 532 if (stacksize == 0) { 533 ss.print("stacksize: default, "); 534 } else { 535 ss.print("stacksize: " 
SIZE_FORMAT "k, ", stacksize / 1024); 536 } 537 ss.print("flags: "); 538 #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " "); 539 #define ALL(X) \ 540 X(CREATE_SUSPENDED) \ 541 X(STACK_SIZE_PARAM_IS_A_RESERVATION) 542 ALL(PRINT_FLAG) 543 #undef ALL 544 #undef PRINT_FLAG 545 return buf; 546 } 547 548 // Allocate and initialize a new OSThread 549 bool os::create_thread(Thread* thread, ThreadType thr_type, 550 size_t stack_size) { 551 unsigned thread_id; 552 553 // Allocate the OSThread object 554 OSThread* osthread = new OSThread(NULL, NULL); 555 if (osthread == NULL) { 556 return false; 557 } 558 559 // Initialize support for Java interrupts 560 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 561 if (interrupt_event == NULL) { 562 delete osthread; 563 return NULL; 564 } 565 osthread->set_interrupt_event(interrupt_event); 566 osthread->set_interrupted(false); 567 568 thread->set_osthread(osthread); 569 570 if (stack_size == 0) { 571 switch (thr_type) { 572 case os::java_thread: 573 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 574 if (JavaThread::stack_size_at_create() > 0) { 575 stack_size = JavaThread::stack_size_at_create(); 576 } 577 break; 578 case os::compiler_thread: 579 if (CompilerThreadStackSize > 0) { 580 stack_size = (size_t)(CompilerThreadStackSize * K); 581 break; 582 } // else fall through: 583 // use VMThreadStackSize if CompilerThreadStackSize is not defined 584 case os::vm_thread: 585 case os::pgc_thread: 586 case os::cgc_thread: 587 case os::watcher_thread: 588 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 589 break; 590 } 591 } 592 593 // Create the Win32 thread 594 // 595 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() 596 // does not specify stack size. Instead, it specifies the size of 597 // initially committed space. The stack size is determined by 598 // PE header in the executable. If the committed "stack_size" is larger 599 // than default value in the PE header, the stack is rounded up to the 600 // nearest multiple of 1MB. For example if the launcher has default 601 // stack size of 320k, specifying any size less than 320k does not 602 // affect the actual stack size at all, it only affects the initial 603 // commitment. On the other hand, specifying 'stack_size' larger than 604 // default value may cause significant increase in memory usage, because 605 // not only the stack space will be rounded up to MB, but also the 606 // entire space is committed upfront. 607 // 608 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' 609 // for CreateThread() that can treat 'stack_size' as stack size. However we 610 // are not supposed to call CreateThread() directly according to MSDN 611 // document because JVM uses C runtime library. The good news is that the 612 // flag appears to work with _beginthredex() as well. 
613 614 const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION; 615 HANDLE thread_handle = 616 (HANDLE)_beginthreadex(NULL, 617 (unsigned)stack_size, 618 (unsigned (__stdcall *)(void*)) thread_native_entry, 619 thread, 620 initflag, 621 &thread_id); 622 623 char buf[64]; 624 if (thread_handle != NULL) { 625 log_info(os, thread)("Thread started (tid: %u, attributes: %s)", 626 thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag)); 627 } else { 628 log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.", 629 os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag)); 630 } 631 632 if (thread_handle == NULL) { 633 // Need to clean up stuff we've allocated so far 634 CloseHandle(osthread->interrupt_event()); 635 thread->set_osthread(NULL); 636 delete osthread; 637 return NULL; 638 } 639 640 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count); 641 642 // Store info on the Win32 thread into the OSThread 643 osthread->set_thread_handle(thread_handle); 644 osthread->set_thread_id(thread_id); 645 646 // Initial thread state is INITIALIZED, not SUSPENDED 647 osthread->set_state(INITIALIZED); 648 649 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 650 return true; 651 } 652 653 654 // Free Win32 resources related to the OSThread 655 void os::free_thread(OSThread* osthread) { 656 assert(osthread != NULL, "osthread not set"); 657 658 // We are told to free resources of the argument thread, 659 // but we can only really operate on the current thread. 660 assert(Thread::current()->osthread() == osthread, 661 "os::free_thread but not current thread"); 662 663 CloseHandle(osthread->thread_handle()); 664 CloseHandle(osthread->interrupt_event()); 665 delete osthread; 666 } 667 668 static jlong first_filetime; 669 static jlong initial_performance_count; 670 static jlong performance_frequency; 671 672 673 jlong as_long(LARGE_INTEGER x) { 674 jlong result = 0; // initialization to avoid warning 675 set_high(&result, x.HighPart); 676 set_low(&result, x.LowPart); 677 return result; 678 } 679 680 681 jlong os::elapsed_counter() { 682 LARGE_INTEGER count; 683 QueryPerformanceCounter(&count); 684 return as_long(count) - initial_performance_count; 685 } 686 687 688 jlong os::elapsed_frequency() { 689 return performance_frequency; 690 } 691 692 693 julong os::available_memory() { 694 return win32::available_memory(); 695 } 696 697 julong os::win32::available_memory() { 698 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 699 // value if total memory is larger than 4GB 700 MEMORYSTATUSEX ms; 701 ms.dwLength = sizeof(ms); 702 GlobalMemoryStatusEx(&ms); 703 704 return (julong)ms.ullAvailPhys; 705 } 706 707 julong os::physical_memory() { 708 return win32::physical_memory(); 709 } 710 711 bool os::has_allocatable_memory_limit(julong* limit) { 712 MEMORYSTATUSEX ms; 713 ms.dwLength = sizeof(ms); 714 GlobalMemoryStatusEx(&ms); 715 #ifdef _LP64 716 *limit = (julong)ms.ullAvailVirtual; 717 return true; 718 #else 719 // Limit to 1400m because of the 2gb address space wall 720 *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual); 721 return true; 722 #endif 723 } 724 725 int os::active_processor_count() { 726 DWORD_PTR lpProcessAffinityMask = 0; 727 DWORD_PTR lpSystemAffinityMask = 0; 728 int proc_count = processor_count(); 729 if (proc_count <= sizeof(UINT_PTR) * BitsPerByte && 730 
GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) { 731 // Nof active processors is number of bits in process affinity mask 732 int bitcount = 0; 733 while (lpProcessAffinityMask != 0) { 734 lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1); 735 bitcount++; 736 } 737 return bitcount; 738 } else { 739 return proc_count; 740 } 741 } 742 743 void os::set_native_thread_name(const char *name) { 744 745 // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx 746 // 747 // Note that unfortunately this only works if the process 748 // is already attached to a debugger; debugger must observe 749 // the exception below to show the correct name. 750 751 // If there is no debugger attached skip raising the exception 752 if (!IsDebuggerPresent()) { 753 return; 754 } 755 756 const DWORD MS_VC_EXCEPTION = 0x406D1388; 757 struct { 758 DWORD dwType; // must be 0x1000 759 LPCSTR szName; // pointer to name (in user addr space) 760 DWORD dwThreadID; // thread ID (-1=caller thread) 761 DWORD dwFlags; // reserved for future use, must be zero 762 } info; 763 764 info.dwType = 0x1000; 765 info.szName = name; 766 info.dwThreadID = -1; 767 info.dwFlags = 0; 768 769 __try { 770 RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info ); 771 } __except(EXCEPTION_EXECUTE_HANDLER) {} 772 } 773 774 bool os::distribute_processes(uint length, uint* distribution) { 775 // Not yet implemented. 776 return false; 777 } 778 779 bool os::bind_to_processor(uint processor_id) { 780 // Not yet implemented. 781 return false; 782 } 783 784 void os::win32::initialize_performance_counter() { 785 LARGE_INTEGER count; 786 QueryPerformanceFrequency(&count); 787 performance_frequency = as_long(count); 788 QueryPerformanceCounter(&count); 789 initial_performance_count = as_long(count); 790 } 791 792 793 double os::elapsedTime() { 794 return (double) elapsed_counter() / (double) elapsed_frequency(); 795 } 796 797 798 // Windows format: 799 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601. 
800 // Java format: 801 // Java standards require the number of milliseconds since 1/1/1970 802 803 // Constant offset - calculated using offset() 804 static jlong _offset = 116444736000000000; 805 // Fake time counter for reproducible results when debugging 806 static jlong fake_time = 0; 807 808 #ifdef ASSERT 809 // Just to be safe, recalculate the offset in debug mode 810 static jlong _calculated_offset = 0; 811 static int _has_calculated_offset = 0; 812 813 jlong offset() { 814 if (_has_calculated_offset) return _calculated_offset; 815 SYSTEMTIME java_origin; 816 java_origin.wYear = 1970; 817 java_origin.wMonth = 1; 818 java_origin.wDayOfWeek = 0; // ignored 819 java_origin.wDay = 1; 820 java_origin.wHour = 0; 821 java_origin.wMinute = 0; 822 java_origin.wSecond = 0; 823 java_origin.wMilliseconds = 0; 824 FILETIME jot; 825 if (!SystemTimeToFileTime(&java_origin, &jot)) { 826 fatal("Error = %d\nWindows error", GetLastError()); 827 } 828 _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime); 829 _has_calculated_offset = 1; 830 assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal"); 831 return _calculated_offset; 832 } 833 #else 834 jlong offset() { 835 return _offset; 836 } 837 #endif 838 839 jlong windows_to_java_time(FILETIME wt) { 840 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 841 return (a - offset()) / 10000; 842 } 843 844 // Returns time ticks in (10th of micro seconds) 845 jlong windows_to_time_ticks(FILETIME wt) { 846 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 847 return (a - offset()); 848 } 849 850 FILETIME java_to_windows_time(jlong l) { 851 jlong a = (l * 10000) + offset(); 852 FILETIME result; 853 result.dwHighDateTime = high(a); 854 result.dwLowDateTime = low(a); 855 return result; 856 } 857 858 bool os::supports_vtime() { return true; } 859 bool os::enable_vtime() { return false; } 860 bool os::vtime_enabled() { return false; } 861 862 double os::elapsedVTime() { 863 FILETIME created; 864 FILETIME exited; 865 FILETIME kernel; 866 FILETIME user; 867 if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) { 868 // the resolution of windows_to_java_time() should be sufficient (ms) 869 return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS; 870 } else { 871 return elapsedTime(); 872 } 873 } 874 875 jlong os::javaTimeMillis() { 876 if (UseFakeTimers) { 877 return fake_time++; 878 } else { 879 FILETIME wt; 880 GetSystemTimeAsFileTime(&wt); 881 return windows_to_java_time(wt); 882 } 883 } 884 885 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) { 886 FILETIME wt; 887 GetSystemTimeAsFileTime(&wt); 888 jlong ticks = windows_to_time_ticks(wt); // 10th of micros 889 jlong secs = jlong(ticks / 10000000); // 10000 * 1000 890 seconds = secs; 891 nanos = jlong(ticks - (secs*10000000)) * 100; 892 } 893 894 jlong os::javaTimeNanos() { 895 LARGE_INTEGER current_count; 896 QueryPerformanceCounter(¤t_count); 897 double current = as_long(current_count); 898 double freq = performance_frequency; 899 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC); 900 return time; 901 } 902 903 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 904 jlong freq = performance_frequency; 905 if (freq < NANOSECS_PER_SEC) { 906 // the performance counter is 64 bits and we will 907 // be multiplying it -- so no wrap in 64 bits 908 info_ptr->max_value = ALL_64_BITS; 909 } else if (freq > NANOSECS_PER_SEC) { 910 // use the max value the counter can reach to 
911 // determine the max value which could be returned 912 julong max_counter = (julong)ALL_64_BITS; 913 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC)); 914 } else { 915 // the performance counter is 64 bits and we will 916 // be using it directly -- so no wrap in 64 bits 917 info_ptr->max_value = ALL_64_BITS; 918 } 919 920 // using a counter, so no skipping 921 info_ptr->may_skip_backward = false; 922 info_ptr->may_skip_forward = false; 923 924 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time 925 } 926 927 char* os::local_time_string(char *buf, size_t buflen) { 928 SYSTEMTIME st; 929 GetLocalTime(&st); 930 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 931 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); 932 return buf; 933 } 934 935 bool os::getTimesSecs(double* process_real_time, 936 double* process_user_time, 937 double* process_system_time) { 938 HANDLE h_process = GetCurrentProcess(); 939 FILETIME create_time, exit_time, kernel_time, user_time; 940 BOOL result = GetProcessTimes(h_process, 941 &create_time, 942 &exit_time, 943 &kernel_time, 944 &user_time); 945 if (result != 0) { 946 FILETIME wt; 947 GetSystemTimeAsFileTime(&wt); 948 jlong rtc_millis = windows_to_java_time(wt); 949 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS); 950 *process_user_time = 951 (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS); 952 *process_system_time = 953 (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS); 954 return true; 955 } else { 956 return false; 957 } 958 } 959 960 void os::shutdown() { 961 // allow PerfMemory to attempt cleanup of any persistent resources 962 perfMemory_exit(); 963 964 // flush buffered output, finish log files 965 ostream_abort(); 966 967 // Check for abort hook 968 abort_hook_t abort_hook = Arguments::abort_hook(); 969 if (abort_hook != NULL) { 970 abort_hook(); 971 } 972 } 973 974 975 static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, 976 PMINIDUMP_EXCEPTION_INFORMATION, 977 PMINIDUMP_USER_STREAM_INFORMATION, 978 PMINIDUMP_CALLBACK_INFORMATION); 979 980 static HANDLE dumpFile = NULL; 981 982 // Check if dump file can be created. 
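// On success, "buffer" receives the minidump path that os::abort() will write
// to - "<cwd>\hs_err_pid<pid>.mdmp", or ".\hs_err_pid<pid>.mdmp" if the current
// directory cannot be determined; on failure it receives the reason why no
// minidump will be written.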
983 void os::check_dump_limit(char* buffer, size_t buffsz) { 984 bool status = true; 985 if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) { 986 jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line"); 987 status = false; 988 } 989 990 #ifndef ASSERT 991 if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) { 992 jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows"); 993 status = false; 994 } 995 #endif 996 997 if (status) { 998 const char* cwd = get_current_directory(NULL, 0); 999 int pid = current_process_id(); 1000 if (cwd != NULL) { 1001 jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid); 1002 } else { 1003 jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid); 1004 } 1005 1006 if (dumpFile == NULL && 1007 (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL)) 1008 == INVALID_HANDLE_VALUE) { 1009 jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError()); 1010 status = false; 1011 } 1012 } 1013 VMError::record_coredump_status(buffer, status); 1014 } 1015 1016 void os::abort(bool dump_core, void* siginfo, const void* context) { 1017 EXCEPTION_POINTERS ep; 1018 MINIDUMP_EXCEPTION_INFORMATION mei; 1019 MINIDUMP_EXCEPTION_INFORMATION* pmei; 1020 1021 HANDLE hProcess = GetCurrentProcess(); 1022 DWORD processId = GetCurrentProcessId(); 1023 MINIDUMP_TYPE dumpType; 1024 1025 shutdown(); 1026 if (!dump_core || dumpFile == NULL) { 1027 if (dumpFile != NULL) { 1028 CloseHandle(dumpFile); 1029 } 1030 win32::exit_process_or_thread(win32::EPT_PROCESS, 1); 1031 } 1032 1033 dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData | 1034 MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules); 1035 1036 if (siginfo != NULL && context != NULL) { 1037 ep.ContextRecord = (PCONTEXT) context; 1038 ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo; 1039 1040 mei.ThreadId = GetCurrentThreadId(); 1041 mei.ExceptionPointers = &ep; 1042 pmei = &mei; 1043 } else { 1044 pmei = NULL; 1045 } 1046 1047 // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all 1048 // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then. 1049 if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) && 1050 !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) { 1051 jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError()); 1052 } 1053 CloseHandle(dumpFile); 1054 win32::exit_process_or_thread(win32::EPT_PROCESS, 1); 1055 } 1056 1057 // Die immediately, no exit hook, no abort hook, no cleanup. 1058 void os::die() { 1059 win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1); 1060 } 1061 1062 // Directory routines copied from src/win32/native/java/io/dirent_md.c 1063 // * dirent_md.c 1.15 00/02/02 1064 // 1065 // The declarations for DIR and struct dirent are in jvm_win32.h. 1066 1067 // Caller must have already run dirname through JVM_NativePath, which removes 1068 // duplicate slashes and converts all instances of '/' into '\\'. 
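// For example (illustrative): a dirname of "c:/java//classes" would already
// have been normalized to "c:\java\classes" before reaching os::opendir().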
1069 1070 DIR * os::opendir(const char *dirname) { 1071 assert(dirname != NULL, "just checking"); // hotspot change 1072 DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal); 1073 DWORD fattr; // hotspot change 1074 char alt_dirname[4] = { 0, 0, 0, 0 }; 1075 1076 if (dirp == 0) { 1077 errno = ENOMEM; 1078 return 0; 1079 } 1080 1081 // Win32 accepts "\" in its POSIX stat(), but refuses to treat it 1082 // as a directory in FindFirstFile(). We detect this case here and 1083 // prepend the current drive name. 1084 // 1085 if (dirname[1] == '\0' && dirname[0] == '\\') { 1086 alt_dirname[0] = _getdrive() + 'A' - 1; 1087 alt_dirname[1] = ':'; 1088 alt_dirname[2] = '\\'; 1089 alt_dirname[3] = '\0'; 1090 dirname = alt_dirname; 1091 } 1092 1093 dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal); 1094 if (dirp->path == 0) { 1095 free(dirp); 1096 errno = ENOMEM; 1097 return 0; 1098 } 1099 strcpy(dirp->path, dirname); 1100 1101 fattr = GetFileAttributes(dirp->path); 1102 if (fattr == 0xffffffff) { 1103 free(dirp->path); 1104 free(dirp); 1105 errno = ENOENT; 1106 return 0; 1107 } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) { 1108 free(dirp->path); 1109 free(dirp); 1110 errno = ENOTDIR; 1111 return 0; 1112 } 1113 1114 // Append "*.*", or possibly "\\*.*", to path 1115 if (dirp->path[1] == ':' && 1116 (dirp->path[2] == '\0' || 1117 (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) { 1118 // No '\\' needed for cases like "Z:" or "Z:\" 1119 strcat(dirp->path, "*.*"); 1120 } else { 1121 strcat(dirp->path, "\\*.*"); 1122 } 1123 1124 dirp->handle = FindFirstFile(dirp->path, &dirp->find_data); 1125 if (dirp->handle == INVALID_HANDLE_VALUE) { 1126 if (GetLastError() != ERROR_FILE_NOT_FOUND) { 1127 free(dirp->path); 1128 free(dirp); 1129 errno = EACCES; 1130 return 0; 1131 } 1132 } 1133 return dirp; 1134 } 1135 1136 // parameter dbuf unused on Windows 1137 struct dirent * os::readdir(DIR *dirp, dirent *dbuf) { 1138 assert(dirp != NULL, "just checking"); // hotspot change 1139 if (dirp->handle == INVALID_HANDLE_VALUE) { 1140 return 0; 1141 } 1142 1143 strcpy(dirp->dirent.d_name, dirp->find_data.cFileName); 1144 1145 if (!FindNextFile(dirp->handle, &dirp->find_data)) { 1146 if (GetLastError() == ERROR_INVALID_HANDLE) { 1147 errno = EBADF; 1148 return 0; 1149 } 1150 FindClose(dirp->handle); 1151 dirp->handle = INVALID_HANDLE_VALUE; 1152 } 1153 1154 return &dirp->dirent; 1155 } 1156 1157 int os::closedir(DIR *dirp) { 1158 assert(dirp != NULL, "just checking"); // hotspot change 1159 if (dirp->handle != INVALID_HANDLE_VALUE) { 1160 if (!FindClose(dirp->handle)) { 1161 errno = EBADF; 1162 return -1; 1163 } 1164 dirp->handle = INVALID_HANDLE_VALUE; 1165 } 1166 free(dirp->path); 1167 free(dirp); 1168 return 0; 1169 } 1170 1171 // This must be hard coded because it's the system's temporary 1172 // directory not the java application's temp directory, ala java.io.tmpdir. 
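// GetTempPath() typically yields something like "C:\Users\<user>\AppData\Local\Temp\"
// (note the trailing backslash); the exact value shown is illustrative and
// depends on the TMP/TEMP environment of the process.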
1173 const char* os::get_temp_directory() { 1174 static char path_buf[MAX_PATH]; 1175 if (GetTempPath(MAX_PATH, path_buf) > 0) { 1176 return path_buf; 1177 } else { 1178 path_buf[0] = '\0'; 1179 return path_buf; 1180 } 1181 } 1182 1183 // Needs to be in os specific directory because windows requires another 1184 // header file <direct.h> 1185 const char* os::get_current_directory(char *buf, size_t buflen) { 1186 int n = static_cast<int>(buflen); 1187 if (buflen > INT_MAX) n = INT_MAX; 1188 return _getcwd(buf, n); 1189 } 1190 1191 //----------------------------------------------------------- 1192 // Helper functions for fatal error handler 1193 #ifdef _WIN64 1194 // Helper routine which returns true if address in 1195 // within the NTDLL address space. 1196 // 1197 static bool _addr_in_ntdll(address addr) { 1198 HMODULE hmod; 1199 MODULEINFO minfo; 1200 1201 hmod = GetModuleHandle("NTDLL.DLL"); 1202 if (hmod == NULL) return false; 1203 if (!GetModuleInformation(GetCurrentProcess(), hmod, 1204 &minfo, sizeof(MODULEINFO))) { 1205 return false; 1206 } 1207 1208 if ((addr >= minfo.lpBaseOfDll) && 1209 (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) { 1210 return true; 1211 } else { 1212 return false; 1213 } 1214 } 1215 #endif 1216 1217 struct _modinfo { 1218 address addr; 1219 char* full_path; // point to a char buffer 1220 int buflen; // size of the buffer 1221 address base_addr; 1222 }; 1223 1224 static int _locate_module_by_addr(const char * mod_fname, address base_addr, 1225 address top_address, void * param) { 1226 struct _modinfo *pmod = (struct _modinfo *)param; 1227 if (!pmod) return -1; 1228 1229 if (base_addr <= pmod->addr && 1230 top_address > pmod->addr) { 1231 // if a buffer is provided, copy path name to the buffer 1232 if (pmod->full_path) { 1233 jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname); 1234 } 1235 pmod->base_addr = base_addr; 1236 return 1; 1237 } 1238 return 0; 1239 } 1240 1241 bool os::dll_address_to_library_name(address addr, char* buf, 1242 int buflen, int* offset) { 1243 // buf is not optional, but offset is optional 1244 assert(buf != NULL, "sanity check"); 1245 1246 // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always 1247 // return the full path to the DLL file, sometimes it returns path 1248 // to the corresponding PDB file (debug info); sometimes it only 1249 // returns partial path, which makes life painful. 
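  // Instead, walk the list of loaded modules via get_loaded_modules_info() and
  // pick the module whose [base, top) address range contains "addr"; see
  // _locate_module_by_addr() above.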
1250 1251 struct _modinfo mi; 1252 mi.addr = addr; 1253 mi.full_path = buf; 1254 mi.buflen = buflen; 1255 if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) { 1256 // buf already contains path name 1257 if (offset) *offset = addr - mi.base_addr; 1258 return true; 1259 } 1260 1261 buf[0] = '\0'; 1262 if (offset) *offset = -1; 1263 return false; 1264 } 1265 1266 bool os::dll_address_to_function_name(address addr, char *buf, 1267 int buflen, int *offset, 1268 bool demangle) { 1269 // buf is not optional, but offset is optional 1270 assert(buf != NULL, "sanity check"); 1271 1272 if (Decoder::decode(addr, buf, buflen, offset, demangle)) { 1273 return true; 1274 } 1275 if (offset != NULL) *offset = -1; 1276 buf[0] = '\0'; 1277 return false; 1278 } 1279 1280 // save the start and end address of jvm.dll into param[0] and param[1] 1281 static int _locate_jvm_dll(const char* mod_fname, address base_addr, 1282 address top_address, void * param) { 1283 if (!param) return -1; 1284 1285 if (base_addr <= (address)_locate_jvm_dll && 1286 top_address > (address)_locate_jvm_dll) { 1287 ((address*)param)[0] = base_addr; 1288 ((address*)param)[1] = top_address; 1289 return 1; 1290 } 1291 return 0; 1292 } 1293 1294 address vm_lib_location[2]; // start and end address of jvm.dll 1295 1296 // check if addr is inside jvm.dll 1297 bool os::address_is_in_vm(address addr) { 1298 if (!vm_lib_location[0] || !vm_lib_location[1]) { 1299 if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) { 1300 assert(false, "Can't find jvm module."); 1301 return false; 1302 } 1303 } 1304 1305 return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]); 1306 } 1307 1308 // print module info; param is outputStream* 1309 static int _print_module(const char* fname, address base_address, 1310 address top_address, void* param) { 1311 if (!param) return -1; 1312 1313 outputStream* st = (outputStream*)param; 1314 1315 st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname); 1316 return 0; 1317 } 1318 1319 // Loads .dll/.so and 1320 // in case of error it checks if .dll/.so was built for the 1321 // same architecture as Hotspot is running on 1322 void * os::dll_load(const char *name, char *ebuf, int ebuflen) { 1323 void * result = LoadLibrary(name); 1324 if (result != NULL) { 1325 if (InitializeDbgHelpEarly) { 1326 // Recalculate pdb search path if a DLL was loaded successfully. 
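      // Presumably this keeps the dbghelp/PDB search path in sync with the set
      // of loaded libraries, so that symbols of the newly loaded DLL can be
      // resolved later.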
      SymbolEngine::recalc_search_path();
    }
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
    ebuf[ebuflen - 1] = '\0';
    return NULL;
  }

  // Parsing the dll below:
  // If we can read the dll info and find that the dll was built
  // for an architecture other than the one Hotspot is running on,
  // then print "DLL was built for a different architecture" to the buffer;
  // else call os::lasterror to obtain the system error message.

  // Read the system error message into ebuf.
  // It may or may not be overwritten below (in the for loop and just above).
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen - 1] = '\0';
  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
  if (fd < 0) {
    return NULL;
  }

  uint32_t signature_offset;
  uint16_t lib_arch = 0;
  bool failed_to_get_lib_arch =
    ( // Go to position 0x3c in the dll
      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
      ||
      // Read the location of the signature
      (sizeof(signature_offset) !=
      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
      ||
      // Go to the COFF File Header in the dll,
      // which is located after the "signature" (4 bytes long)
      (os::seek_to_file_offset(fd,
      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
      ||
      // Read the field that contains the code of the architecture
      // that the dll was built for
      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
    );

  ::close(fd);
  if (failed_to_get_lib_arch) {
    // file i/o error - report the os::lasterror(...) msg
    return NULL;
  }

  typedef struct {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[] = {
    {IMAGE_FILE_MACHINE_I386,  (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"}
  };
#if (defined _M_AMD64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
         is defined :_M_AMD64 or _M_IX86
#endif


  // Obtain strings for the printf operation below:
  // lib_arch_str names the platform this .dll was built for,
  // running_arch_str names the platform Hotspot was built for.
  char *running_arch_str = NULL, *lib_arch_str = NULL;
  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (lib_arch == arch_array[i].arch_code) {
      lib_arch_str = arch_array[i].arch_name;
    }
    if (running_arch == arch_array[i].arch_code) {
      running_arch_str = arch_array[i].arch_name;
    }
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report the os::lasterror(...)
msg 1417 if (lib_arch == running_arch) { 1418 return NULL; 1419 } 1420 1421 if (lib_arch_str != NULL) { 1422 ::_snprintf(ebuf, ebuflen - 1, 1423 "Can't load %s-bit .dll on a %s-bit platform", 1424 lib_arch_str, running_arch_str); 1425 } else { 1426 // don't know what architecture this dll was build for 1427 ::_snprintf(ebuf, ebuflen - 1, 1428 "Can't load this .dll (machine code=0x%x) on a %s-bit platform", 1429 lib_arch, running_arch_str); 1430 } 1431 1432 return NULL; 1433 } 1434 1435 void os::print_dll_info(outputStream *st) { 1436 st->print_cr("Dynamic libraries:"); 1437 get_loaded_modules_info(_print_module, (void *)st); 1438 } 1439 1440 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) { 1441 HANDLE hProcess; 1442 1443 # define MAX_NUM_MODULES 128 1444 HMODULE modules[MAX_NUM_MODULES]; 1445 static char filename[MAX_PATH]; 1446 int result = 0; 1447 1448 int pid = os::current_process_id(); 1449 hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, 1450 FALSE, pid); 1451 if (hProcess == NULL) return 0; 1452 1453 DWORD size_needed; 1454 if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) { 1455 CloseHandle(hProcess); 1456 return 0; 1457 } 1458 1459 // number of modules that are currently loaded 1460 int num_modules = size_needed / sizeof(HMODULE); 1461 1462 for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) { 1463 // Get Full pathname: 1464 if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) { 1465 filename[0] = '\0'; 1466 } 1467 1468 MODULEINFO modinfo; 1469 if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) { 1470 modinfo.lpBaseOfDll = NULL; 1471 modinfo.SizeOfImage = 0; 1472 } 1473 1474 // Invoke callback function 1475 result = callback(filename, (address)modinfo.lpBaseOfDll, 1476 (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param); 1477 if (result) break; 1478 } 1479 1480 CloseHandle(hProcess); 1481 return result; 1482 } 1483 1484 bool os::get_host_name(char* buf, size_t buflen) { 1485 DWORD size = (DWORD)buflen; 1486 return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE); 1487 } 1488 1489 void os::get_summary_os_info(char* buf, size_t buflen) { 1490 stringStream sst(buf, buflen); 1491 os::win32::print_windows_version(&sst); 1492 // chop off newline character 1493 char* nl = strchr(buf, '\n'); 1494 if (nl != NULL) *nl = '\0'; 1495 } 1496 1497 int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) { 1498 int ret = vsnprintf(buf, len, fmt, args); 1499 // Get the correct buffer size if buf is too small 1500 if (ret < 0) { 1501 return _vscprintf(fmt, args); 1502 } 1503 return ret; 1504 } 1505 1506 static inline time_t get_mtime(const char* filename) { 1507 struct stat st; 1508 int ret = os::stat(filename, &st); 1509 assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno)); 1510 return st.st_mtime; 1511 } 1512 1513 int os::compare_file_modified_times(const char* file1, const char* file2) { 1514 time_t t1 = get_mtime(file1); 1515 time_t t2 = get_mtime(file2); 1516 return t1 - t2; 1517 } 1518 1519 void os::print_os_info_brief(outputStream* st) { 1520 os::print_os_info(st); 1521 } 1522 1523 void os::print_os_info(outputStream* st) { 1524 #ifdef ASSERT 1525 char buffer[1024]; 1526 st->print("HostName: "); 1527 if (get_host_name(buffer, sizeof(buffer))) { 1528 st->print("%s ", buffer); 1529 } else { 1530 st->print("N/A "); 1531 } 1532 #endif 1533 st->print("OS:"); 1534 
os::win32::print_windows_version(st); 1535 } 1536 1537 void os::win32::print_windows_version(outputStream* st) { 1538 OSVERSIONINFOEX osvi; 1539 VS_FIXEDFILEINFO *file_info; 1540 TCHAR kernel32_path[MAX_PATH]; 1541 UINT len, ret; 1542 1543 // Use the GetVersionEx information to see if we're on a server or 1544 // workstation edition of Windows. Starting with Windows 8.1 we can't 1545 // trust the OS version information returned by this API. 1546 ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX)); 1547 osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); 1548 if (!GetVersionEx((OSVERSIONINFO *)&osvi)) { 1549 st->print_cr("Call to GetVersionEx failed"); 1550 return; 1551 } 1552 bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION); 1553 1554 // Get the full path to \Windows\System32\kernel32.dll and use that for 1555 // determining what version of Windows we're running on. 1556 len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1; 1557 ret = GetSystemDirectory(kernel32_path, len); 1558 if (ret == 0 || ret > len) { 1559 st->print_cr("Call to GetSystemDirectory failed"); 1560 return; 1561 } 1562 strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret); 1563 1564 DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL); 1565 if (version_size == 0) { 1566 st->print_cr("Call to GetFileVersionInfoSize failed"); 1567 return; 1568 } 1569 1570 LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal); 1571 if (version_info == NULL) { 1572 st->print_cr("Failed to allocate version_info"); 1573 return; 1574 } 1575 1576 if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) { 1577 os::free(version_info); 1578 st->print_cr("Call to GetFileVersionInfo failed"); 1579 return; 1580 } 1581 1582 if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) { 1583 os::free(version_info); 1584 st->print_cr("Call to VerQueryValue failed"); 1585 return; 1586 } 1587 1588 int major_version = HIWORD(file_info->dwProductVersionMS); 1589 int minor_version = LOWORD(file_info->dwProductVersionMS); 1590 int build_number = HIWORD(file_info->dwProductVersionLS); 1591 int build_minor = LOWORD(file_info->dwProductVersionLS); 1592 int os_vers = major_version * 1000 + minor_version; 1593 os::free(version_info); 1594 1595 st->print(" Windows "); 1596 switch (os_vers) { 1597 1598 case 6000: 1599 if (is_workstation) { 1600 st->print("Vista"); 1601 } else { 1602 st->print("Server 2008"); 1603 } 1604 break; 1605 1606 case 6001: 1607 if (is_workstation) { 1608 st->print("7"); 1609 } else { 1610 st->print("Server 2008 R2"); 1611 } 1612 break; 1613 1614 case 6002: 1615 if (is_workstation) { 1616 st->print("8"); 1617 } else { 1618 st->print("Server 2012"); 1619 } 1620 break; 1621 1622 case 6003: 1623 if (is_workstation) { 1624 st->print("8.1"); 1625 } else { 1626 st->print("Server 2012 R2"); 1627 } 1628 break; 1629 1630 case 10000: 1631 if (is_workstation) { 1632 st->print("10"); 1633 } else { 1634 st->print("Server 2016"); 1635 } 1636 break; 1637 1638 default: 1639 // Unrecognized windows, print out its major and minor versions 1640 st->print("%d.%d", major_version, minor_version); 1641 break; 1642 } 1643 1644 // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could 1645 // find out whether we are running on 64 bit processor or not 1646 SYSTEM_INFO si; 1647 ZeroMemory(&si, sizeof(SYSTEM_INFO)); 1648 GetNativeSystemInfo(&si); 1649 if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) { 1650 st->print(" , 64 bit"); 1651 } 1652 1653 st->print(" Build %d", build_number); 
1654 st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor); 1655 st->cr(); 1656 } 1657 1658 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) { 1659 // Nothing to do for now. 1660 } 1661 1662 void os::get_summary_cpu_info(char* buf, size_t buflen) { 1663 HKEY key; 1664 DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE, 1665 "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key); 1666 if (status == ERROR_SUCCESS) { 1667 DWORD size = (DWORD)buflen; 1668 status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size); 1669 if (status != ERROR_SUCCESS) { 1670 strncpy(buf, "## __CPU__", buflen); 1671 } 1672 RegCloseKey(key); 1673 } else { 1674 // Put generic cpu info to return 1675 strncpy(buf, "## __CPU__", buflen); 1676 } 1677 } 1678 1679 void os::print_memory_info(outputStream* st) { 1680 st->print("Memory:"); 1681 st->print(" %dk page", os::vm_page_size()>>10); 1682 1683 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 1684 // value if total memory is larger than 4GB 1685 MEMORYSTATUSEX ms; 1686 ms.dwLength = sizeof(ms); 1687 GlobalMemoryStatusEx(&ms); 1688 1689 st->print(", physical %uk", os::physical_memory() >> 10); 1690 st->print("(%uk free)", os::available_memory() >> 10); 1691 1692 st->print(", swap %uk", ms.ullTotalPageFile >> 10); 1693 st->print("(%uk free)", ms.ullAvailPageFile >> 10); 1694 st->cr(); 1695 } 1696 1697 void os::print_siginfo(outputStream *st, const void* siginfo) { 1698 const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo; 1699 st->print("siginfo:"); 1700 1701 char tmp[64]; 1702 if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) { 1703 strcpy(tmp, "EXCEPTION_??"); 1704 } 1705 st->print(" %s (0x%x)", tmp, er->ExceptionCode); 1706 1707 if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION || 1708 er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) && 1709 er->NumberParameters >= 2) { 1710 switch (er->ExceptionInformation[0]) { 1711 case 0: st->print(", reading address"); break; 1712 case 1: st->print(", writing address"); break; 1713 case 8: st->print(", data execution prevention violation at address"); break; 1714 default: st->print(", ExceptionInformation=" INTPTR_FORMAT, 1715 er->ExceptionInformation[0]); 1716 } 1717 st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]); 1718 } else { 1719 int num = er->NumberParameters; 1720 if (num > 0) { 1721 st->print(", ExceptionInformation="); 1722 for (int i = 0; i < num; i++) { 1723 st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]); 1724 } 1725 } 1726 } 1727 st->cr(); 1728 } 1729 1730 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { 1731 // do nothing 1732 } 1733 1734 static char saved_jvm_path[MAX_PATH] = {0}; 1735 1736 // Find the full path to the current module, jvm.dll 1737 void os::jvm_path(char *buf, jint buflen) { 1738 // Error checking. 1739 if (buflen < MAX_PATH) { 1740 assert(false, "must use a large-enough buffer"); 1741 buf[0] = '\0'; 1742 return; 1743 } 1744 // Lazy resolve the path to current module. 1745 if (saved_jvm_path[0] != 0) { 1746 strcpy(buf, saved_jvm_path); 1747 return; 1748 } 1749 1750 buf[0] = '\0'; 1751 if (Arguments::sun_java_launcher_is_altjvm()) { 1752 // Support for the java launcher's '-XXaltjvm=<path>' option. Check 1753 // for a JAVA_HOME environment variable and fix up the path so it 1754 // looks like jvm.dll is installed there (append a fake suffix 1755 // hotspot/jvm.dll). 
1756 char* java_home_var = ::getenv("JAVA_HOME"); 1757 if (java_home_var != NULL && java_home_var[0] != 0 && 1758 strlen(java_home_var) < (size_t)buflen) { 1759 strncpy(buf, java_home_var, buflen); 1760 1761 // determine if this is a legacy image or modules image 1762 // modules image doesn't have "jre" subdirectory 1763 size_t len = strlen(buf); 1764 char* jrebin_p = buf + len; 1765 jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\"); 1766 if (0 != _access(buf, 0)) { 1767 jio_snprintf(jrebin_p, buflen-len, "\\bin\\"); 1768 } 1769 len = strlen(buf); 1770 jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll"); 1771 } 1772 } 1773 1774 if (buf[0] == '\0') { 1775 GetModuleFileName(vm_lib_handle, buf, buflen); 1776 } 1777 strncpy(saved_jvm_path, buf, MAX_PATH); 1778 saved_jvm_path[MAX_PATH - 1] = '\0'; 1779 } 1780 1781 1782 void os::print_jni_name_prefix_on(outputStream* st, int args_size) { 1783 #ifndef _WIN64 1784 st->print("_"); 1785 #endif 1786 } 1787 1788 1789 void os::print_jni_name_suffix_on(outputStream* st, int args_size) { 1790 #ifndef _WIN64 1791 st->print("@%d", args_size * sizeof(int)); 1792 #endif 1793 } 1794 1795 // This method is a copy of JDK's sysGetLastErrorString 1796 // from src/windows/hpi/src/system_md.c 1797 1798 size_t os::lasterror(char* buf, size_t len) { 1799 DWORD errval; 1800 1801 if ((errval = GetLastError()) != 0) { 1802 // DOS error 1803 size_t n = (size_t)FormatMessage( 1804 FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS, 1805 NULL, 1806 errval, 1807 0, 1808 buf, 1809 (DWORD)len, 1810 NULL); 1811 if (n > 3) { 1812 // Drop final '.', CR, LF 1813 if (buf[n - 1] == '\n') n--; 1814 if (buf[n - 1] == '\r') n--; 1815 if (buf[n - 1] == '.') n--; 1816 buf[n] = '\0'; 1817 } 1818 return n; 1819 } 1820 1821 if (errno != 0) { 1822 // C runtime error that has no corresponding DOS error code 1823 const char* s = os::strerror(errno); 1824 size_t n = strlen(s); 1825 if (n >= len) n = len - 1; 1826 strncpy(buf, s, n); 1827 buf[n] = '\0'; 1828 return n; 1829 } 1830 1831 return 0; 1832 } 1833 1834 int os::get_last_error() { 1835 DWORD error = GetLastError(); 1836 if (error == 0) { 1837 error = errno; 1838 } 1839 return (int)error; 1840 } 1841 1842 WindowsSemaphore::WindowsSemaphore(uint value) { 1843 _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL); 1844 1845 guarantee(_semaphore != NULL, "CreateSemaphore failed with error code: %lu", GetLastError()); 1846 } 1847 1848 WindowsSemaphore::~WindowsSemaphore() { 1849 ::CloseHandle(_semaphore); 1850 } 1851 1852 void WindowsSemaphore::signal(uint count) { 1853 if (count > 0) { 1854 BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL); 1855 1856 assert(ret != 0, "ReleaseSemaphore failed with error code: %lu", GetLastError()); 1857 } 1858 } 1859 1860 void WindowsSemaphore::wait() { 1861 DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE); 1862 assert(ret != WAIT_FAILED, "WaitForSingleObject failed with error code: %lu", GetLastError()); 1863 assert(ret == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", ret); 1864 } 1865 1866 bool WindowsSemaphore::trywait() { 1867 DWORD ret = ::WaitForSingleObject(_semaphore, 0); 1868 assert(ret != WAIT_FAILED, "WaitForSingleObject failed with error code: %lu", GetLastError()); 1869 return ret == WAIT_OBJECT_0; 1870 } 1871 1872 // sun.misc.Signal 1873 // NOTE that this is a workaround for an apparent kernel bug where if 1874 // a signal handler for SIGBREAK is installed then that signal handler 1875 // takes priority over the console control handler for 
CTRL_CLOSE_EVENT. 1876 // See bug 4416763. 1877 static void (*sigbreakHandler)(int) = NULL; 1878 1879 static void UserHandler(int sig, void *siginfo, void *context) { 1880 os::signal_notify(sig); 1881 // We need to reinstate the signal handler each time... 1882 os::signal(sig, (void*)UserHandler); 1883 } 1884 1885 void* os::user_handler() { 1886 return (void*) UserHandler; 1887 } 1888 1889 void* os::signal(int signal_number, void* handler) { 1890 if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) { 1891 void (*oldHandler)(int) = sigbreakHandler; 1892 sigbreakHandler = (void (*)(int)) handler; 1893 return (void*) oldHandler; 1894 } else { 1895 return (void*)::signal(signal_number, (void (*)(int))handler); 1896 } 1897 } 1898 1899 void os::signal_raise(int signal_number) { 1900 raise(signal_number); 1901 } 1902 1903 // The Win32 C runtime library maps all console control events other than ^C 1904 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close, 1905 // logoff, and shutdown events. We therefore install our own console handler 1906 // that raises SIGTERM for the latter cases. 1907 // 1908 static BOOL WINAPI consoleHandler(DWORD event) { 1909 switch (event) { 1910 case CTRL_C_EVENT: 1911 if (VMError::is_error_reported()) { 1912 // Ctrl-C is pressed during error reporting, likely because the error 1913 // handler fails to abort. Let VM die immediately. 1914 os::die(); 1915 } 1916 1917 os::signal_raise(SIGINT); 1918 return TRUE; 1919 break; 1920 case CTRL_BREAK_EVENT: 1921 if (sigbreakHandler != NULL) { 1922 (*sigbreakHandler)(SIGBREAK); 1923 } 1924 return TRUE; 1925 break; 1926 case CTRL_LOGOFF_EVENT: { 1927 // Don't terminate JVM if it is running in a non-interactive session, 1928 // such as a service process. 1929 USEROBJECTFLAGS flags; 1930 HANDLE handle = GetProcessWindowStation(); 1931 if (handle != NULL && 1932 GetUserObjectInformation(handle, UOI_FLAGS, &flags, 1933 sizeof(USEROBJECTFLAGS), NULL)) { 1934 // If it is a non-interactive session, let next handler to deal 1935 // with it. 1936 if ((flags.dwFlags & WSF_VISIBLE) == 0) { 1937 return FALSE; 1938 } 1939 } 1940 } 1941 case CTRL_CLOSE_EVENT: 1942 case CTRL_SHUTDOWN_EVENT: 1943 os::signal_raise(SIGTERM); 1944 return TRUE; 1945 break; 1946 default: 1947 break; 1948 } 1949 return FALSE; 1950 } 1951 1952 // The following code is moved from os.cpp for making this 1953 // code platform specific, which it is by its very nature. 1954 1955 // Return maximum OS signal used + 1 for internal use only 1956 // Used as exit signal for signal_thread 1957 int os::sigexitnum_pd() { 1958 return NSIG; 1959 } 1960 1961 // a counter for each possible signal value, including signal_thread exit signal 1962 static volatile jint pending_signals[NSIG+1] = { 0 }; 1963 static HANDLE sig_sem = NULL; 1964 1965 void os::signal_init_pd() { 1966 // Initialize signal structures 1967 memset((void*)pending_signals, 0, sizeof(pending_signals)); 1968 1969 sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL); 1970 1971 // Programs embedding the VM do not want it to attempt to receive 1972 // events like CTRL_LOGOFF_EVENT, which are used to implement the 1973 // shutdown hooks mechanism introduced in 1.3. 
For example, when 1974 // the VM is run as part of a Windows NT service (i.e., a servlet 1975 // engine in a web server), the correct behavior is for any console 1976 // control handler to return FALSE, not TRUE, because the OS's 1977 // "final" handler for such events allows the process to continue if 1978 // it is a service (while terminating it if it is not a service). 1979 // To make this behavior uniform and the mechanism simpler, we 1980 // completely disable the VM's usage of these console events if -Xrs 1981 // (=ReduceSignalUsage) is specified. This means, for example, that 1982 // the CTRL-BREAK thread dump mechanism is also disabled in this 1983 // case. See bugs 4323062, 4345157, and related bugs. 1984 1985 if (!ReduceSignalUsage) { 1986 // Add a CTRL-C handler 1987 SetConsoleCtrlHandler(consoleHandler, TRUE); 1988 } 1989 } 1990 1991 void os::signal_notify(int signal_number) { 1992 BOOL ret; 1993 if (sig_sem != NULL) { 1994 Atomic::inc(&pending_signals[signal_number]); 1995 ret = ::ReleaseSemaphore(sig_sem, 1, NULL); 1996 assert(ret != 0, "ReleaseSemaphore() failed"); 1997 } 1998 } 1999 2000 static int check_pending_signals(bool wait_for_signal) { 2001 DWORD ret; 2002 while (true) { 2003 for (int i = 0; i < NSIG + 1; i++) { 2004 jint n = pending_signals[i]; 2005 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) { 2006 return i; 2007 } 2008 } 2009 if (!wait_for_signal) { 2010 return -1; 2011 } 2012 2013 JavaThread *thread = JavaThread::current(); 2014 2015 ThreadBlockInVM tbivm(thread); 2016 2017 bool threadIsSuspended; 2018 do { 2019 thread->set_suspend_equivalent(); 2020 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 2021 ret = ::WaitForSingleObject(sig_sem, INFINITE); 2022 assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed"); 2023 2024 // were we externally suspended while we were waiting? 2025 threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); 2026 if (threadIsSuspended) { 2027 // The semaphore has been incremented, but while we were waiting 2028 // another thread suspended us. We don't want to continue running 2029 // while suspended because that would surprise the thread that 2030 // suspended us. 2031 ret = ::ReleaseSemaphore(sig_sem, 1, NULL); 2032 assert(ret != 0, "ReleaseSemaphore() failed"); 2033 2034 thread->java_suspend_self(); 2035 } 2036 } while (threadIsSuspended); 2037 } 2038 } 2039 2040 int os::signal_lookup() { 2041 return check_pending_signals(false); 2042 } 2043 2044 int os::signal_wait() { 2045 return check_pending_signals(true); 2046 } 2047 2048 // Implicit OS exception handling 2049 2050 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, 2051 address handler) { 2052 JavaThread* thread = (JavaThread*) Thread::current_or_null(); 2053 // Save pc in thread 2054 #ifdef _M_AMD64 2055 // Do not blow up if no thread info available. 2056 if (thread) { 2057 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip); 2058 } 2059 // Set pc to handler 2060 exceptionInfo->ContextRecord->Rip = (DWORD64)handler; 2061 #else 2062 // Do not blow up if no thread info available. 
2063 if (thread) {
2064 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2065 }
2066 // Set pc to handler
2067 exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2068 #endif
2069
2070 // Continue the execution
2071 return EXCEPTION_CONTINUE_EXECUTION;
2072 }
2073
2074
2075 // Used for PostMortemDump
2076 extern "C" void safepoints();
2077 extern "C" void find(int x);
2078 extern "C" void events();
2079
2080 // According to Windows API documentation, an illegal instruction sequence should generate
2081 // the 0xC000001C exception code. However, real-world experience shows that occasionally
2082 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2083 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2084
2085 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2086
2087 // From "Execution Protection in the Windows Operating System" draft 0.35
2088 // Once a system header becomes available, the "real" define should be
2089 // included or copied here.
2090 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2091
2092 // Windows Vista/2008 heap corruption check
2093 #define EXCEPTION_HEAP_CORRUPTION 0xC0000374
2094
2095 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2096 // C++ compiler contain this error code. Because this is a compiler-generated
2097 // error, the code is not listed in the Win32 API header files.
2098 // The code is actually a cryptic mnemonic device, with the initial "E"
2099 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2100 // ASCII values of "msc".
2101
2102 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363
2103
2104 #define def_excpt(val) { #val, (val) }
2105
2106 static const struct { char* name; uint number; } exceptlabels[] = {
2107 def_excpt(EXCEPTION_ACCESS_VIOLATION),
2108 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2109 def_excpt(EXCEPTION_BREAKPOINT),
2110 def_excpt(EXCEPTION_SINGLE_STEP),
2111 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2112 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2113 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2114 def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2115 def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2116 def_excpt(EXCEPTION_FLT_OVERFLOW),
2117 def_excpt(EXCEPTION_FLT_STACK_CHECK),
2118 def_excpt(EXCEPTION_FLT_UNDERFLOW),
2119 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2120 def_excpt(EXCEPTION_INT_OVERFLOW),
2121 def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2122 def_excpt(EXCEPTION_IN_PAGE_ERROR),
2123 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2124 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2125 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2126 def_excpt(EXCEPTION_STACK_OVERFLOW),
2127 def_excpt(EXCEPTION_INVALID_DISPOSITION),
2128 def_excpt(EXCEPTION_GUARD_PAGE),
2129 def_excpt(EXCEPTION_INVALID_HANDLE),
2130 def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2131 def_excpt(EXCEPTION_HEAP_CORRUPTION)
2132 };
2133
2134 #undef def_excpt
2135
2136 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2137 uint code = static_cast<uint>(exception_code);
2138 for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2139 if (exceptlabels[i].number == code) {
2140 jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2141 return buf;
2142 }
2143 }
2144
2145 return NULL;
2146 }
2147
2148 //-----------------------------------------------------------------------------
2149 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2150 //
handle exception caused by idiv; should only happen for -MinInt/-1 2151 // (division by zero is handled explicitly) 2152 #ifdef _M_AMD64 2153 PCONTEXT ctx = exceptionInfo->ContextRecord; 2154 address pc = (address)ctx->Rip; 2155 assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode"); 2156 assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2157 if (pc[0] == 0xF7) { 2158 // set correct result values and continue after idiv instruction 2159 ctx->Rip = (DWORD64)pc + 2; // idiv reg, reg is 2 bytes 2160 } else { 2161 ctx->Rip = (DWORD64)pc + 3; // REX idiv reg, reg is 3 bytes 2162 } 2163 // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation) 2164 // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the 2165 // idiv opcode (0xF7). 2166 ctx->Rdx = (DWORD)0; // remainder 2167 // Continue the execution 2168 #else 2169 PCONTEXT ctx = exceptionInfo->ContextRecord; 2170 address pc = (address)ctx->Eip; 2171 assert(pc[0] == 0xF7, "not an idiv opcode"); 2172 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2173 assert(ctx->Eax == min_jint, "unexpected idiv exception"); 2174 // set correct result values and continue after idiv instruction 2175 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2176 ctx->Eax = (DWORD)min_jint; // result 2177 ctx->Edx = (DWORD)0; // remainder 2178 // Continue the execution 2179 #endif 2180 return EXCEPTION_CONTINUE_EXECUTION; 2181 } 2182 2183 //----------------------------------------------------------------------------- 2184 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2185 PCONTEXT ctx = exceptionInfo->ContextRecord; 2186 #ifndef _WIN64 2187 // handle exception caused by native method modifying control word 2188 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2189 2190 switch (exception_code) { 2191 case EXCEPTION_FLT_DENORMAL_OPERAND: 2192 case EXCEPTION_FLT_DIVIDE_BY_ZERO: 2193 case EXCEPTION_FLT_INEXACT_RESULT: 2194 case EXCEPTION_FLT_INVALID_OPERATION: 2195 case EXCEPTION_FLT_OVERFLOW: 2196 case EXCEPTION_FLT_STACK_CHECK: 2197 case EXCEPTION_FLT_UNDERFLOW: 2198 jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std()); 2199 if (fp_control_word != ctx->FloatSave.ControlWord) { 2200 // Restore FPCW and mask out FLT exceptions 2201 ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0; 2202 // Mask out pending FLT exceptions 2203 ctx->FloatSave.StatusWord &= 0xffffff00; 2204 return EXCEPTION_CONTINUE_EXECUTION; 2205 } 2206 } 2207 2208 if (prev_uef_handler != NULL) { 2209 // We didn't handle this exception so pass it to the previous 2210 // UnhandledExceptionFilter. 
2211 return (prev_uef_handler)(exceptionInfo); 2212 } 2213 #else // !_WIN64 2214 // On Windows, the mxcsr control bits are non-volatile across calls 2215 // See also CR 6192333 2216 // 2217 jint MxCsr = INITIAL_MXCSR; 2218 // we can't use StubRoutines::addr_mxcsr_std() 2219 // because in Win64 mxcsr is not saved there 2220 if (MxCsr != ctx->MxCsr) { 2221 ctx->MxCsr = MxCsr; 2222 return EXCEPTION_CONTINUE_EXECUTION; 2223 } 2224 #endif // !_WIN64 2225 2226 return EXCEPTION_CONTINUE_SEARCH; 2227 } 2228 2229 static inline void report_error(Thread* t, DWORD exception_code, 2230 address addr, void* siginfo, void* context) { 2231 VMError::report_and_die(t, exception_code, addr, siginfo, context); 2232 2233 // If UseOsErrorReporting, this will return here and save the error file 2234 // somewhere where we can find it in the minidump. 2235 } 2236 2237 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread, 2238 struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) { 2239 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2240 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2241 if (Interpreter::contains(pc)) { 2242 *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord); 2243 if (!fr->is_first_java_frame()) { 2244 // get_frame_at_stack_banging_point() is only called when we 2245 // have well defined stacks so java_sender() calls do not need 2246 // to assert safe_for_sender() first. 2247 *fr = fr->java_sender(); 2248 } 2249 } else { 2250 // more complex code with compiled code 2251 assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above"); 2252 CodeBlob* cb = CodeCache::find_blob(pc); 2253 if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) { 2254 // Not sure where the pc points to, fallback to default 2255 // stack overflow handling 2256 return false; 2257 } else { 2258 *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord); 2259 // in compiled code, the stack banging is performed just after the return pc 2260 // has been pushed on the stack 2261 *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp())); 2262 if (!fr->is_java_frame()) { 2263 // See java_sender() comment above. 2264 *fr = fr->java_sender(); 2265 } 2266 } 2267 } 2268 assert(fr->is_java_frame(), "Safety check"); 2269 return true; 2270 } 2271 2272 //----------------------------------------------------------------------------- 2273 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { 2274 if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH; 2275 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2276 #ifdef _M_AMD64 2277 address pc = (address) exceptionInfo->ContextRecord->Rip; 2278 #else 2279 address pc = (address) exceptionInfo->ContextRecord->Eip; 2280 #endif 2281 Thread* t = Thread::current_or_null_safe(); 2282 2283 // Handle SafeFetch32 and SafeFetchN exceptions. 2284 if (StubRoutines::is_safefetch_fault(pc)) { 2285 return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc)); 2286 } 2287 2288 #ifndef _WIN64 2289 // Execution protection violation - win32 running on AMD64 only 2290 // Handled first to avoid misdiagnosis as a "normal" access violation; 2291 // This is safe to do because we have a new/unique ExceptionInformation 2292 // code for this condition. 
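// As decoded in os::print_siginfo() above, ExceptionInformation[0] holds the
// access type for access violations (0 = read, 1 = write, 8 = DEP/execute
// violation, i.e. EXCEPTION_INFO_EXEC_VIOLATION) and ExceptionInformation[1]
// holds the faulting address.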
2293 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2294 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2295 int exception_subcode = (int) exceptionRecord->ExceptionInformation[0]; 2296 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2297 2298 if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) { 2299 int page_size = os::vm_page_size(); 2300 2301 // Make sure the pc and the faulting address are sane. 2302 // 2303 // If an instruction spans a page boundary, and the page containing 2304 // the beginning of the instruction is executable but the following 2305 // page is not, the pc and the faulting address might be slightly 2306 // different - we still want to unguard the 2nd page in this case. 2307 // 2308 // 15 bytes seems to be a (very) safe value for max instruction size. 2309 bool pc_is_near_addr = 2310 (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15); 2311 bool instr_spans_page_boundary = 2312 (align_down((intptr_t) pc ^ (intptr_t) addr, 2313 (intptr_t) page_size) > 0); 2314 2315 if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) { 2316 static volatile address last_addr = 2317 (address) os::non_memory_address_word(); 2318 2319 // In conservative mode, don't unguard unless the address is in the VM 2320 if (UnguardOnExecutionViolation > 0 && addr != last_addr && 2321 (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) { 2322 2323 // Set memory to RWX and retry 2324 address page_start = align_down(addr, page_size); 2325 bool res = os::protect_memory((char*) page_start, page_size, 2326 os::MEM_PROT_RWX); 2327 2328 log_debug(os)("Execution protection violation " 2329 "at " INTPTR_FORMAT 2330 ", unguarding " INTPTR_FORMAT ": %s", p2i(addr), 2331 p2i(page_start), (res ? "success" : os::strerror(errno))); 2332 2333 // Set last_addr so if we fault again at the same address, we don't 2334 // end up in an endless loop. 2335 // 2336 // There are two potential complications here. Two threads trapping 2337 // at the same address at the same time could cause one of the 2338 // threads to think it already unguarded, and abort the VM. Likely 2339 // very rare. 2340 // 2341 // The other race involves two threads alternately trapping at 2342 // different addresses and failing to unguard the page, resulting in 2343 // an endless loop. This condition is probably even more unlikely 2344 // than the first. 2345 // 2346 // Although both cases could be avoided by using locks or thread 2347 // local last_addr, these solutions are unnecessary complication: 2348 // this handler is a best-effort safety net, not a complete solution. 2349 // It is disabled by default and should only be used as a workaround 2350 // in case we missed any no-execute-unsafe VM code. 2351 2352 last_addr = addr; 2353 2354 return EXCEPTION_CONTINUE_EXECUTION; 2355 } 2356 } 2357 2358 // Last unguard failed or not unguarding 2359 tty->print_raw_cr("Execution protection violation"); 2360 report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord, 2361 exceptionInfo->ContextRecord); 2362 return EXCEPTION_CONTINUE_SEARCH; 2363 } 2364 } 2365 #endif // _WIN64 2366 2367 // Check to see if we caught the safepoint code in the 2368 // process of write protecting the memory serialization page. 2369 // It write enables the page immediately after protecting it 2370 // so just return. 
2371 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2372 if (t != NULL && t->is_Java_thread()) { 2373 JavaThread* thread = (JavaThread*) t; 2374 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2375 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2376 if (os::is_memory_serialize_page(thread, addr)) { 2377 // Block current thread until the memory serialize page permission restored. 2378 os::block_on_serialize_page_trap(); 2379 return EXCEPTION_CONTINUE_EXECUTION; 2380 } 2381 } 2382 } 2383 2384 if ((exception_code == EXCEPTION_ACCESS_VIOLATION) && 2385 VM_Version::is_cpuinfo_segv_addr(pc)) { 2386 // Verify that OS save/restore AVX registers. 2387 return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr()); 2388 } 2389 2390 if (t != NULL && t->is_Java_thread()) { 2391 JavaThread* thread = (JavaThread*) t; 2392 bool in_java = thread->thread_state() == _thread_in_Java; 2393 2394 // Handle potential stack overflows up front. 2395 if (exception_code == EXCEPTION_STACK_OVERFLOW) { 2396 if (thread->stack_guards_enabled()) { 2397 if (in_java) { 2398 frame fr; 2399 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2400 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2401 if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) { 2402 assert(fr.is_java_frame(), "Must be a Java frame"); 2403 SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr); 2404 } 2405 } 2406 // Yellow zone violation. The o/s has unprotected the first yellow 2407 // zone page for us. Note: must call disable_stack_yellow_zone to 2408 // update the enabled status, even if the zone contains only one page. 2409 assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages"); 2410 thread->disable_stack_yellow_reserved_zone(); 2411 // If not in java code, return and hope for the best. 2412 return in_java 2413 ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)) 2414 : EXCEPTION_CONTINUE_EXECUTION; 2415 } else { 2416 // Fatal red zone violation. 2417 thread->disable_stack_red_zone(); 2418 tty->print_raw_cr("An unrecoverable stack overflow has occurred."); 2419 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2420 exceptionInfo->ContextRecord); 2421 return EXCEPTION_CONTINUE_SEARCH; 2422 } 2423 } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2424 // Either stack overflow or null pointer exception. 2425 if (in_java) { 2426 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2427 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2428 address stack_end = thread->stack_end(); 2429 if (addr < stack_end && addr >= stack_end - os::vm_page_size()) { 2430 // Stack overflow. 2431 assert(!os::uses_stack_guard_pages(), 2432 "should be caught by red zone code above."); 2433 return Handle_Exception(exceptionInfo, 2434 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2435 } 2436 // Check for safepoint polling and implicit null 2437 // We only expect null pointers in the stubs (vtable) 2438 // the rest are checked explicitly now. 
2439 CodeBlob* cb = CodeCache::find_blob(pc); 2440 if (cb != NULL) { 2441 if (os::is_poll_address(addr)) { 2442 address stub = SharedRuntime::get_poll_stub(pc); 2443 return Handle_Exception(exceptionInfo, stub); 2444 } 2445 } 2446 { 2447 #ifdef _WIN64 2448 // If it's a legal stack address map the entire region in 2449 // 2450 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2451 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2452 if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) { 2453 addr = (address)((uintptr_t)addr & 2454 (~((uintptr_t)os::vm_page_size() - (uintptr_t)1))); 2455 os::commit_memory((char *)addr, thread->stack_base() - addr, 2456 !ExecMem); 2457 return EXCEPTION_CONTINUE_EXECUTION; 2458 } else 2459 #endif 2460 { 2461 // Null pointer exception. 2462 if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) { 2463 address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); 2464 if (stub != NULL) return Handle_Exception(exceptionInfo, stub); 2465 } 2466 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2467 exceptionInfo->ContextRecord); 2468 return EXCEPTION_CONTINUE_SEARCH; 2469 } 2470 } 2471 } 2472 2473 #ifdef _WIN64 2474 // Special care for fast JNI field accessors. 2475 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks 2476 // in and the heap gets shrunk before the field access. 2477 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2478 address addr = JNI_FastGetField::find_slowcase_pc(pc); 2479 if (addr != (address)-1) { 2480 return Handle_Exception(exceptionInfo, addr); 2481 } 2482 } 2483 #endif 2484 2485 // Stack overflow or null pointer exception in native code. 2486 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2487 exceptionInfo->ContextRecord); 2488 return EXCEPTION_CONTINUE_SEARCH; 2489 } // /EXCEPTION_ACCESS_VIOLATION 2490 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 2491 2492 if (in_java) { 2493 switch (exception_code) { 2494 case EXCEPTION_INT_DIVIDE_BY_ZERO: 2495 return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO)); 2496 2497 case EXCEPTION_INT_OVERFLOW: 2498 return Handle_IDiv_Exception(exceptionInfo); 2499 2500 } // switch 2501 } 2502 if (((thread->thread_state() == _thread_in_Java) || 2503 (thread->thread_state() == _thread_in_native)) && 2504 exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) { 2505 LONG result=Handle_FLT_Exception(exceptionInfo); 2506 if (result==EXCEPTION_CONTINUE_EXECUTION) return result; 2507 } 2508 } 2509 2510 if (exception_code != EXCEPTION_BREAKPOINT) { 2511 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2512 exceptionInfo->ContextRecord); 2513 } 2514 return EXCEPTION_CONTINUE_SEARCH; 2515 } 2516 2517 #ifndef _WIN64 2518 // Special care for fast JNI accessors. 2519 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and 2520 // the heap gets shrunk before the field access. 2521 // Need to install our own structured exception handler since native code may 2522 // install its own. 
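// For reference, each DEFINE_FAST_GETFIELD wrapper below expands to roughly
// the following (shown for the jint case; illustrative only):
//
//   jint JNICALL jni_fast_GetIntField_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) {
//     __try {
//       return (*JNI_FastGetField::jni_fast_GetIntField_fp)(env, obj, fieldID);
//     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) {
//     }
//     return 0;
//   }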
2523 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { 2524 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2525 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2526 address pc = (address) exceptionInfo->ContextRecord->Eip; 2527 address addr = JNI_FastGetField::find_slowcase_pc(pc); 2528 if (addr != (address)-1) { 2529 return Handle_Exception(exceptionInfo, addr); 2530 } 2531 } 2532 return EXCEPTION_CONTINUE_SEARCH; 2533 } 2534 2535 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result) \ 2536 Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, \ 2537 jobject obj, \ 2538 jfieldID fieldID) { \ 2539 __try { \ 2540 return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, \ 2541 obj, \ 2542 fieldID); \ 2543 } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*) \ 2544 _exception_info())) { \ 2545 } \ 2546 return 0; \ 2547 } 2548 2549 DEFINE_FAST_GETFIELD(jboolean, bool, Boolean) 2550 DEFINE_FAST_GETFIELD(jbyte, byte, Byte) 2551 DEFINE_FAST_GETFIELD(jchar, char, Char) 2552 DEFINE_FAST_GETFIELD(jshort, short, Short) 2553 DEFINE_FAST_GETFIELD(jint, int, Int) 2554 DEFINE_FAST_GETFIELD(jlong, long, Long) 2555 DEFINE_FAST_GETFIELD(jfloat, float, Float) 2556 DEFINE_FAST_GETFIELD(jdouble, double, Double) 2557 2558 address os::win32::fast_jni_accessor_wrapper(BasicType type) { 2559 switch (type) { 2560 case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper; 2561 case T_BYTE: return (address)jni_fast_GetByteField_wrapper; 2562 case T_CHAR: return (address)jni_fast_GetCharField_wrapper; 2563 case T_SHORT: return (address)jni_fast_GetShortField_wrapper; 2564 case T_INT: return (address)jni_fast_GetIntField_wrapper; 2565 case T_LONG: return (address)jni_fast_GetLongField_wrapper; 2566 case T_FLOAT: return (address)jni_fast_GetFloatField_wrapper; 2567 case T_DOUBLE: return (address)jni_fast_GetDoubleField_wrapper; 2568 default: ShouldNotReachHere(); 2569 } 2570 return (address)-1; 2571 } 2572 #endif 2573 2574 // Virtual Memory 2575 2576 int os::vm_page_size() { return os::win32::vm_page_size(); } 2577 int os::vm_allocation_granularity() { 2578 return os::win32::vm_allocation_granularity(); 2579 } 2580 2581 // Windows large page support is available on Windows 2003. In order to use 2582 // large page memory, the administrator must first assign additional privilege 2583 // to the user: 2584 // + select Control Panel -> Administrative Tools -> Local Security Policy 2585 // + select Local Policies -> User Rights Assignment 2586 // + double click "Lock pages in memory", add users and/or groups 2587 // + reboot 2588 // Note the above steps are needed for administrator as well, as administrators 2589 // by default do not have the privilege to lock pages in memory. 2590 // 2591 // Note about Windows 2003: although the API supports committing large page 2592 // memory on a page-by-page basis and VirtualAlloc() returns success under this 2593 // scenario, I found through experiment it only uses large page if the entire 2594 // memory region is reserved and committed in a single VirtualAlloc() call. 2595 // This makes Windows large page support more or less like Solaris ISM, in 2596 // that the entire heap must be committed upfront. This probably will change 2597 // in the future, if so the code below needs to be revisited. 
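// In the common single-chunk case the large page reservation below (see
// os::reserve_memory_special()) boils down to a call of this shape
// (simplified sketch, not the exact code):
//
//   char* p = (char*) VirtualAlloc(NULL, bytes,
//                                  MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
//                                  PAGE_READWRITE);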
2598 2599 #ifndef MEM_LARGE_PAGES 2600 #define MEM_LARGE_PAGES 0x20000000 2601 #endif 2602 2603 static HANDLE _hProcess; 2604 static HANDLE _hToken; 2605 2606 // Container for NUMA node list info 2607 class NUMANodeListHolder { 2608 private: 2609 int *_numa_used_node_list; // allocated below 2610 int _numa_used_node_count; 2611 2612 void free_node_list() { 2613 if (_numa_used_node_list != NULL) { 2614 FREE_C_HEAP_ARRAY(int, _numa_used_node_list); 2615 } 2616 } 2617 2618 public: 2619 NUMANodeListHolder() { 2620 _numa_used_node_count = 0; 2621 _numa_used_node_list = NULL; 2622 // do rest of initialization in build routine (after function pointers are set up) 2623 } 2624 2625 ~NUMANodeListHolder() { 2626 free_node_list(); 2627 } 2628 2629 bool build() { 2630 DWORD_PTR proc_aff_mask; 2631 DWORD_PTR sys_aff_mask; 2632 if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false; 2633 ULONG highest_node_number; 2634 if (!GetNumaHighestNodeNumber(&highest_node_number)) return false; 2635 free_node_list(); 2636 _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal); 2637 for (unsigned int i = 0; i <= highest_node_number; i++) { 2638 ULONGLONG proc_mask_numa_node; 2639 if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false; 2640 if ((proc_aff_mask & proc_mask_numa_node)!=0) { 2641 _numa_used_node_list[_numa_used_node_count++] = i; 2642 } 2643 } 2644 return (_numa_used_node_count > 1); 2645 } 2646 2647 int get_count() { return _numa_used_node_count; } 2648 int get_node_list_entry(int n) { 2649 // for indexes out of range, returns -1 2650 return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1); 2651 } 2652 2653 } numa_node_list_holder; 2654 2655 2656 2657 static size_t _large_page_size = 0; 2658 2659 static bool request_lock_memory_privilege() { 2660 _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, 2661 os::current_process_id()); 2662 2663 LUID luid; 2664 if (_hProcess != NULL && 2665 OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) && 2666 LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) { 2667 2668 TOKEN_PRIVILEGES tp; 2669 tp.PrivilegeCount = 1; 2670 tp.Privileges[0].Luid = luid; 2671 tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; 2672 2673 // AdjustTokenPrivileges() may return TRUE even when it couldn't change the 2674 // privilege. Check GetLastError() too. See MSDN document. 2675 if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) && 2676 (GetLastError() == ERROR_SUCCESS)) { 2677 return true; 2678 } 2679 } 2680 2681 return false; 2682 } 2683 2684 static void cleanup_after_large_page_init() { 2685 if (_hProcess) CloseHandle(_hProcess); 2686 _hProcess = NULL; 2687 if (_hToken) CloseHandle(_hToken); 2688 _hToken = NULL; 2689 } 2690 2691 static bool numa_interleaving_init() { 2692 bool success = false; 2693 bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving); 2694 2695 // print a warning if UseNUMAInterleaving flag is specified on command line 2696 bool warn_on_failure = use_numa_interleaving_specified; 2697 #define WARN(msg) if (warn_on_failure) { warning(msg); } 2698 2699 // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages) 2700 size_t min_interleave_granularity = UseLargePages ? 
_large_page_size : os::vm_allocation_granularity(); 2701 NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity); 2702 2703 if (numa_node_list_holder.build()) { 2704 if (log_is_enabled(Debug, os, cpu)) { 2705 Log(os, cpu) log; 2706 log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count()); 2707 for (int i = 0; i < numa_node_list_holder.get_count(); i++) { 2708 log.debug(" %d ", numa_node_list_holder.get_node_list_entry(i)); 2709 } 2710 } 2711 success = true; 2712 } else { 2713 WARN("Process does not cover multiple NUMA nodes."); 2714 } 2715 if (!success) { 2716 if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag."); 2717 } 2718 return success; 2719 #undef WARN 2720 } 2721 2722 // this routine is used whenever we need to reserve a contiguous VA range 2723 // but we need to make separate VirtualAlloc calls for each piece of the range 2724 // Reasons for doing this: 2725 // * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise) 2726 // * UseNUMAInterleaving requires a separate node for each piece 2727 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, 2728 DWORD prot, 2729 bool should_inject_error = false) { 2730 char * p_buf; 2731 // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size 2732 size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity(); 2733 size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size; 2734 2735 // first reserve enough address space in advance since we want to be 2736 // able to break a single contiguous virtual address range into multiple 2737 // large page commits but WS2003 does not allow reserving large page space 2738 // so we just use 4K pages for reserve, this gives us a legal contiguous 2739 // address space. then we will deallocate that reservation, and re alloc 2740 // using large pages 2741 const size_t size_of_reserve = bytes + chunk_size; 2742 if (bytes > size_of_reserve) { 2743 // Overflowed. 2744 return NULL; 2745 } 2746 p_buf = (char *) VirtualAlloc(addr, 2747 size_of_reserve, // size of Reserve 2748 MEM_RESERVE, 2749 PAGE_READWRITE); 2750 // If reservation failed, return NULL 2751 if (p_buf == NULL) return NULL; 2752 MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC); 2753 os::release_memory(p_buf, bytes + chunk_size); 2754 2755 // we still need to round up to a page boundary (in case we are using large pages) 2756 // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size) 2757 // instead we handle this in the bytes_to_rq computation below 2758 p_buf = align_up(p_buf, page_size); 2759 2760 // now go through and allocate one chunk at a time until all bytes are 2761 // allocated 2762 size_t bytes_remaining = bytes; 2763 // An overflow of align_up() would have been caught above 2764 // in the calculation of size_of_reserve. 
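// Worked example for the loop below (hypothetical numbers): with
// chunk_size = 2M and next_alloc_addr sitting 512K past a chunk boundary,
// the first request is MIN2(bytes_remaining, 2M - 512K), i.e. at most 1.5M;
// after that next_alloc_addr is chunk-aligned and allocation proceeds in
// whole 2M chunks until bytes_remaining is exhausted.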
2765 char * next_alloc_addr = p_buf; 2766 HANDLE hProc = GetCurrentProcess(); 2767 2768 #ifdef ASSERT 2769 // Variable for the failure injection 2770 int ran_num = os::random(); 2771 size_t fail_after = ran_num % bytes; 2772 #endif 2773 2774 int count=0; 2775 while (bytes_remaining) { 2776 // select bytes_to_rq to get to the next chunk_size boundary 2777 2778 size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size)); 2779 // Note allocate and commit 2780 char * p_new; 2781 2782 #ifdef ASSERT 2783 bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after); 2784 #else 2785 const bool inject_error_now = false; 2786 #endif 2787 2788 if (inject_error_now) { 2789 p_new = NULL; 2790 } else { 2791 if (!UseNUMAInterleaving) { 2792 p_new = (char *) VirtualAlloc(next_alloc_addr, 2793 bytes_to_rq, 2794 flags, 2795 prot); 2796 } else { 2797 // get the next node to use from the used_node_list 2798 assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected"); 2799 DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count()); 2800 p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node); 2801 } 2802 } 2803 2804 if (p_new == NULL) { 2805 // Free any allocated pages 2806 if (next_alloc_addr > p_buf) { 2807 // Some memory was committed so release it. 2808 size_t bytes_to_release = bytes - bytes_remaining; 2809 // NMT has yet to record any individual blocks, so it 2810 // need to create a dummy 'reserve' record to match 2811 // the release. 2812 MemTracker::record_virtual_memory_reserve((address)p_buf, 2813 bytes_to_release, CALLER_PC); 2814 os::release_memory(p_buf, bytes_to_release); 2815 } 2816 #ifdef ASSERT 2817 if (should_inject_error) { 2818 log_develop_debug(pagesize)("Reserving pages individually failed."); 2819 } 2820 #endif 2821 return NULL; 2822 } 2823 2824 bytes_remaining -= bytes_to_rq; 2825 next_alloc_addr += bytes_to_rq; 2826 count++; 2827 } 2828 // Although the memory is allocated individually, it is returned as one. 2829 // NMT records it as one block. 
2830 if ((flags & MEM_COMMIT) != 0) { 2831 MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC); 2832 } else { 2833 MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC); 2834 } 2835 2836 // made it this far, success 2837 return p_buf; 2838 } 2839 2840 2841 2842 void os::large_page_init() { 2843 if (!UseLargePages) return; 2844 2845 // print a warning if any large page related flag is specified on command line 2846 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) || 2847 !FLAG_IS_DEFAULT(LargePageSizeInBytes); 2848 bool success = false; 2849 2850 #define WARN(msg) if (warn_on_failure) { warning(msg); } 2851 if (request_lock_memory_privilege()) { 2852 size_t s = GetLargePageMinimum(); 2853 if (s) { 2854 #if defined(IA32) || defined(AMD64) 2855 if (s > 4*M || LargePageSizeInBytes > 4*M) { 2856 WARN("JVM cannot use large pages bigger than 4mb."); 2857 } else { 2858 #endif 2859 if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) { 2860 _large_page_size = LargePageSizeInBytes; 2861 } else { 2862 _large_page_size = s; 2863 } 2864 success = true; 2865 #if defined(IA32) || defined(AMD64) 2866 } 2867 #endif 2868 } else { 2869 WARN("Large page is not supported by the processor."); 2870 } 2871 } else { 2872 WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory."); 2873 } 2874 #undef WARN 2875 2876 const size_t default_page_size = (size_t) vm_page_size(); 2877 if (success && _large_page_size > default_page_size) { 2878 _page_sizes[0] = _large_page_size; 2879 _page_sizes[1] = default_page_size; 2880 _page_sizes[2] = 0; 2881 } 2882 2883 cleanup_after_large_page_init(); 2884 UseLargePages = success; 2885 } 2886 2887 // On win32, one cannot release just a part of reserved memory, it's an 2888 // all or nothing deal. When we split a reservation, we must break the 2889 // reservation into two reservations. 2890 void os::pd_split_reserved_memory(char *base, size_t size, size_t split, 2891 bool realloc) { 2892 if (size > 0) { 2893 release_memory(base, size); 2894 if (realloc) { 2895 reserve_memory(split, base); 2896 } 2897 if (size != split) { 2898 reserve_memory(size - split, base + split); 2899 } 2900 } 2901 } 2902 2903 // Multiple threads can race in this code but it's not possible to unmap small sections of 2904 // virtual space to get requested alignment, like posix-like os's. 2905 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe. 
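// Illustration of the scheme used below (hypothetical numbers): asking for
// size = 4M aligned to 1M first reserves extra_size = 5M; if that lands at
// 0x0DE40000, align_up() yields 0x0DF00000, the 5M reservation is released,
// and 4M is re-reserved at 0x0DF00000. If another thread grabs that range in
// the meantime, the re-reservation fails and the loop simply retries.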
2906 char* os::reserve_memory_aligned(size_t size, size_t alignment) {
2907 assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
2908 "Alignment must be a multiple of allocation granularity");
2909 assert((size & (alignment - 1)) == 0, "size must be 'alignment' aligned");
2910
2911 size_t extra_size = size + alignment;
2912 assert(extra_size >= size, "overflow, size is too large to allow alignment");
2913
2914 char* aligned_base = NULL;
2915
2916 do {
2917 char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
2918 if (extra_base == NULL) {
2919 return NULL;
2920 }
2921 // Do manual alignment
2922 aligned_base = align_up(extra_base, alignment);
2923
2924 os::release_memory(extra_base, extra_size);
2925
2926 aligned_base = os::reserve_memory(size, aligned_base);
2927
2928 } while (aligned_base == NULL);
2929
2930 return aligned_base;
2931 }
2932
2933 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
2934 assert((size_t)addr % os::vm_allocation_granularity() == 0,
2935 "reserve alignment");
2936 assert(bytes % os::vm_page_size() == 0, "reserve page size");
2937 char* res;
2938 // note that if UseLargePages is on, all the areas that require interleaving
2939 // will go thru reserve_memory_special rather than thru here.
2940 bool use_individual = (UseNUMAInterleaving && !UseLargePages);
2941 if (!use_individual) {
2942 res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
2943 } else {
2944 elapsedTimer reserveTimer;
2945 if (Verbose && PrintMiscellaneous) reserveTimer.start();
2946 // in numa interleaving, we have to allocate pages individually
2947 // (well really chunks of NUMAInterleaveGranularity size)
2948 res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
2949 if (res == NULL) {
2950 warning("NUMA page allocation failed");
2951 }
2952 if (Verbose && PrintMiscellaneous) {
2953 reserveTimer.stop();
2954 tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
2955 reserveTimer.milliseconds(), reserveTimer.ticks());
2956 }
2957 }
2958 assert(res == NULL || addr == NULL || addr == res,
2959 "Unexpected address from reserve.");
2960
2961 return res;
2962 }
2963
2964 // Reserve memory at an arbitrary address, only if that area is
2965 // available (and not reserved for something else).
2966 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2967 // Windows os::reserve_memory() fails if the requested address range is
2968 // not available.
2969 return reserve_memory(bytes, requested_addr);
2970 }
2971
2972 size_t os::large_page_size() {
2973 return _large_page_size;
2974 }
2975
2976 bool os::can_commit_large_page_memory() {
2977 // Windows only uses large page memory when the entire region is reserved
2978 // and committed in a single VirtualAlloc() call. This may change in the
2979 // future, but with Windows 2003 it's not possible to commit on demand.
2980 return false;
2981 }
2982
2983 bool os::can_execute_large_page_memory() {
2984 return true;
2985 }
2986
2987 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
2988 bool exec) {
2989 assert(UseLargePages, "only for large pages");
2990
2991 if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
2992 return NULL; // Fallback to small pages.
2993 }
2994
2995 const DWORD prot = exec ?
PAGE_EXECUTE_READWRITE : PAGE_READWRITE; 2996 const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 2997 2998 // with large pages, there are two cases where we need to use Individual Allocation 2999 // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003) 3000 // 2) NUMA Interleaving is enabled, in which case we use a different node for each page 3001 if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) { 3002 log_debug(pagesize)("Reserving large pages individually."); 3003 3004 char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError); 3005 if (p_buf == NULL) { 3006 // give an appropriate warning message 3007 if (UseNUMAInterleaving) { 3008 warning("NUMA large page allocation failed, UseLargePages flag ignored"); 3009 } 3010 if (UseLargePagesIndividualAllocation) { 3011 warning("Individually allocated large pages failed, " 3012 "use -XX:-UseLargePagesIndividualAllocation to turn off"); 3013 } 3014 return NULL; 3015 } 3016 3017 return p_buf; 3018 3019 } else { 3020 log_debug(pagesize)("Reserving large pages in a single large chunk."); 3021 3022 // normal policy just allocate it all at once 3023 DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 3024 char * res = (char *)VirtualAlloc(addr, bytes, flag, prot); 3025 if (res != NULL) { 3026 MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC); 3027 } 3028 3029 return res; 3030 } 3031 } 3032 3033 bool os::release_memory_special(char* base, size_t bytes) { 3034 assert(base != NULL, "Sanity check"); 3035 return release_memory(base, bytes); 3036 } 3037 3038 void os::print_statistics() { 3039 } 3040 3041 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) { 3042 int err = os::get_last_error(); 3043 char buf[256]; 3044 size_t buf_len = os::lasterror(buf, sizeof(buf)); 3045 warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT 3046 ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes, 3047 exec, buf_len != 0 ? buf : "<no_error_string>", err); 3048 } 3049 3050 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) { 3051 if (bytes == 0) { 3052 // Don't bother the OS with noops. 3053 return true; 3054 } 3055 assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries"); 3056 assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks"); 3057 // Don't attempt to print anything if the OS call fails. We're 3058 // probably low on resources, so the print itself may cause crashes. 3059 3060 // unless we have NUMAInterleaving enabled, the range of a commit 3061 // is always within a reserve covered by a single VirtualAlloc 3062 // in that case we can just do a single commit for the requested size 3063 if (!UseNUMAInterleaving) { 3064 if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) { 3065 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 3066 return false; 3067 } 3068 if (exec) { 3069 DWORD oldprot; 3070 // Windows doc says to use VirtualProtect to get execute permissions 3071 if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) { 3072 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 3073 return false; 3074 } 3075 } 3076 return true; 3077 } else { 3078 3079 // when NUMAInterleaving is enabled, the commit might cover a range that 3080 // came from multiple VirtualAlloc reserves (using allocate_pages_individually). 3081 // VirtualQuery can help us determine that. 
The RegionSize that VirtualQuery 3082 // returns represents the number of bytes that can be committed in one step. 3083 size_t bytes_remaining = bytes; 3084 char * next_alloc_addr = addr; 3085 while (bytes_remaining > 0) { 3086 MEMORY_BASIC_INFORMATION alloc_info; 3087 VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info)); 3088 size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3089 if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, 3090 PAGE_READWRITE) == NULL) { 3091 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 3092 exec);) 3093 return false; 3094 } 3095 if (exec) { 3096 DWORD oldprot; 3097 if (!VirtualProtect(next_alloc_addr, bytes_to_rq, 3098 PAGE_EXECUTE_READWRITE, &oldprot)) { 3099 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 3100 exec);) 3101 return false; 3102 } 3103 } 3104 bytes_remaining -= bytes_to_rq; 3105 next_alloc_addr += bytes_to_rq; 3106 } 3107 } 3108 // if we made it this far, return true 3109 return true; 3110 } 3111 3112 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, 3113 bool exec) { 3114 // alignment_hint is ignored on this OS 3115 return pd_commit_memory(addr, size, exec); 3116 } 3117 3118 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec, 3119 const char* mesg) { 3120 assert(mesg != NULL, "mesg must be specified"); 3121 if (!pd_commit_memory(addr, size, exec)) { 3122 warn_fail_commit_memory(addr, size, exec); 3123 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg); 3124 } 3125 } 3126 3127 void os::pd_commit_memory_or_exit(char* addr, size_t size, 3128 size_t alignment_hint, bool exec, 3129 const char* mesg) { 3130 // alignment_hint is ignored on this OS 3131 pd_commit_memory_or_exit(addr, size, exec, mesg); 3132 } 3133 3134 bool os::pd_uncommit_memory(char* addr, size_t bytes) { 3135 if (bytes == 0) { 3136 // Don't bother the OS with noops. 3137 return true; 3138 } 3139 assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries"); 3140 assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks"); 3141 return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0); 3142 } 3143 3144 bool os::pd_release_memory(char* addr, size_t bytes) { 3145 return VirtualFree(addr, 0, MEM_RELEASE) != 0; 3146 } 3147 3148 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 3149 return os::commit_memory(addr, size, !ExecMem); 3150 } 3151 3152 bool os::remove_stack_guard_pages(char* addr, size_t size) { 3153 return os::uncommit_memory(addr, size); 3154 } 3155 3156 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) { 3157 uint count = 0; 3158 bool ret = false; 3159 size_t bytes_remaining = bytes; 3160 char * next_protect_addr = addr; 3161 3162 // Use VirtualQuery() to get the chunk size. 3163 while (bytes_remaining) { 3164 MEMORY_BASIC_INFORMATION alloc_info; 3165 if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) { 3166 return false; 3167 } 3168 3169 size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3170 // We used different API at allocate_pages_individually() based on UseNUMAInterleaving, 3171 // but we don't distinguish here as both cases are protected by same API. 
3172 ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3173 if (!ret) {
3174 warning("Failed protecting pages individually for chunk #%u", count);
3175 return false;
3176 }
3177
3178 bytes_remaining -= bytes_to_protect;
3179 next_protect_addr += bytes_to_protect;
3180 count++;
3181 }
3182 return ret;
3183 }
3184
3185 // Set protections specified
3186 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3187 bool is_committed) {
3188 unsigned int p = 0;
3189 switch (prot) {
3190 case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3191 case MEM_PROT_READ: p = PAGE_READONLY; break;
3192 case MEM_PROT_RW: p = PAGE_READWRITE; break;
3193 case MEM_PROT_RWX: p = PAGE_EXECUTE_READWRITE; break;
3194 default:
3195 ShouldNotReachHere();
3196 }
3197
3198 DWORD old_status;
3199
3200 // Strangely enough, on Win32 one can change protection only for committed
3201 // memory; not a big deal anyway, as bytes is less than or equal to 64K.
3202 if (!is_committed) {
3203 commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3204 "cannot commit protection page");
3205 }
3206 // One cannot use os::guard_memory() here, as on Win32 guard pages
3207 // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3208 //
3209 // Pages in the region become guard pages. Any attempt to access a guard page
3210 // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3211 // the guard page status. Guard pages thus act as a one-time access alarm.
3212 bool ret;
3213 if (UseNUMAInterleaving) {
3214 // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3215 // so we must protect the chunks individually.
3216 ret = protect_pages_individually(addr, bytes, p, &old_status);
3217 } else {
3218 ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3219 }
3220 #ifdef ASSERT
3221 if (!ret) {
3222 int err = os::get_last_error();
3223 char buf[256];
3224 size_t buf_len = os::lasterror(buf, sizeof(buf));
3225 warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3226 ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3227 buf_len != 0 ?
buf : "<no_error_string>", err); 3228 } 3229 #endif 3230 return ret; 3231 } 3232 3233 bool os::guard_memory(char* addr, size_t bytes) { 3234 DWORD old_status; 3235 return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0; 3236 } 3237 3238 bool os::unguard_memory(char* addr, size_t bytes) { 3239 DWORD old_status; 3240 return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0; 3241 } 3242 3243 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { } 3244 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { } 3245 void os::numa_make_global(char *addr, size_t bytes) { } 3246 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { } 3247 bool os::numa_topology_changed() { return false; } 3248 size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); } 3249 int os::numa_get_group_id() { return 0; } 3250 size_t os::numa_get_leaf_groups(int *ids, size_t size) { 3251 if (numa_node_list_holder.get_count() == 0 && size > 0) { 3252 // Provide an answer for UMA systems 3253 ids[0] = 0; 3254 return 1; 3255 } else { 3256 // check for size bigger than actual groups_num 3257 size = MIN2(size, numa_get_groups_num()); 3258 for (int i = 0; i < (int)size; i++) { 3259 ids[i] = numa_node_list_holder.get_node_list_entry(i); 3260 } 3261 return size; 3262 } 3263 } 3264 3265 bool os::get_page_info(char *start, page_info* info) { 3266 return false; 3267 } 3268 3269 char *os::scan_pages(char *start, char* end, page_info* page_expected, 3270 page_info* page_found) { 3271 return end; 3272 } 3273 3274 char* os::non_memory_address_word() { 3275 // Must never look like an address returned by reserve_memory, 3276 // even in its subfields (as defined by the CPU immediate fields, 3277 // if the CPU splits constants across multiple instructions). 3278 return (char*)-1; 3279 } 3280 3281 #define MAX_ERROR_COUNT 100 3282 #define SYS_THREAD_ERROR 0xffffffffUL 3283 3284 void os::pd_start_thread(Thread* thread) { 3285 DWORD ret = ResumeThread(thread->osthread()->thread_handle()); 3286 // Returns previous suspend state: 3287 // 0: Thread was not suspended 3288 // 1: Thread is running now 3289 // >1: Thread is still suspended. 3290 assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back 3291 } 3292 3293 class HighResolutionInterval : public CHeapObj<mtThread> { 3294 // The default timer resolution seems to be 10 milliseconds. 3295 // (Where is this written down?) 3296 // If someone wants to sleep for only a fraction of the default, 3297 // then we set the timer resolution down to 1 millisecond for 3298 // the duration of their interval. 3299 // We carefully set the resolution back, since otherwise we 3300 // seem to incur an overhead (3%?) that we don't need. 3301 // CONSIDER: if ms is small, say 3, then we should run with a high resolution time. 3302 // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod(). 3303 // Alternatively, we could compute the relative error (503/500 = .6%) and only use 3304 // timeBeginPeriod() if the relative error exceeded some threshold. 3305 // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and 3306 // to decreased efficiency related to increased timer "tick" rates. We want to minimize 3307 // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high 3308 // resolution timers running. 
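// Usage sketch (mirrors os::sleep() below; illustrative only):
//   HighResolutionInterval* phri = new HighResolutionInterval(ms); // timeBeginPeriod(1) iff ms % 10 != 0
//   WaitForMultipleObjects(1, events, FALSE, (DWORD)ms);
//   delete phri;  // destructor calls timeEndPeriod(1) if the resolution was lowered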
3309 private: 3310 jlong resolution; 3311 public: 3312 HighResolutionInterval(jlong ms) { 3313 resolution = ms % 10L; 3314 if (resolution != 0) { 3315 MMRESULT result = timeBeginPeriod(1L); 3316 } 3317 } 3318 ~HighResolutionInterval() { 3319 if (resolution != 0) { 3320 MMRESULT result = timeEndPeriod(1L); 3321 } 3322 resolution = 0L; 3323 } 3324 }; 3325 3326 int os::sleep(Thread* thread, jlong ms, bool interruptable) { 3327 jlong limit = (jlong) MAXDWORD; 3328 3329 while (ms > limit) { 3330 int res; 3331 if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) { 3332 return res; 3333 } 3334 ms -= limit; 3335 } 3336 3337 assert(thread == Thread::current(), "thread consistency check"); 3338 OSThread* osthread = thread->osthread(); 3339 OSThreadWaitState osts(osthread, false /* not Object.wait() */); 3340 int result; 3341 if (interruptable) { 3342 assert(thread->is_Java_thread(), "must be java thread"); 3343 JavaThread *jt = (JavaThread *) thread; 3344 ThreadBlockInVM tbivm(jt); 3345 3346 jt->set_suspend_equivalent(); 3347 // cleared by handle_special_suspend_equivalent_condition() or 3348 // java_suspend_self() via check_and_wait_while_suspended() 3349 3350 HANDLE events[1]; 3351 events[0] = osthread->interrupt_event(); 3352 HighResolutionInterval *phri=NULL; 3353 if (!ForceTimeHighResolution) { 3354 phri = new HighResolutionInterval(ms); 3355 } 3356 if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) { 3357 result = OS_TIMEOUT; 3358 } else { 3359 ResetEvent(osthread->interrupt_event()); 3360 osthread->set_interrupted(false); 3361 result = OS_INTRPT; 3362 } 3363 delete phri; //if it is NULL, harmless 3364 3365 // were we externally suspended while we were waiting? 3366 jt->check_and_wait_while_suspended(); 3367 } else { 3368 assert(!thread->is_Java_thread(), "must not be java thread"); 3369 Sleep((long) ms); 3370 result = OS_TIMEOUT; 3371 } 3372 return result; 3373 } 3374 3375 // Short sleep, direct OS call. 3376 // 3377 // ms = 0, means allow others (if any) to run. 3378 // 3379 void os::naked_short_sleep(jlong ms) { 3380 assert(ms < 1000, "Un-interruptable sleep, short time use only"); 3381 Sleep(ms); 3382 } 3383 3384 // Sleep forever; naked call to OS-specific sleep; use with CAUTION 3385 void os::infinite_sleep() { 3386 while (true) { // sleep forever ... 3387 Sleep(100000); // ... 100 seconds at a time 3388 } 3389 } 3390 3391 typedef BOOL (WINAPI * STTSignature)(void); 3392 3393 void os::naked_yield() { 3394 // Consider passing back the return value from SwitchToThread(). 3395 SwitchToThread(); 3396 } 3397 3398 // Win32 only gives you access to seven real priorities at a time, 3399 // so we compress Java's ten down to seven. It would be better 3400 // if we dynamically adjusted relative priorities. 
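// For example, with the default table below a Java thread at NormPriority (5)
// runs at THREAD_PRIORITY_NORMAL and one at MaxPriority (10) at
// THREAD_PRIORITY_HIGHEST; with -XX:ThreadPriorityPolicy=1, prio_init()
// switches to prio_policy1, which maps MaxPriority to
// THREAD_PRIORITY_TIME_CRITICAL.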
3401 3402 int os::java_to_os_priority[CriticalPriority + 1] = { 3403 THREAD_PRIORITY_IDLE, // 0 Entry should never be used 3404 THREAD_PRIORITY_LOWEST, // 1 MinPriority 3405 THREAD_PRIORITY_LOWEST, // 2 3406 THREAD_PRIORITY_BELOW_NORMAL, // 3 3407 THREAD_PRIORITY_BELOW_NORMAL, // 4 3408 THREAD_PRIORITY_NORMAL, // 5 NormPriority 3409 THREAD_PRIORITY_NORMAL, // 6 3410 THREAD_PRIORITY_ABOVE_NORMAL, // 7 3411 THREAD_PRIORITY_ABOVE_NORMAL, // 8 3412 THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority 3413 THREAD_PRIORITY_HIGHEST, // 10 MaxPriority 3414 THREAD_PRIORITY_HIGHEST // 11 CriticalPriority 3415 }; 3416 3417 int prio_policy1[CriticalPriority + 1] = { 3418 THREAD_PRIORITY_IDLE, // 0 Entry should never be used 3419 THREAD_PRIORITY_LOWEST, // 1 MinPriority 3420 THREAD_PRIORITY_LOWEST, // 2 3421 THREAD_PRIORITY_BELOW_NORMAL, // 3 3422 THREAD_PRIORITY_BELOW_NORMAL, // 4 3423 THREAD_PRIORITY_NORMAL, // 5 NormPriority 3424 THREAD_PRIORITY_ABOVE_NORMAL, // 6 3425 THREAD_PRIORITY_ABOVE_NORMAL, // 7 3426 THREAD_PRIORITY_HIGHEST, // 8 3427 THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority 3428 THREAD_PRIORITY_TIME_CRITICAL, // 10 MaxPriority 3429 THREAD_PRIORITY_TIME_CRITICAL // 11 CriticalPriority 3430 }; 3431 3432 static int prio_init() { 3433 // If ThreadPriorityPolicy is 1, switch tables 3434 if (ThreadPriorityPolicy == 1) { 3435 int i; 3436 for (i = 0; i < CriticalPriority + 1; i++) { 3437 os::java_to_os_priority[i] = prio_policy1[i]; 3438 } 3439 } 3440 if (UseCriticalJavaThreadPriority) { 3441 os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority]; 3442 } 3443 return 0; 3444 } 3445 3446 OSReturn os::set_native_priority(Thread* thread, int priority) { 3447 if (!UseThreadPriorities) return OS_OK; 3448 bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0; 3449 return ret ? OS_OK : OS_ERR; 3450 } 3451 3452 OSReturn os::get_native_priority(const Thread* const thread, 3453 int* priority_ptr) { 3454 if (!UseThreadPriorities) { 3455 *priority_ptr = java_to_os_priority[NormPriority]; 3456 return OS_OK; 3457 } 3458 int os_prio = GetThreadPriority(thread->osthread()->thread_handle()); 3459 if (os_prio == THREAD_PRIORITY_ERROR_RETURN) { 3460 assert(false, "GetThreadPriority failed"); 3461 return OS_ERR; 3462 } 3463 *priority_ptr = os_prio; 3464 return OS_OK; 3465 } 3466 3467 3468 // Hint to the underlying OS that a task switch would not be good. 3469 // Void return because it's a hint and can fail. 3470 void os::hint_no_preempt() {} 3471 3472 void os::interrupt(Thread* thread) { 3473 assert(!thread->is_Java_thread() || Thread::current() == thread || 3474 Threads_lock->owned_by_self(), 3475 "possibility of dangling Thread pointer"); 3476 3477 OSThread* osthread = thread->osthread(); 3478 osthread->set_interrupted(true); 3479 // More than one thread can get here with the same value of osthread, 3480 // resulting in multiple notifications. We do, however, want the store 3481 // to interrupted() to be visible to other threads before we post 3482 // the interrupt event. 
3483 OrderAccess::release(); 3484 SetEvent(osthread->interrupt_event()); 3485 // For JSR166: unpark after setting status 3486 if (thread->is_Java_thread()) { 3487 ((JavaThread*)thread)->parker()->unpark(); 3488 } 3489 3490 ParkEvent * ev = thread->_ParkEvent; 3491 if (ev != NULL) ev->unpark(); 3492 } 3493 3494 3495 bool os::is_interrupted(Thread* thread, bool clear_interrupted) { 3496 assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(), 3497 "possibility of dangling Thread pointer"); 3498 3499 OSThread* osthread = thread->osthread(); 3500 // There is no synchronization between the setting of the interrupt 3501 // and it being cleared here. It is critical - see 6535709 - that 3502 // we only clear the interrupt state, and reset the interrupt event, 3503 // if we are going to report that we were indeed interrupted - else 3504 // an interrupt can be "lost", leading to spurious wakeups or lost wakeups 3505 // depending on the timing. We check the thread's interrupt event to see 3506 // whether it really was interrupted, which prevents spurious wakeups. 3507 bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0); 3508 if (interrupted && clear_interrupted) { 3509 osthread->set_interrupted(false); 3510 ResetEvent(osthread->interrupt_event()); 3511 } // Otherwise leave the interrupted state alone 3512 3513 return interrupted; 3514 } 3515 3516 // Gets a pc (hint) for a running thread. Currently used only for profiling. 3517 ExtendedPC os::get_thread_pc(Thread* thread) { 3518 CONTEXT context; 3519 context.ContextFlags = CONTEXT_CONTROL; 3520 HANDLE handle = thread->osthread()->thread_handle(); 3521 if (GetThreadContext(handle, &context)) { 3522 #ifdef _M_AMD64 3523 return ExtendedPC((address) context.Rip); 3524 #else 3525 return ExtendedPC((address) context.Eip); 3526 #endif 3527 } else { 3528 return ExtendedPC(NULL); 3529 } 3530 } 3531 3532 // GetCurrentThreadId() returns DWORD 3533 intx os::current_thread_id() { return GetCurrentThreadId(); } 3534 3535 static int _initial_pid = 0; 3536 3537 int os::current_process_id() { 3538 return (_initial_pid ?
_initial_pid : _getpid()); 3539 } 3540 3541 int os::win32::_vm_page_size = 0; 3542 int os::win32::_vm_allocation_granularity = 0; 3543 int os::win32::_processor_type = 0; 3544 // Processor level is not available on non-NT systems, use vm_version instead 3545 int os::win32::_processor_level = 0; 3546 julong os::win32::_physical_memory = 0; 3547 size_t os::win32::_default_stack_size = 0; 3548 3549 intx os::win32::_os_thread_limit = 0; 3550 volatile intx os::win32::_os_thread_count = 0; 3551 3552 bool os::win32::_is_windows_server = false; 3553 3554 // 6573254 3555 // Currently, the bug is observed across all the supported Windows releases, 3556 // including the latest one (as of this writing - Windows Server 2012 R2) 3557 bool os::win32::_has_exit_bug = true; 3558 3559 void os::win32::initialize_system_info() { 3560 SYSTEM_INFO si; 3561 GetSystemInfo(&si); 3562 _vm_page_size = si.dwPageSize; 3563 _vm_allocation_granularity = si.dwAllocationGranularity; 3564 _processor_type = si.dwProcessorType; 3565 _processor_level = si.wProcessorLevel; 3566 set_processor_count(si.dwNumberOfProcessors); 3567 3568 MEMORYSTATUSEX ms; 3569 ms.dwLength = sizeof(ms); 3570 3571 // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual, 3572 // dwMemoryLoad (% of memory in use) 3573 GlobalMemoryStatusEx(&ms); 3574 _physical_memory = ms.ullTotalPhys; 3575 3576 if (FLAG_IS_DEFAULT(MaxRAM)) { 3577 // Adjust MaxRAM according to the maximum virtual address space available. 3578 FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual)); 3579 } 3580 3581 OSVERSIONINFOEX oi; 3582 oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); 3583 GetVersionEx((OSVERSIONINFO*)&oi); 3584 switch (oi.dwPlatformId) { 3585 case VER_PLATFORM_WIN32_NT: 3586 { 3587 int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion; 3588 if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER || 3589 oi.wProductType == VER_NT_SERVER) { 3590 _is_windows_server = true; 3591 } 3592 } 3593 break; 3594 default: fatal("Unknown platform"); 3595 } 3596 3597 _default_stack_size = os::current_stack_size(); 3598 assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size"); 3599 assert((_default_stack_size & (_vm_page_size - 1)) == 0, 3600 "stack size not a multiple of page size"); 3601 3602 initialize_performance_counter(); 3603 } 3604 3605 3606 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, 3607 int ebuflen) { 3608 char path[MAX_PATH]; 3609 DWORD size; 3610 DWORD pathLen = (DWORD)sizeof(path); 3611 HINSTANCE result = NULL; 3612 3613 // only allow library name without path component 3614 assert(strchr(name, '\\') == NULL, "path not allowed"); 3615 assert(strchr(name, ':') == NULL, "path not allowed"); 3616 if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) { 3617 jio_snprintf(ebuf, ebuflen, 3618 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name); 3619 return NULL; 3620 } 3621 3622 // search system directory 3623 if ((size = GetSystemDirectory(path, pathLen)) > 0) { 3624 if (size >= pathLen) { 3625 return NULL; // truncated 3626 } 3627 if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) { 3628 return NULL; // truncated 3629 } 3630 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3631 return result; 3632 } 3633 } 3634 3635 // try Windows directory 3636 if ((size = GetWindowsDirectory(path, pathLen)) > 0) { 3637 if (size >= pathLen) { 3638 return NULL; // truncated 3639 } 3640 if (jio_snprintf(path + size, 
pathLen - size, "\\%s", name) == -1) { 3641 return NULL; // truncated 3642 } 3643 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3644 return result; 3645 } 3646 } 3647 3648 jio_snprintf(ebuf, ebuflen, 3649 "os::win32::load_windows_dll() cannot load %s from system directories.", name); 3650 return NULL; 3651 } 3652 3653 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS) 3654 #define EXIT_TIMEOUT 300000 /* 5 minutes */ 3655 3656 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) { 3657 InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect); 3658 return TRUE; 3659 } 3660 3661 int os::win32::exit_process_or_thread(Ept what, int exit_code) { 3662 // Basic approach: 3663 // - Each exiting thread registers its intent to exit and then does so. 3664 // - A thread trying to terminate the process must wait for all 3665 // threads currently exiting to complete their exit. 3666 3667 if (os::win32::has_exit_bug()) { 3668 // The array holds handles of the threads that have started exiting by calling 3669 // _endthreadex(). 3670 // Should be large enough to avoid blocking the exiting thread due to lack of 3671 // a free slot. 3672 static HANDLE handles[MAXIMUM_THREADS_TO_KEEP]; 3673 static int handle_count = 0; 3674 3675 static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT; 3676 static CRITICAL_SECTION crit_sect; 3677 static volatile jint process_exiting = 0; 3678 int i, j; 3679 DWORD res; 3680 HANDLE hproc, hthr; 3681 3682 // We only attempt to register threads until a process exiting 3683 // thread manages to set the process_exiting flag. Any threads 3684 // that come through here after the process_exiting flag is set 3685 // are unregistered and will be caught in the SuspendThread() 3686 // infinite loop below. 3687 bool registered = false; 3688 3689 // The first thread that reached this point, initializes the critical section. 3690 if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) { 3691 warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__); 3692 } else if (OrderAccess::load_acquire(&process_exiting) == 0) { 3693 if (what != EPT_THREAD) { 3694 // Atomically set process_exiting before the critical section 3695 // to increase the visibility between racing threads. 3696 Atomic::cmpxchg((jint)GetCurrentThreadId(), &process_exiting, 0); 3697 } 3698 EnterCriticalSection(&crit_sect); 3699 3700 if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) { 3701 // Remove from the array those handles of the threads that have completed exiting. 3702 for (i = 0, j = 0; i < handle_count; ++i) { 3703 res = WaitForSingleObject(handles[i], 0 /* don't wait */); 3704 if (res == WAIT_TIMEOUT) { 3705 handles[j++] = handles[i]; 3706 } else { 3707 if (res == WAIT_FAILED) { 3708 warning("WaitForSingleObject failed (%u) in %s: %d\n", 3709 GetLastError(), __FILE__, __LINE__); 3710 } 3711 // Don't keep the handle, if we failed waiting for it. 3712 CloseHandle(handles[i]); 3713 } 3714 } 3715 3716 // If there's no free slot in the array of the kept handles, we'll have to 3717 // wait until at least one thread completes exiting. 3718 if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) { 3719 // Raise the priority of the oldest exiting thread to increase its chances 3720 // to complete sooner. 
3721 SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL); 3722 res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT); 3723 if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) { 3724 i = (res - WAIT_OBJECT_0); 3725 handle_count = MAXIMUM_THREADS_TO_KEEP - 1; 3726 for (; i < handle_count; ++i) { 3727 handles[i] = handles[i + 1]; 3728 } 3729 } else { 3730 warning("WaitForMultipleObjects %s (%u) in %s: %d\n", 3731 (res == WAIT_FAILED ? "failed" : "timed out"), 3732 GetLastError(), __FILE__, __LINE__); 3733 // Don't keep handles, if we failed waiting for them. 3734 for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) { 3735 CloseHandle(handles[i]); 3736 } 3737 handle_count = 0; 3738 } 3739 } 3740 3741 // Store a duplicate of the current thread handle in the array of handles. 3742 hproc = GetCurrentProcess(); 3743 hthr = GetCurrentThread(); 3744 if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count], 3745 0, FALSE, DUPLICATE_SAME_ACCESS)) { 3746 warning("DuplicateHandle failed (%u) in %s: %d\n", 3747 GetLastError(), __FILE__, __LINE__); 3748 3749 // We can't register this thread (no more handles) so this thread 3750 // may be racing with a thread that is calling exit(). If the thread 3751 // that is calling exit() has managed to set the process_exiting 3752 // flag, then this thread will be caught in the SuspendThread() 3753 // infinite loop below which closes that race. A small timing 3754 // window remains before the process_exiting flag is set, but it 3755 // is only exposed when we are out of handles. 3756 } else { 3757 ++handle_count; 3758 registered = true; 3759 3760 // The current exiting thread has stored its handle in the array, and now 3761 // should leave the critical section before calling _endthreadex(). 3762 } 3763 3764 } else if (what != EPT_THREAD && handle_count > 0) { 3765 jlong start_time, finish_time, timeout_left; 3766 // Before ending the process, make sure all the threads that had called 3767 // _endthreadex() completed. 3768 3769 // Set the priority level of the current thread to the same value as 3770 // the priority level of exiting threads. 3771 // This is to ensure it will be given a fair chance to execute if 3772 // the timeout expires. 3773 hthr = GetCurrentThread(); 3774 SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL); 3775 start_time = os::javaTimeNanos(); 3776 finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L); 3777 for (i = 0; ; ) { 3778 int portion_count = handle_count - i; 3779 if (portion_count > MAXIMUM_WAIT_OBJECTS) { 3780 portion_count = MAXIMUM_WAIT_OBJECTS; 3781 } 3782 for (j = 0; j < portion_count; ++j) { 3783 SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL); 3784 } 3785 timeout_left = (finish_time - start_time) / 1000000L; 3786 if (timeout_left < 0) { 3787 timeout_left = 0; 3788 } 3789 res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left); 3790 if (res == WAIT_FAILED || res == WAIT_TIMEOUT) { 3791 warning("WaitForMultipleObjects %s (%u) in %s: %d\n", 3792 (res == WAIT_FAILED ? "failed" : "timed out"), 3793 GetLastError(), __FILE__, __LINE__); 3794 // Reset portion_count so we close the remaining 3795 // handles due to this error. 
3796 portion_count = handle_count - i; 3797 } 3798 for (j = 0; j < portion_count; ++j) { 3799 CloseHandle(handles[i + j]); 3800 } 3801 if ((i += portion_count) >= handle_count) { 3802 break; 3803 } 3804 start_time = os::javaTimeNanos(); 3805 } 3806 handle_count = 0; 3807 } 3808 3809 LeaveCriticalSection(&crit_sect); 3810 } 3811 3812 if (!registered && 3813 OrderAccess::load_acquire(&process_exiting) != 0 && 3814 process_exiting != (jint)GetCurrentThreadId()) { 3815 // Some other thread is about to call exit(), so we don't let 3816 // the current unregistered thread proceed to exit() or _endthreadex() 3817 while (true) { 3818 SuspendThread(GetCurrentThread()); 3819 // Avoid busy-wait loop, if SuspendThread() failed. 3820 Sleep(EXIT_TIMEOUT); 3821 } 3822 } 3823 } 3824 3825 // We are here if either 3826 // - there's no 'race at exit' bug on this OS release; 3827 // - initialization of the critical section failed (unlikely); 3828 // - the current thread has registered itself and left the critical section; 3829 // - the process-exiting thread has raised the flag and left the critical section. 3830 if (what == EPT_THREAD) { 3831 _endthreadex((unsigned)exit_code); 3832 } else if (what == EPT_PROCESS) { 3833 ::exit(exit_code); 3834 } else { 3835 _exit(exit_code); 3836 } 3837 3838 // Should not reach here 3839 return exit_code; 3840 } 3841 3842 #undef EXIT_TIMEOUT 3843 3844 void os::win32::setmode_streams() { 3845 _setmode(_fileno(stdin), _O_BINARY); 3846 _setmode(_fileno(stdout), _O_BINARY); 3847 _setmode(_fileno(stderr), _O_BINARY); 3848 } 3849 3850 3851 bool os::is_debugger_attached() { 3852 return IsDebuggerPresent() ? true : false; 3853 } 3854 3855 3856 void os::wait_for_keypress_at_exit(void) { 3857 if (PauseAtExit) { 3858 fprintf(stderr, "Press any key to continue...\n"); 3859 fgetc(stdin); 3860 } 3861 } 3862 3863 3864 bool os::message_box(const char* title, const char* message) { 3865 int result = MessageBox(NULL, message, title, 3866 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY); 3867 return result == IDYES; 3868 } 3869 3870 #ifndef PRODUCT 3871 #ifndef _WIN64 3872 // Helpers to check whether NX protection is enabled 3873 int nx_exception_filter(_EXCEPTION_POINTERS *pex) { 3874 if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 3875 pex->ExceptionRecord->NumberParameters > 0 && 3876 pex->ExceptionRecord->ExceptionInformation[0] == 3877 EXCEPTION_INFO_EXEC_VIOLATION) { 3878 return EXCEPTION_EXECUTE_HANDLER; 3879 } 3880 return EXCEPTION_CONTINUE_SEARCH; 3881 } 3882 3883 void nx_check_protection() { 3884 // If NX is enabled we'll get an exception calling into code on the stack 3885 char code[] = { (char)0xC3 }; // ret 3886 void *code_ptr = (void *)code; 3887 __try { 3888 __asm call code_ptr 3889 } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) { 3890 tty->print_raw_cr("NX protection detected."); 3891 } 3892 } 3893 #endif // _WIN64 3894 #endif // PRODUCT 3895 3896 // This is called _before_ the global arguments have been parsed 3897 void os::init(void) { 3898 _initial_pid = _getpid(); 3899 3900 init_random(1234567); 3901 3902 win32::initialize_system_info(); 3903 win32::setmode_streams(); 3904 init_page_sizes((size_t) win32::vm_page_size()); 3905 3906 // This may be overridden later when argument processing is done. 
3907 FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false); 3908 3909 // Initialize main_process and main_thread 3910 main_process = GetCurrentProcess(); // Remember main_process is a pseudo handle 3911 if (!DuplicateHandle(main_process, GetCurrentThread(), main_process, 3912 &main_thread, THREAD_ALL_ACCESS, false, 0)) { 3913 fatal("DuplicateHandle failed\n"); 3914 } 3915 main_thread_id = (int) GetCurrentThreadId(); 3916 3917 // initialize fast thread access - only used for 32-bit 3918 win32::initialize_thread_ptr_offset(); 3919 } 3920 3921 // To install functions for atexit processing 3922 extern "C" { 3923 static void perfMemory_exit_helper() { 3924 perfMemory_exit(); 3925 } 3926 } 3927 3928 static jint initSock(); 3929 3930 // this is called _after_ the global arguments have been parsed 3931 jint os::init_2(void) { 3932 // Allocate a single page and mark it as readable for safepoint polling 3933 address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY); 3934 guarantee(polling_page != NULL, "Reserve Failed for polling page"); 3935 3936 address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY); 3937 guarantee(return_page != NULL, "Commit Failed for polling page"); 3938 3939 os::set_polling_page(polling_page); 3940 log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page)); 3941 3942 if (!UseMembar) { 3943 address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE); 3944 guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page"); 3945 3946 return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE); 3947 guarantee(return_page != NULL, "Commit Failed for memory serialize page"); 3948 3949 os::set_memory_serialize_page(mem_serialize_page); 3950 log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page)); 3951 } 3952 3953 // Setup Windows Exceptions 3954 3955 // for debugging float code generation bugs 3956 if (ForceFloatExceptions) { 3957 #ifndef _WIN64 3958 static long fp_control_word = 0; 3959 __asm { fstcw fp_control_word } 3960 // see Intel PPro Manual, Vol. 2, p 7-16 3961 const long precision = 0x20; 3962 const long underflow = 0x10; 3963 const long overflow = 0x08; 3964 const long zero_div = 0x04; 3965 const long denorm = 0x02; 3966 const long invalid = 0x01; 3967 fp_control_word |= invalid; 3968 __asm { fldcw fp_control_word } 3969 #endif 3970 } 3971 3972 // If stack_commit_size is 0, windows will reserve the default size, 3973 // but only commit a small portion of it. 3974 size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size()); 3975 size_t default_reserve_size = os::win32::default_stack_size(); 3976 size_t actual_reserve_size = stack_commit_size; 3977 if (stack_commit_size < default_reserve_size) { 3978 // If stack_commit_size == 0, we want this too 3979 actual_reserve_size = default_reserve_size; 3980 } 3981 3982 // Check minimum allowable stack size for thread creation and to initialize 3983 // the java system classes, including StackOverflowError - depends on page 3984 // size. Add two 4K pages for compiler2 recursion in main thread. 3985 // Add in 4*BytesPerWord 4K pages to account for VM stack during 3986 // class initialization depending on 32 or 64 bit VM. 
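// For example (illustrative numbers only): on a 64-bit compiler2 build,
// BytesPerWord is 8, so the last term below works out to
// (4 * 8 + 2) * 4K = 136K on top of the guard and shadow zone sizes.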
3987 size_t min_stack_allowed = 3988 (size_t)(JavaThread::stack_guard_zone_size() + 3989 JavaThread::stack_shadow_zone_size() + 3990 (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K); 3991 3992 min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size()); 3993 3994 if (actual_reserve_size < min_stack_allowed) { 3995 tty->print_cr("\nThe Java thread stack size specified is too small. " 3996 "Specify at least %dk", 3997 min_stack_allowed / K); 3998 return JNI_ERR; 3999 } 4000 4001 JavaThread::set_stack_size_at_create(stack_commit_size); 4002 4003 // Calculate the theoretical max. number of threads to guard against artificial 4004 // out-of-memory situations, where all available address-space has been 4005 // reserved by thread stacks. 4006 assert(actual_reserve_size != 0, "Must have a stack"); 4007 4008 // Calculate the thread limit when we should start doing Virtual Memory 4009 // banging. Currently this is when the threads have used all but 200Mb of space. 4010 // 4011 // TODO: consider performing a similar calculation for commit size instead 4012 // of reserve size, since on a 64-bit platform we'll run into that more 4013 // often than running out of virtual memory space. We can use the 4014 // lower value of the two calculations as the os_thread_limit. 4015 size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K); 4016 win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size); 4017 4018 // at exit methods are called in the reverse order of their registration. 4019 // there is no limit to the number of functions registered. atexit does 4020 // not set errno. 4021 4022 if (PerfAllowAtExitRegistration) { 4023 // only register atexit functions if PerfAllowAtExitRegistration is set. 4024 // atexit functions can be delayed until process exit time, which 4025 // can be problematic for embedded VM situations. Embedded VMs should 4026 // call DestroyJavaVM() to assure that VM resources are released. 4027 4028 // note: perfMemory_exit_helper atexit function may be removed in 4029 // the future if the appropriate cleanup code can be added to the 4030 // VM_Exit VMOperation's doit method.
4031 if (atexit(perfMemory_exit_helper) != 0) { 4032 warning("os::init_2 atexit(perfMemory_exit_helper) failed"); 4033 } 4034 } 4035 4036 #ifndef _WIN64 4037 // Print something if NX is enabled (win32 on AMD64) 4038 NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection()); 4039 #endif 4040 4041 // initialize thread priority policy 4042 prio_init(); 4043 4044 if (UseNUMA && !ForceNUMA) { 4045 UseNUMA = false; // We don't fully support this yet 4046 } 4047 4048 if (UseNUMAInterleaving) { 4049 // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag 4050 bool success = numa_interleaving_init(); 4051 if (!success) UseNUMAInterleaving = false; 4052 } 4053 4054 if (initSock() != JNI_OK) { 4055 return JNI_ERR; 4056 } 4057 4058 if (InitializeDbgHelpEarly) { 4059 SymbolEngine::recalc_search_path(); 4060 } 4061 4062 return JNI_OK; 4063 } 4064 4065 // Mark the polling page as unreadable 4066 void os::make_polling_page_unreadable(void) { 4067 DWORD old_status; 4068 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), 4069 PAGE_NOACCESS, &old_status)) { 4070 fatal("Could not disable polling page"); 4071 } 4072 } 4073 4074 // Mark the polling page as readable 4075 void os::make_polling_page_readable(void) { 4076 DWORD old_status; 4077 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), 4078 PAGE_READONLY, &old_status)) { 4079 fatal("Could not enable polling page"); 4080 } 4081 } 4082 4083 4084 int os::stat(const char *path, struct stat *sbuf) { 4085 char pathbuf[MAX_PATH]; 4086 if (strlen(path) > MAX_PATH - 1) { 4087 errno = ENAMETOOLONG; 4088 return -1; 4089 } 4090 os::native_path(strcpy(pathbuf, path)); 4091 int ret = ::stat(pathbuf, sbuf); 4092 if (sbuf != NULL && UseUTCFileTimestamp) { 4093 // Fix for 6539723. st_mtime returned from stat() is dependent on 4094 // the system timezone and so can return different values for the 4095 // same file if/when daylight savings time changes. This adjustment 4096 // makes sure the same timestamp is returned regardless of the TZ. 4097 // 4098 // See: 4099 // http://msdn.microsoft.com/library/ 4100 // default.asp?url=/library/en-us/sysinfo/base/ 4101 // time_zone_information_str.asp 4102 // and 4103 // http://msdn.microsoft.com/library/default.asp?url= 4104 // /library/en-us/sysinfo/base/settimezoneinformation.asp 4105 // 4106 // NOTE: there is a insidious bug here: If the timezone is changed 4107 // after the call to stat() but before 'GetTimeZoneInformation()', then 4108 // the adjustment we do here will be wrong and we'll return the wrong 4109 // value (which will likely end up creating an invalid class data 4110 // archive). Absent a better API for this, or some time zone locking 4111 // mechanism, we'll have to live with this risk. 4112 TIME_ZONE_INFORMATION tz; 4113 DWORD tzid = GetTimeZoneInformation(&tz); 4114 int daylightBias = 4115 (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias; 4116 sbuf->st_mtime += (tz.Bias + daylightBias) * 60; 4117 } 4118 return ret; 4119 } 4120 4121 4122 #define FT2INT64(ft) \ 4123 ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime)) 4124 4125 4126 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) 4127 // are used by JVM M&M and JVMTI to get user+sys or user CPU time 4128 // of a thread. 4129 // 4130 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns 4131 // the fast estimate available on the platform. 
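// Illustrative sketch (not part of the build): GetThreadTimes() reports CPU
// time in 100-nanosecond FILETIME units, so the implementations below widen
// the two 32-bit halves into a jlong (see FT2INT64 above) and multiply by 100
// to convert to nanoseconds:
//
//   FILETIME ft;                      // e.g. UserTime from GetThreadTimes()
//   jlong ticks = ((jlong)ft.dwHighDateTime << 32) | (julong)ft.dwLowDateTime;
//   jlong nanos = ticks * 100;        // 100ns ticks -> ns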
4132 4133 // current_thread_cpu_time() is not optimized for Windows yet 4134 jlong os::current_thread_cpu_time() { 4135 // return user + sys since the cost is the same 4136 return os::thread_cpu_time(Thread::current(), true /* user+sys */); 4137 } 4138 4139 jlong os::thread_cpu_time(Thread* thread) { 4140 // consistent with what current_thread_cpu_time() returns. 4141 return os::thread_cpu_time(thread, true /* user+sys */); 4142 } 4143 4144 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 4145 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 4146 } 4147 4148 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) { 4149 // This code is copied from the classic VM -> hpi::sysThreadCPUTime 4150 // If this function changes, os::is_thread_cpu_time_supported() should too 4151 FILETIME CreationTime; 4152 FILETIME ExitTime; 4153 FILETIME KernelTime; 4154 FILETIME UserTime; 4155 4156 if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime, 4157 &ExitTime, &KernelTime, &UserTime) == 0) { 4158 return -1; 4159 } else if (user_sys_cpu_time) { 4160 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100; 4161 } else { 4162 return FT2INT64(UserTime) * 100; 4163 } 4164 } 4165 4166 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4167 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4168 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4169 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4170 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4171 } 4172 4173 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4174 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4175 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4176 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4177 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4178 } 4179 4180 bool os::is_thread_cpu_time_supported() { 4181 // see os::thread_cpu_time 4182 FILETIME CreationTime; 4183 FILETIME ExitTime; 4184 FILETIME KernelTime; 4185 FILETIME UserTime; 4186 4187 if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime, 4188 &KernelTime, &UserTime) == 0) { 4189 return false; 4190 } else { 4191 return true; 4192 } 4193 } 4194 4195 // Windows doesn't provide a loadavg primitive so this is stubbed out for now. 4196 // It does have primitives (PDH API) to get CPU usage and run queue length. 4197 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length" 4198 // If we wanted to implement loadavg on Windows, we have a few options: 4199 // 4200 // a) Query CPU usage and run queue length and "fake" an answer by 4201 // returning the CPU usage if it's under 100%, and the run queue 4202 // length otherwise. It turns out that querying is pretty slow 4203 // on Windows, on the order of 200 microseconds on a fast machine. 4204 // Note that on Windows the CPU usage value is the % usage 4205 // since the last time the API was called (and the first call 4206 // returns 100%), so we'd have to deal with that as well. 4207 // 4208 // b) Sample the "fake" answer using a sampling thread and store 4209 // the answer in a global variable. The call to loadavg would 4210 // just return the value of the global, avoiding the slow query. 4211 // 4212 // c) Sample a better answer using exponential decay to smooth the 4213 // value. This is basically the algorithm used by UNIX kernels.
4214 // 4215 // Note that sampling thread starvation could affect both (b) and (c). 4216 int os::loadavg(double loadavg[], int nelem) { 4217 return -1; 4218 } 4219 4220 4221 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield() 4222 bool os::dont_yield() { 4223 return DontYieldALot; 4224 } 4225 4226 // This method is a slightly reworked copy of JDK's sysOpen 4227 // from src/windows/hpi/src/sys_api_md.c 4228 4229 int os::open(const char *path, int oflag, int mode) { 4230 char pathbuf[MAX_PATH]; 4231 4232 if (strlen(path) > MAX_PATH - 1) { 4233 errno = ENAMETOOLONG; 4234 return -1; 4235 } 4236 os::native_path(strcpy(pathbuf, path)); 4237 return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); 4238 } 4239 4240 FILE* os::open(int fd, const char* mode) { 4241 return ::_fdopen(fd, mode); 4242 } 4243 4244 // Is a (classpath) directory empty? 4245 bool os::dir_is_empty(const char* path) { 4246 WIN32_FIND_DATA fd; 4247 HANDLE f = FindFirstFile(path, &fd); 4248 if (f == INVALID_HANDLE_VALUE) { 4249 return true; 4250 } 4251 FindClose(f); 4252 return false; 4253 } 4254 4255 // create binary file, rewriting existing file if required 4256 int os::create_binary_file(const char* path, bool rewrite_existing) { 4257 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY; 4258 if (!rewrite_existing) { 4259 oflags |= _O_EXCL; 4260 } 4261 return ::open(path, oflags, _S_IREAD | _S_IWRITE); 4262 } 4263 4264 // return current position of file pointer 4265 jlong os::current_file_offset(int fd) { 4266 return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR); 4267 } 4268 4269 // move file pointer to the specified offset 4270 jlong os::seek_to_file_offset(int fd, jlong offset) { 4271 return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET); 4272 } 4273 4274 4275 jlong os::lseek(int fd, jlong offset, int whence) { 4276 return (jlong) ::_lseeki64(fd, offset, whence); 4277 } 4278 4279 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) { 4280 OVERLAPPED ov; 4281 DWORD nread; 4282 BOOL result; 4283 4284 ZeroMemory(&ov, sizeof(ov)); 4285 ov.Offset = (DWORD)offset; 4286 ov.OffsetHigh = (DWORD)(offset >> 32); 4287 4288 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4289 4290 result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov); 4291 4292 return result ? nread : 0; 4293 } 4294 4295 4296 // This method is a slightly reworked copy of JDK's sysNativePath 4297 // from src/windows/hpi/src/path_md.c 4298 4299 // Convert a pathname to native format. On win32, this involves forcing all 4300 // separators to be '\\' rather than '/' (both are legal inputs, but Win95 4301 // sometimes rejects '/') and removing redundant separators. The input path is 4302 // assumed to have been converted into the character encoding used by the local 4303 // system. Because this might be a double-byte encoding, care is taken to 4304 // treat double-byte lead characters correctly. 4305 // 4306 // This procedure modifies the given path in place, as the result is never 4307 // longer than the original. There is no error return; this operation always 4308 // succeeds. 
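// Illustrative usage sketch (not part of the build); the buffers are made-up
// examples of the conversions described above:
//
//   char p1[] = "/c:/dir//sub/";   // becomes "c:\dir\sub"
//   char p2[] = "//host/share/";   // becomes "\\host\share"
//   os::native_path(p1);
//   os::native_path(p2);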
4309 char * os::native_path(char *path) { 4310 char *src = path, *dst = path, *end = path; 4311 char *colon = NULL; // If a drive specifier is found, this will 4312 // point to the colon following the drive letter 4313 4314 // Assumption: '/', '\\', ':', and drive letters are never lead bytes 4315 assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\')) 4316 && (!::IsDBCSLeadByte(':'))), "Illegal lead byte"); 4317 4318 // Check for leading separators 4319 #define isfilesep(c) ((c) == '/' || (c) == '\\') 4320 while (isfilesep(*src)) { 4321 src++; 4322 } 4323 4324 if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') { 4325 // Remove leading separators if followed by drive specifier. This 4326 // hack is necessary to support file URLs containing drive 4327 // specifiers (e.g., "file://c:/path"). As a side effect, 4328 // "/c:/path" can be used as an alternative to "c:/path". 4329 *dst++ = *src++; 4330 colon = dst; 4331 *dst++ = ':'; 4332 src++; 4333 } else { 4334 src = path; 4335 if (isfilesep(src[0]) && isfilesep(src[1])) { 4336 // UNC pathname: Retain first separator; leave src pointed at 4337 // second separator so that further separators will be collapsed 4338 // into the second separator. The result will be a pathname 4339 // beginning with "\\\\" followed (most likely) by a host name. 4340 src = dst = path + 1; 4341 path[0] = '\\'; // Force first separator to '\\' 4342 } 4343 } 4344 4345 end = dst; 4346 4347 // Remove redundant separators from remainder of path, forcing all 4348 // separators to be '\\' rather than '/'. Also, single byte space 4349 // characters are removed from the end of the path because those 4350 // are not legal ending characters on this operating system. 4351 // 4352 while (*src != '\0') { 4353 if (isfilesep(*src)) { 4354 *dst++ = '\\'; src++; 4355 while (isfilesep(*src)) src++; 4356 if (*src == '\0') { 4357 // Check for trailing separator 4358 end = dst; 4359 if (colon == dst - 2) break; // "z:\\" 4360 if (dst == path + 1) break; // "\\" 4361 if (dst == path + 2 && isfilesep(path[0])) { 4362 // "\\\\" is not collapsed to "\\" because "\\\\" marks the 4363 // beginning of a UNC pathname. Even though it is not, by 4364 // itself, a valid UNC pathname, we leave it as is in order 4365 // to be consistent with the path canonicalizer as well 4366 // as the win32 APIs, which treat this case as an invalid 4367 // UNC pathname rather than as an alias for the root 4368 // directory of the current drive. 4369 break; 4370 } 4371 end = --dst; // Path does not denote a root directory, so 4372 // remove trailing separator 4373 break; 4374 } 4375 end = dst; 4376 } else { 4377 if (::IsDBCSLeadByte(*src)) { // Copy a double-byte character 4378 *dst++ = *src++; 4379 if (*src) *dst++ = *src++; 4380 end = dst; 4381 } else { // Copy a single-byte character 4382 char c = *src++; 4383 *dst++ = c; 4384 // Space is not a legal ending character 4385 if (c != ' ') end = dst; 4386 } 4387 } 4388 } 4389 4390 *end = '\0'; 4391 4392 // For "z:", add "." 
to work around a bug in the C runtime library 4393 if (colon == dst - 1) { 4394 path[2] = '.'; 4395 path[3] = '\0'; 4396 } 4397 4398 return path; 4399 } 4400 4401 // This code is a copy of JDK's sysSetLength 4402 // from src/windows/hpi/src/sys_api_md.c 4403 4404 int os::ftruncate(int fd, jlong length) { 4405 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4406 long high = (long)(length >> 32); 4407 DWORD ret; 4408 4409 if (h == (HANDLE)(-1)) { 4410 return -1; 4411 } 4412 4413 ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN); 4414 if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) { 4415 return -1; 4416 } 4417 4418 if (::SetEndOfFile(h) == FALSE) { 4419 return -1; 4420 } 4421 4422 return 0; 4423 } 4424 4425 int os::get_fileno(FILE* fp) { 4426 return _fileno(fp); 4427 } 4428 4429 // This code is a copy of JDK's sysSync 4430 // from src/windows/hpi/src/sys_api_md.c 4431 // except for the legacy workaround for a bug in Win 98 4432 4433 int os::fsync(int fd) { 4434 HANDLE handle = (HANDLE)::_get_osfhandle(fd); 4435 4436 if ((!::FlushFileBuffers(handle)) && 4437 (GetLastError() != ERROR_ACCESS_DENIED)) { 4438 // from winerror.h 4439 return -1; 4440 } 4441 return 0; 4442 } 4443 4444 static int nonSeekAvailable(int, long *); 4445 static int stdinAvailable(int, long *); 4446 4447 #define S_ISCHR(mode) (((mode) & _S_IFCHR) == _S_IFCHR) 4448 #define S_ISFIFO(mode) (((mode) & _S_IFIFO) == _S_IFIFO) 4449 4450 // This code is a copy of JDK's sysAvailable 4451 // from src/windows/hpi/src/sys_api_md.c 4452 4453 int os::available(int fd, jlong *bytes) { 4454 jlong cur, end; 4455 struct _stati64 stbuf64; 4456 4457 if (::_fstati64(fd, &stbuf64) >= 0) { 4458 int mode = stbuf64.st_mode; 4459 if (S_ISCHR(mode) || S_ISFIFO(mode)) { 4460 int ret; 4461 long lpbytes; 4462 if (fd == 0) { 4463 ret = stdinAvailable(fd, &lpbytes); 4464 } else { 4465 ret = nonSeekAvailable(fd, &lpbytes); 4466 } 4467 (*bytes) = (jlong)(lpbytes); 4468 return ret; 4469 } 4470 if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) { 4471 return FALSE; 4472 } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) { 4473 return FALSE; 4474 } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) { 4475 return FALSE; 4476 } 4477 *bytes = end - cur; 4478 return TRUE; 4479 } else { 4480 return FALSE; 4481 } 4482 } 4483 4484 void os::flockfile(FILE* fp) { 4485 _lock_file(fp); 4486 } 4487 4488 void os::funlockfile(FILE* fp) { 4489 _unlock_file(fp); 4490 } 4491 4492 // This code is a copy of JDK's nonSeekAvailable 4493 // from src/windows/hpi/src/sys_api_md.c 4494 4495 static int nonSeekAvailable(int fd, long *pbytes) { 4496 // This is used for available on non-seekable devices 4497 // (like both named and anonymous pipes, such as pipes 4498 // connected to an exec'd process). 4499 // Standard Input is a special case. 4500 HANDLE han; 4501 4502 if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) { 4503 return FALSE; 4504 } 4505 4506 if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) { 4507 // PeekNamedPipe fails when at EOF. In that case we 4508 // simply make *pbytes = 0 which is consistent with the 4509 // behavior we get on Solaris when an fd is at EOF. 4510 // The only alternative is to raise an Exception, 4511 // which isn't really warranted. 
4512 // 4513 if (::GetLastError() != ERROR_BROKEN_PIPE) { 4514 return FALSE; 4515 } 4516 *pbytes = 0; 4517 } 4518 return TRUE; 4519 } 4520 4521 #define MAX_INPUT_EVENTS 2000 4522 4523 // This code is a copy of JDK's stdinAvailable 4524 // from src/windows/hpi/src/sys_api_md.c 4525 4526 static int stdinAvailable(int fd, long *pbytes) { 4527 HANDLE han; 4528 DWORD numEventsRead = 0; // Number of events read from buffer 4529 DWORD numEvents = 0; // Number of events in buffer 4530 DWORD i = 0; // Loop index 4531 DWORD curLength = 0; // Position marker 4532 DWORD actualLength = 0; // Number of bytes readable 4533 BOOL error = FALSE; // Error holder 4534 INPUT_RECORD *lpBuffer; // Pointer to records of input events 4535 4536 if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) { 4537 return FALSE; 4538 } 4539 4540 // Construct an array of input records in the console buffer 4541 error = ::GetNumberOfConsoleInputEvents(han, &numEvents); 4542 if (error == 0) { 4543 return nonSeekAvailable(fd, pbytes); 4544 } 4545 4546 // lpBuffer must fit into 64K or else PeekConsoleInput fails 4547 if (numEvents > MAX_INPUT_EVENTS) { 4548 numEvents = MAX_INPUT_EVENTS; 4549 } 4550 4551 lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal); 4552 if (lpBuffer == NULL) { 4553 return FALSE; 4554 } 4555 4556 error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead); 4557 if (error == 0) { 4558 os::free(lpBuffer); 4559 return FALSE; 4560 } 4561 4562 // Examine input records for the number of bytes available 4563 for (i=0; i<numEvents; i++) { 4564 if (lpBuffer[i].EventType == KEY_EVENT) { 4565 4566 KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *) 4567 &(lpBuffer[i].Event); 4568 if (keyRecord->bKeyDown == TRUE) { 4569 CHAR *keyPressed = (CHAR *) &(keyRecord->uChar); 4570 curLength++; 4571 if (*keyPressed == '\r') { 4572 actualLength = curLength; 4573 } 4574 } 4575 } 4576 } 4577 4578 if (lpBuffer != NULL) { 4579 os::free(lpBuffer); 4580 } 4581 4582 *pbytes = (long) actualLength; 4583 return TRUE; 4584 } 4585 4586 // Map a block of memory. 4587 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 4588 char *addr, size_t bytes, bool read_only, 4589 bool allow_exec) { 4590 HANDLE hFile; 4591 char* base; 4592 4593 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL, 4594 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); 4595 if (hFile == NULL) { 4596 log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError()); 4597 return NULL; 4598 } 4599 4600 if (allow_exec) { 4601 // CreateFileMapping/MapViewOfFileEx can't map executable memory 4602 // unless it comes from a PE image (which the shared archive is not.) 4603 // Even VirtualProtect refuses to give execute access to mapped memory 4604 // that was not previously executable. 4605 // 4606 // Instead, stick the executable region in anonymous memory. Yuck. 4607 // Penalty is that ~4 pages will not be shareable - in the future 4608 // we might consider DLLizing the shared archive with a proper PE 4609 // header so that mapping executable + sharing is possible. 
4610 4611 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE, 4612 PAGE_READWRITE); 4613 if (base == NULL) { 4614 log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError()); 4615 CloseHandle(hFile); 4616 return NULL; 4617 } 4618 4619 DWORD bytes_read; 4620 OVERLAPPED overlapped; 4621 overlapped.Offset = (DWORD)file_offset; 4622 overlapped.OffsetHigh = 0; 4623 overlapped.hEvent = NULL; 4624 // ReadFile guarantees that if the return value is true, the requested 4625 // number of bytes were read before returning. 4626 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; 4627 if (!res) { 4628 log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError()); 4629 release_memory(base, bytes); 4630 CloseHandle(hFile); 4631 return NULL; 4632 } 4633 } else { 4634 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, 4635 NULL /* file_name */); 4636 if (hMap == NULL) { 4637 log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError()); 4638 CloseHandle(hFile); 4639 return NULL; 4640 } 4641 4642 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; 4643 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, 4644 (DWORD)bytes, addr); 4645 if (base == NULL) { 4646 log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError()); 4647 CloseHandle(hMap); 4648 CloseHandle(hFile); 4649 return NULL; 4650 } 4651 4652 if (CloseHandle(hMap) == 0) { 4653 log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError()); 4654 CloseHandle(hFile); 4655 return base; 4656 } 4657 } 4658 4659 if (allow_exec) { 4660 DWORD old_protect; 4661 DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; 4662 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; 4663 4664 if (!res) { 4665 log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError()); 4666 // Don't consider this a hard error, on IA32 even if the 4667 // VirtualProtect fails, we should still be able to execute 4668 CloseHandle(hFile); 4669 return base; 4670 } 4671 } 4672 4673 if (CloseHandle(hFile) == 0) { 4674 log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError()); 4675 return base; 4676 } 4677 4678 return base; 4679 } 4680 4681 4682 // Remap a block of memory. 4683 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4684 char *addr, size_t bytes, bool read_only, 4685 bool allow_exec) { 4686 // This OS does not allow existing memory maps to be remapped so we 4687 // have to unmap the memory before we remap it. 4688 if (!os::unmap_memory(addr, bytes)) { 4689 return NULL; 4690 } 4691 4692 // There is a very small theoretical window between the unmap_memory() 4693 // call above and the map_memory() call below where a thread in native 4694 // code may be able to access an address that is no longer mapped. 4695 4696 return os::map_memory(fd, file_name, file_offset, addr, bytes, 4697 read_only, allow_exec); 4698 } 4699 4700 4701 // Unmap a block of memory. 4702 // Returns true=success, otherwise false. 4703 4704 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4705 MEMORY_BASIC_INFORMATION mem_info; 4706 if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) { 4707 log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError()); 4708 return false; 4709 } 4710 4711 // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx. 4712 // Instead, executable region was allocated using VirtualAlloc(). 
See 4713 // pd_map_memory() above. 4714 // 4715 // The following flags should match the 'exec_access' flages used for 4716 // VirtualProtect() in pd_map_memory(). 4717 if (mem_info.Protect == PAGE_EXECUTE_READ || 4718 mem_info.Protect == PAGE_EXECUTE_READWRITE) { 4719 return pd_release_memory(addr, bytes); 4720 } 4721 4722 BOOL result = UnmapViewOfFile(addr); 4723 if (result == 0) { 4724 log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError()); 4725 return false; 4726 } 4727 return true; 4728 } 4729 4730 void os::pause() { 4731 char filename[MAX_PATH]; 4732 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4733 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4734 } else { 4735 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4736 } 4737 4738 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4739 if (fd != -1) { 4740 struct stat buf; 4741 ::close(fd); 4742 while (::stat(filename, &buf) == 0) { 4743 Sleep(100); 4744 } 4745 } else { 4746 jio_fprintf(stderr, 4747 "Could not open pause file '%s', continuing immediately.\n", filename); 4748 } 4749 } 4750 4751 Thread* os::ThreadCrashProtection::_protected_thread = NULL; 4752 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL; 4753 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0; 4754 4755 os::ThreadCrashProtection::ThreadCrashProtection() { 4756 } 4757 4758 // See the caveats for this class in os_windows.hpp 4759 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4760 // into this method and returns false. If no OS EXCEPTION was raised, returns 4761 // true. 4762 // The callback is supposed to provide the method that should be protected. 4763 // 4764 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) { 4765 4766 Thread::muxAcquire(&_crash_mux, "CrashProtection"); 4767 4768 _protected_thread = Thread::current_or_null(); 4769 assert(_protected_thread != NULL, "Cannot crash protect a NULL thread"); 4770 4771 bool success = true; 4772 __try { 4773 _crash_protection = this; 4774 cb.call(); 4775 } __except(EXCEPTION_EXECUTE_HANDLER) { 4776 // only for protection, nothing to do 4777 success = false; 4778 } 4779 _crash_protection = NULL; 4780 _protected_thread = NULL; 4781 Thread::muxRelease(&_crash_mux); 4782 return success; 4783 } 4784 4785 // An Event wraps a win32 "CreateEvent" kernel handle. 4786 // 4787 // We have a number of choices regarding "CreateEvent" win32 handle leakage: 4788 // 4789 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle 4790 // field, and call CloseHandle() on the win32 event handle. Unpark() would 4791 // need to be modified to tolerate finding a NULL (invalid) win32 event handle. 4792 // In addition, an unpark() operation might fetch the handle field, but the 4793 // event could recycle between the fetch and the SetEvent() operation. 4794 // SetEvent() would either fail because the handle was invalid, or inadvertently work, 4795 // as the win32 handle value had been recycled. In an ideal world calling SetEvent() 4796 // on an stale but recycled handle would be harmless, but in practice this might 4797 // confuse other non-Sun code, so it's not a viable approach. 4798 // 4799 // 2: Once a win32 event handle is associated with an Event, it remains associated 4800 // with the Event. The event handle is never closed. 
This could be construed 4801 // as handle leakage, but only up to the maximum # of threads that have been extant 4802 // at any one time. This shouldn't be an issue, as windows platforms typically 4803 // permit a process to have hundreds of thousands of open handles. 4804 // 4805 // 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList 4806 // and release unused handles. 4807 // 4808 // 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle. 4809 // It's not clear, however, that we wouldn't be trading one type of leak for another. 4810 // 4811 // 5. Use an RCU-like mechanism (Read-Copy Update). 4812 // Or perhaps something similar to Maged Michael's "Hazard pointers". 4813 // 4814 // We use (2). 4815 // 4816 // TODO-FIXME: 4817 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation. 4818 // 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks 4819 // to recover from (or at least detect) the dreaded Windows 841176 bug. 4820 // 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent 4821 // into a single win32 CreateEvent() handle. 4822 // 4823 // Assumption: 4824 // Only one parker can exist on an event, which is why we allocate 4825 // them per-thread. Multiple unparkers can coexist. 4826 // 4827 // _Event transitions in park() 4828 // -1 => -1 : illegal 4829 // 1 => 0 : pass - return immediately 4830 // 0 => -1 : block; then set _Event to 0 before returning 4831 // 4832 // _Event transitions in unpark() 4833 // 0 => 1 : just return 4834 // 1 => 1 : just return 4835 // -1 => either 0 or 1; must signal target thread 4836 // That is, we can safely transition _Event from -1 to either 4837 // 0 or 1. 4838 // 4839 // _Event serves as a restricted-range semaphore. 4840 // -1 : thread is blocked, i.e. there is a waiter 4841 // 0 : neutral: thread is running or ready, 4842 // could have been signaled after a wait started 4843 // 1 : signaled - thread is running or ready 4844 // 4845 // Another possible encoding of _Event would be with 4846 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits. 4847 // 4848 4849 int os::PlatformEvent::park(jlong Millis) { 4850 // Transitions for _Event: 4851 // -1 => -1 : illegal 4852 // 1 => 0 : pass - return immediately 4853 // 0 => -1 : block; then set _Event to 0 before returning 4854 4855 guarantee(_ParkHandle != NULL , "Invariant"); 4856 guarantee(Millis > 0 , "Invariant"); 4857 4858 // CONSIDER: defer assigning a CreateEvent() handle to the Event until 4859 // the initial park() operation. 4860 // Consider: use atomic decrement instead of CAS-loop 4861 4862 int v; 4863 for (;;) { 4864 v = _Event; 4865 if (Atomic::cmpxchg(v-1, &_Event, v) == v) break; 4866 } 4867 guarantee((v == 0) || (v == 1), "invariant"); 4868 if (v != 0) return OS_OK; 4869 4870 // Do this the hard way by blocking ... 4871 // TODO: consider a brief spin here, gated on the success of recent 4872 // spin attempts by this thread. 4873 // 4874 // We decompose long timeouts into series of shorter timed waits. 4875 // Evidently large timo values passed in WaitForSingleObject() are problematic on some 4876 // versions of Windows. See EventWait() for details. This may be superstition. Or not. 4877 // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time 4878 // with os::javaTimeNanos(). 
Furthermore, we assume that spurious returns from 4879 // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend 4880 // to happen early in the wait interval. Specifically, after a spurious wakeup (rv == 4881 // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate 4882 // for the already waited time. This policy does not admit any new outcomes. 4883 // In the future, however, we might want to track the accumulated wait time and 4884 // adjust Millis accordingly if we encounter a spurious wakeup. 4885 4886 const int MAXTIMEOUT = 0x10000000; 4887 DWORD rv = WAIT_TIMEOUT; 4888 while (_Event < 0 && Millis > 0) { 4889 DWORD prd = Millis; // set prd = MIN (Millis, MAXTIMEOUT) 4890 if (Millis > MAXTIMEOUT) { 4891 prd = MAXTIMEOUT; 4892 } 4893 rv = ::WaitForSingleObject(_ParkHandle, prd); 4894 assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed"); 4895 if (rv == WAIT_TIMEOUT) { 4896 Millis -= prd; 4897 } 4898 } 4899 v = _Event; 4900 _Event = 0; 4901 // see comment at end of os::PlatformEvent::park() below: 4902 OrderAccess::fence(); 4903 // If we encounter a nearly simultaneous timeout expiry and unpark() 4904 // we return OS_OK indicating we awoke via unpark(). 4905 // Implementor's license -- returning OS_TIMEOUT would be equally valid, however. 4906 return (v >= 0) ? OS_OK : OS_TIMEOUT; 4907 } 4908 4909 void os::PlatformEvent::park() { 4910 // Transitions for _Event: 4911 // -1 => -1 : illegal 4912 // 1 => 0 : pass - return immediately 4913 // 0 => -1 : block; then set _Event to 0 before returning 4914 4915 guarantee(_ParkHandle != NULL, "Invariant"); 4916 // Invariant: Only the thread associated with the Event/PlatformEvent 4917 // may call park(). 4918 // Consider: use atomic decrement instead of CAS-loop 4919 int v; 4920 for (;;) { 4921 v = _Event; 4922 if (Atomic::cmpxchg(v-1, &_Event, v) == v) break; 4923 } 4924 guarantee((v == 0) || (v == 1), "invariant"); 4925 if (v != 0) return; 4926 4927 // Do this the hard way by blocking ... 4928 // TODO: consider a brief spin here, gated on the success of recent 4929 // spin attempts by this thread. 4930 while (_Event < 0) { 4931 DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE); 4932 assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed"); 4933 } 4934 4935 // Usually we'll find _Event == 0 at this point, but as 4936 // an optional optimization we clear it, just in case 4937 // multiple unpark() operations drove _Event up to 1. 4938 _Event = 0; 4939 OrderAccess::fence(); 4940 guarantee(_Event >= 0, "invariant"); 4941 } 4942 4943 void os::PlatformEvent::unpark() { 4944 guarantee(_ParkHandle != NULL, "Invariant"); 4945 4946 // Transitions for _Event: 4947 // 0 => 1 : just return 4948 // 1 => 1 : just return 4949 // -1 => either 0 or 1; must signal target thread 4950 // That is, we can safely transition _Event from -1 to either 4951 // 0 or 1. 4952 // See also: "Semaphores in Plan 9" by Mullender & Cox 4953 // 4954 // Note: Forcing a transition from "-1" to "1" on an unpark() means 4955 // that it will take two back-to-back park() calls for the owning 4956 // thread to block. This has the benefit of forcing a spurious return 4957 // from the first park() call after an unpark() call which will help 4958 // shake out uses of park() and unpark() without condition variables.
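// Atomic::xchg() returns the previous value of _Event: 0 or 1 means no thread
// is blocked in park(), so there is nothing to signal; -1 means a waiter is
// (or soon will be) blocked on _ParkHandle and needs the SetEvent() below.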

// JSR166
// -------------------------------------------------------

// The Windows implementation of Park is very straightforward: Basic
// operations on Win32 Events turn out to have the right semantics to
// use them directly.  We opportunistically reuse the event inherited
// from Monitor.

void Parker::park(bool isAbsolute, jlong time) {
  guarantee(_ParkEvent != NULL, "invariant");
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  } else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  } else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) { // already elapsed
      return;
    }
  } else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0) { // Wait for the minimal time unit if zero
      time = 1;
    }
  }

  JavaThread* thread = JavaThread::current();

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    ResetEvent(_ParkEvent);
    return;
  } else {
    ThreadBlockInVM tbivm(thread);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    thread->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (thread->handle_special_suspend_equivalent_condition()) {
      thread->java_suspend_self();
    }
  }
}

void Parker::unpark() {
  guarantee(_ParkEvent != NULL, "invariant");
  SetEvent(_ParkEvent);
}
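
// The time-argument decoding at the top of Parker::park() can be read as a
// pure function from (isAbsolute, time) to a Win32 wait interval.  A sketch
// (hypothetical helper, not used by the code above); a negative result
// means "do not wait at all".
#if 0
static jlong decode_park_millis(bool isAbsolute, jlong time) {
  if (time < 0)                 return -1;        // don't wait
  if (time == 0 && !isAbsolute) return INFINITE;  // park indefinitely
  if (isAbsolute) {
    jlong millis = time - os::javaTimeMillis();   // deadline -> relative millis
    return (millis <= 0) ? -1 : millis;           // already elapsed?
  }
  jlong millis = time / 1000000;                  // coarsen nanos -> millis
  return (millis == 0) ? 1 : millis;              // wait at least 1 ms
}
#endif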

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't create a new process).
int os::fork_and_exec(char* cmd) {
  STARTUPINFO si;
  PROCESS_INFORMATION pi;
  DWORD exit_code;

  char * cmd_string;
  const char * cmd_prefix = "cmd /C ";
  size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
  cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
  if (cmd_string == NULL) {
    return -1;
  }
  cmd_string[0] = '\0';
  strcat(cmd_string, cmd_prefix);
  strcat(cmd_string, cmd);

  // now replace all '\n' with '&'
  char * substring = cmd_string;
  while ((substring = strchr(substring, '\n')) != NULL) {
    substring[0] = '&';
    substring++;
  }
  memset(&si, 0, sizeof(si));
  si.cb = sizeof(si);
  memset(&pi, 0, sizeof(pi));
  BOOL rslt = CreateProcess(NULL,        // executable name - use command line
                            cmd_string,  // command line
                            NULL,        // process security attribute
                            NULL,        // thread security attribute
                            TRUE,        // inherits system handles
                            0,           // no creation flags
                            NULL,        // use parent's environment block
                            NULL,        // use parent's starting directory
                            &si,         // (in) startup information
                            &pi);        // (out) process information

  if (rslt) {
    // Wait until child process exits.
    WaitForSingleObject(pi.hProcess, INFINITE);

    GetExitCodeProcess(pi.hProcess, &exit_code);

    // Close process and thread handles.
    CloseHandle(pi.hProcess);
    CloseHandle(pi.hThread);
  } else {
    exit_code = -1;
  }

  FREE_C_HEAP_ARRAY(char, cmd_string);
  return (int)exit_code;
}
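
// Usage sketch for os::fork_and_exec() above (illustrative only): embedded
// newlines are rewritten to '&', so a multi-line script runs as a single
// "cmd /C" command line.
#if 0
static void fork_and_exec_example() {
  char cmd[] = "echo first\necho second";   // runs as: cmd /C echo first&echo second
  int exit_code = os::fork_and_exec(cmd);   // -1 if the child could not be created
  tty->print_cr("child exited with %d", exit_code);
}
#endif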

bool os::find(address addr, outputStream* st) {
  int offset = -1;
  bool result = false;
  char buf[256];
  if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
    st->print(PTR_FORMAT " ", addr);
    if (strlen(buf) < sizeof(buf) - 1) {
      char* p = strrchr(buf, '\\');
      if (p) {
        st->print("%s", p + 1);
      } else {
        st->print("%s", buf);
      }
    } else {
      // The library name is probably truncated. Omit it.
      // See also JDK-8147512.
    }
    if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
      st->print("::%s + 0x%x", buf, offset);
    }
    st->cr();
    result = true;
  }
  return result;
}

LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
  DWORD exception_code = e->ExceptionRecord->ExceptionCode;

  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    JavaThread* thread = JavaThread::current();
    PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (os::is_memory_serialize_page(thread, addr)) {
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  return EXCEPTION_CONTINUE_SEARCH;
}

// We don't build a headless jre for Windows
bool os::is_headless_jre() { return false; }

static jint initSock() {
  WSADATA wsadata;

  if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
    jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
                ::GetLastError());
    return JNI_ERR;
  }
  return JNI_OK;
}

struct hostent* os::get_host_by_name(char* name) {
  return (struct hostent*)gethostbyname(name);
}

int os::socket_close(int fd) {
  return ::closesocket(fd);
}

int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}

int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  return ::connect(fd, him, len);
}

int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  return ::recv(fd, buf, (int)nBytes, flags);
}

int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(IA32)
#define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined(AMD64)
#define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif

// returns true if thread could be suspended,
// false otherwise
static bool do_suspend(HANDLE* h) {
  if (h != NULL) {
    if (SuspendThread(*h) != ~0) {
      return true;
    }
  }
  return false;
}

// resume the thread
// calling resume on an active thread is a no-op
static void do_resume(HANDLE* h) {
  if (h != NULL) {
    ResumeThread(*h);
  }
}

// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value.
void get_thread_handle_for_extended_context(HANDLE* h,
                                            OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

// Thread sampling implementation
//
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}
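
// The suspend/sample/resume pattern used by internal_do_task() above can be
// reduced to the following sketch (illustrative only; error handling and the
// SuspendedThreadTaskContext plumbing are omitted, and the function name is
// hypothetical).
#if 0
static bool sample_thread_pc(OSThread::thread_id_t tid, address* pc_out) {
  HANDLE h = NULL;
  get_thread_handle_for_extended_context(&h, tid);
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return false;
  }
  bool ok = false;
  if (do_suspend(&h)) {
    CONTEXT ctxt;
    ctxt.ContextFlags = sampling_context_flags;
    if (GetThreadContext(h, &ctxt)) {
#if defined(AMD64)
      *pc_out = (address)ctxt.Rip;   // program counter of the suspended thread
#else
      *pc_out = (address)ctxt.Eip;
#endif
      ok = true;
    }
    do_resume(&h);
  }
  CloseHandle(h);
  return ok;
}
#endif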

bool os::start_debugging(char *buf, int buflen) {
  int len = (int)strlen(buf);
  char *p = &buf[len];

  jio_snprintf(p, buflen-len,
               "\n\n"
               "Do you want to debug the problem?\n\n"
               "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
               "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
               "Otherwise, select 'No' to abort...",
               os::current_process_id(), os::current_thread_id());

  bool yes = os::message_box("Unexpected Error", buf);

  if (yes) {
    // os::breakpoint() calls DebugBreak(), which causes a breakpoint
    // exception. If VM is running inside a debugger, the debugger will
    // catch the exception. Otherwise, the breakpoint exception will reach
    // the default Windows exception handler, which can spawn a debugger and
    // automatically attach to the dying VM.
    os::breakpoint();
    yes = false;
  }
  return yes;
}

void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for Windows, takes __stdcall names into account.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//                             == false if only the base name of the library is passed in
//                                     such as "L"
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}
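
// Worked examples (illustrative only) of the mangling performed by
// os::build_agent_function_name(); the helper below is hypothetical and the
// returned strings must be freed by the caller.
#if 0
static void agent_name_examples() {
  // __stdcall-decorated symbol plus a plain library base name:
  //   "_Agent_OnLoad@16" + "L"           ->  "_Agent_OnLoad_L@16"
  char* n1 = os::build_agent_function_name("_Agent_OnLoad@16", "L", false);
  // Absolute path: directory, JNI_LIB_PREFIX and JNI_LIB_SUFFIX are stripped:
  //   "Agent_OnLoad" + "C:\\dir\\L.dll"  ->  "Agent_OnLoad_L"
  char* n2 = os::build_agent_function_name("Agent_OnLoad", "C:\\dir\\L.dll", true);
  // No library name: the symbol is returned unchanged.
  char* n3 = os::build_agent_function_name("Agent_OnUnload", NULL, false);
  FREE_C_HEAP_ARRAY(char, n1);
  FREE_C_HEAP_ARRAY(char, n2);
  FREE_C_HEAP_ARRAY(char, n3);
}
#endif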

#ifndef PRODUCT

// test the code path in reserve_memory_special() that tries to allocate memory in a single
// contiguous memory block at a particular address.
// The test first tries to find a good approximate address to allocate at by using the same
// method to allocate some memory at any address.  The test then tries to allocate memory in
// the vicinity (not directly after it, to avoid possible by-chance use of that location).
// This is of course only a rough assumption; there is no guarantee that the vicinity of
// the previously allocated memory is available for allocation.  The only actual failure
// that is reported is when the test tries to allocate at a particular location but gets a
// different valid one.  A NULL return value at this point is not considered an error but may
// be legitimate.
// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
void TestReserveMemorySpecial_test() {
  if (!UseLargePages) {
    if (VerboseInternalVMTests) {
      tty->print("Skipping test because large pages are disabled");
    }
    return;
  }
  // save current value of globals
  bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
  bool old_use_numa_interleaving = UseNUMAInterleaving;

  // set globals to make sure we hit the correct code path
  UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;

  // do an allocation at an address selected by the OS to get a good one.
  const size_t large_allocation_size = os::large_page_size() * 4;
  char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
  if (result == NULL) {
    if (VerboseInternalVMTests) {
      tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.",
                 large_allocation_size);
    }
  } else {
    os::release_memory_special(result, large_allocation_size);

    // allocate another page within the recently allocated memory area. This seems to be a
    // good location; at least we managed to get it once.
    const size_t expected_allocation_size = os::large_page_size();
    char* expected_location = result + os::large_page_size();
    char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
    if (actual_location == NULL) {
      if (VerboseInternalVMTests) {
        tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.",
                   expected_location, expected_allocation_size);
      }
    } else {
      // release memory
      os::release_memory_special(actual_location, expected_allocation_size);
      // only now check, after releasing any memory, to avoid leaks.
      assert(actual_location == expected_location,
             "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
             expected_location, expected_allocation_size, actual_location);
    }
  }

  // restore globals
  UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
  UseNUMAInterleaving = old_use_numa_interleaving;
}
#endif // PRODUCT

/*
  All the defined signal names for Windows.

  NOTE that not all of these names are accepted by FindSignal!

  For various reasons some of these may be rejected at runtime.

  Here are the names currently accepted by a user of sun.misc.Signal with
  1.4.1 (ignoring potential interaction with use of chaining, etc):

     (LIST TBD)

*/
int os::get_signal_number(const char* name) {
  static const struct {
    const char* name;
    int number;
  } siglabels [] =
    // derived from version 6.0 VC98/include/signal.h
    {"ABRT",  SIGABRT,  // abnormal termination triggered by abort call
     "FPE",   SIGFPE,   // floating point exception
     "SEGV",  SIGSEGV,  // segment violation
     "INT",   SIGINT,   // interrupt
     "TERM",  SIGTERM,  // software term signal from kill
     "BREAK", SIGBREAK, // Ctrl-Break sequence
     "ILL",   SIGILL};  // illegal instruction
  for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
    if (strcmp(name, siglabels[i].name) == 0) {
      return siglabels[i].number;
    }
  }
  return -1;
}

// Fast current thread access

int os::win32::_thread_ptr_offset = 0;

static void call_wrapper_dummy() {}

// We need to call the os_exception_wrapper once so that it sets
// up the offset from FS of the thread pointer.
void os::win32::initialize_thread_ptr_offset() {
  os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
                           NULL, NULL, NULL, NULL);
}
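
// Example (illustrative only) of the name-to-number mapping implemented by
// os::get_signal_number() above; only the labels in the siglabels table are
// accepted on Windows, anything else yields -1.
#if 0
static void get_signal_number_example() {
  assert(os::get_signal_number("TERM") == SIGTERM, "accepted on Windows");
  assert(os::get_signal_number("HUP") == -1, "POSIX-only names are rejected");
}
#endif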