1 /* 2 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // Must be at least Windows 2000 or XP to use IsDebuggerPresent 26 #define _WIN32_WINNT 0x500 27 28 // no precompiled headers 29 #include "classfile/classLoader.hpp" 30 #include "classfile/systemDictionary.hpp" 31 #include "classfile/vmSymbols.hpp" 32 #include "code/icBuffer.hpp" 33 #include "code/vtableStubs.hpp" 34 #include "compiler/compileBroker.hpp" 35 #include "compiler/disassembler.hpp" 36 #include "interpreter/interpreter.hpp" 37 #include "jvm_windows.h" 38 #include "memory/allocation.inline.hpp" 39 #include "memory/filemap.hpp" 40 #include "mutex_windows.inline.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "os_share_windows.hpp" 43 #include "prims/jniFastGetField.hpp" 44 #include "prims/jvm.h" 45 #include "prims/jvm_misc.hpp" 46 #include "runtime/arguments.hpp" 47 #include "runtime/extendedPC.hpp" 48 #include "runtime/globals.hpp" 49 #include "runtime/interfaceSupport.hpp" 50 #include "runtime/java.hpp" 51 #include "runtime/javaCalls.hpp" 52 #include "runtime/mutexLocker.hpp" 53 #include "runtime/objectMonitor.hpp" 54 #include "runtime/orderAccess.inline.hpp" 55 #include "runtime/osThread.hpp" 56 #include "runtime/perfMemory.hpp" 57 #include "runtime/sharedRuntime.hpp" 58 #include "runtime/statSampler.hpp" 59 #include "runtime/stubRoutines.hpp" 60 #include "runtime/thread.inline.hpp" 61 #include "runtime/threadCritical.hpp" 62 #include "runtime/timer.hpp" 63 #include "services/attachListener.hpp" 64 #include "services/memTracker.hpp" 65 #include "services/runtimeService.hpp" 66 #include "utilities/decoder.hpp" 67 #include "utilities/defaultStream.hpp" 68 #include "utilities/events.hpp" 69 #include "utilities/growableArray.hpp" 70 #include "utilities/vmError.hpp" 71 72 #ifdef _DEBUG 73 #include <crtdbg.h> 74 #endif 75 76 77 #include <windows.h> 78 #include <sys/types.h> 79 #include <sys/stat.h> 80 #include <sys/timeb.h> 81 #include <objidl.h> 82 #include <shlobj.h> 83 84 #include <malloc.h> 85 #include <signal.h> 86 #include <direct.h> 
#include <errno.h>
#include <fcntl.h>
#include <io.h>
#include <process.h>              // For _beginthreadex(), _endthreadex()
#include <imagehlp.h>             // For os::dll_address_to_function_name
/* for enumerating dll libraries */
#include <vdmdbg.h>

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)

// For DLL loading/load error detection
// Values of PE COFF
#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
#define IMAGE_FILE_SIGNATURE_LENGTH 4

// Handles/ids of the process and primordial thread, captured at VM startup.
static HANDLE main_process;
static HANDLE main_thread;
static int    main_thread_id;

static FILETIME process_creation_time;
static FILETIME process_exit_time;
static FILETIME process_user_time;
static FILETIME process_kernel_time;

#ifdef _M_IA64
#define __CPU__ ia64
#else
#ifdef _M_AMD64
#define __CPU__ amd64
#else
#define __CPU__ i486
#endif
#endif

// save DLL module handle, used by GetModuleFileName

HINSTANCE vm_lib_handle;

// DLL entry point: record the module handle and, when requested, raise the
// multimedia timer resolution to 1ms for the lifetime of the process
// (matched by timeEndPeriod on detach).
BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
  switch (reason) {
    case DLL_PROCESS_ATTACH:
      vm_lib_handle = hinst;
      if(ForceTimeHighResolution)
        timeBeginPeriod(1L);
      break;
    case DLL_PROCESS_DETACH:
      if(ForceTimeHighResolution)
        timeEndPeriod(1L);
      break;
    default:
      break;
  }
  return true;
}

// Convert a FILETIME (100ns units split across two 32-bit words) to seconds
// as a double.
static inline double fileTimeAsDouble(FILETIME* time) {
  const double high  = (double) ((unsigned int) ~0);
  const double split = 10000000.0;
  double result = (time->dwLowDateTime / split) +
                   time->dwHighDateTime * (high/split);
  return result;
}

// Implementation of os

// Read environment variable 'name' into 'buffer' (capacity 'len').
// Returns true only if the variable exists and fits in the buffer.
bool os::getenv(const char* name, char* buffer, int len) {
 int result = GetEnvironmentVariable(name, buffer, len);
 return result > 0 && result < len;
}

// Remove 'name' from the process environment. Passing NULL as the value
// deletes the variable.
bool os::unsetenv(const char* name) {
  assert(name != NULL, "Null pointer");
  return
    (SetEnvironmentVariable(name, NULL) == TRUE);
}

// No setuid programs under Windows.
bool os::have_special_privileges() {
  return false;
}


// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here.
// For Windows at the moment does nothing
void os::run_periodic_checks() {
  return;
}

#ifndef _WIN64
// previous UnhandledExceptionFilter, if there is one
static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;

LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
#endif

// Compute and publish java.home, the dll dir, boot/class paths, the native
// library search path, and the extension/endorsed directories.
void os::init_system_properties_values() {
  /* sysclasspath, java_home, dll_dir */
  {
      char *home_path;
      char *dll_path;
      char *pslash;
      char *bin = "\\bin";
      char home_dir[MAX_PATH];

      if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) {
          os::jvm_path(home_dir, sizeof(home_dir));
          // Found the full path to jvm.dll.
          // Now cut the path to <java_home>/jre if we can.
          *(strrchr(home_dir, '\\')) = '\0';  /* get rid of \jvm.dll */
          pslash = strrchr(home_dir, '\\');
          if (pslash != NULL) {
              *pslash = '\0';                 /* get rid of \{client|server} */
              pslash = strrchr(home_dir, '\\');
              if (pslash != NULL)
                  *pslash = '\0';             /* get rid of \bin */
          }
      }

      home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
      if (home_path == NULL)
          return;
      strcpy(home_path, home_dir);
      Arguments::set_java_home(home_path);

      dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal);
      if (dll_path == NULL)
          return;
      strcpy(dll_path, home_dir);
      strcat(dll_path, bin);
      Arguments::set_dll_dir(dll_path);

      if (!set_boot_path('\\', ';'))
          return;
  }

  /* library_path */
#define EXT_DIR "\\lib\\ext"
#define BIN_DIR "\\bin"
#define PACKAGE_DIR "\\Sun\\Java"
  {
    /* Win32 library search order (See the documentation for LoadLibrary):
     *
     * 1. The directory from which application is loaded.
     * 2. The system wide Java Extensions directory (Java only)
     * 3. System directory (GetSystemDirectory)
     * 4. Windows directory (GetWindowsDirectory)
     * 5. The PATH environment variable
     * 6. The current directory
     */

    char *library_path;
    char tmp[MAX_PATH];
    char *path_str = ::getenv("PATH");

    // Worst-case size: five MAX_PATH components plus the fixed suffixes,
    // the user PATH, separators, and the terminator.
    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
        sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);

    library_path[0] = '\0';

    // 1. Directory containing the launcher executable.
    GetModuleFileName(NULL, tmp, sizeof(tmp));
    *(strrchr(tmp, '\\')) = '\0';
    strcat(library_path, tmp);

    // 2. System-wide Java Extensions directory (<windir>\Sun\Java\bin).
    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);
    strcat(library_path, PACKAGE_DIR BIN_DIR);

    // 3. System directory.
    GetSystemDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    // 4. Windows directory.
    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    // 5. The PATH environment variable, if set.
    if (path_str) {
        strcat(library_path, ";");
        strcat(library_path, path_str);
    }

    // 6. The current directory.
    strcat(library_path, ";.");

    // set_library_path copies its argument, so the scratch buffer can be freed.
    Arguments::set_library_path(library_path);
    FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
  }

  /* Default extensions directory */
  {
    char path[MAX_PATH];
    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
    GetWindowsDirectory(path, MAX_PATH);
    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
        path, PACKAGE_DIR, EXT_DIR);
    Arguments::set_ext_dirs(buf);
  }
#undef EXT_DIR
#undef BIN_DIR
#undef PACKAGE_DIR

  /* Default endorsed standards directory. */
  {
#define ENDORSED_DIR "\\lib\\endorsed"
    size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR);
    char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal);
    sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR);
    Arguments::set_endorsed_dirs(buf);
#undef ENDORSED_DIR
  }

#ifndef _WIN64
  // set our UnhandledExceptionFilter and save any previous one
  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif

  // Done
  return;
}

// Break into an attached debugger (or raise a breakpoint exception).
void os::breakpoint() {
  DebugBreak();
}

// Invoked from the BREAKPOINT Macro
extern "C" void breakpoint() {
  os::breakpoint();
}

/*
 * RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 * So far, this method is only used by Native Memory Tracking, which is
 * only supported on Windows XP or later.
 */
// Capture up to 'frames' return addresses of the current thread into 'stack',
// skipping 'toSkip' frames (plus this frame). Unused slots are NULL-filled.
// Returns the number of frames actually captured.
int os::get_native_stack(address* stack, int frames, int toSkip) {
#ifdef _NMT_NOINLINE_
  toSkip ++;
#endif
  int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames,
    (PVOID*)stack, NULL);
  for (int index = captured; index < frames; index ++) {
    stack[index] = NULL;
  }
  return captured;
}


// os::current_stack_base()
//
// Returns the base of the stack, which is the stack's
// starting address. This function must be called
// while running on the stack of the thread being queried.

address os::current_stack_base() {
  MEMORY_BASIC_INFORMATION minfo;
  address stack_bottom;
  size_t stack_size;

  // Query the region containing a local variable (i.e. this stack).
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  stack_bottom =  (address)minfo.AllocationBase;
  stack_size = minfo.RegionSize;

  // Add up the sizes of all the regions with the same
  // AllocationBase.
  while( 1 )
  {
    VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
    if ( stack_bottom == (address)minfo.AllocationBase )
      stack_size += minfo.RegionSize;
    else
      break;
  }

#ifdef _M_IA64
  // IA64 has memory and register stacks
  //
  // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
  // at thread creation (1MB backing store growing upwards, 1MB memory stack
  // growing downwards, 2MB summed up)
  //
  // ...
  // ------- top of stack (high address) -----
  // |
  // |      1MB
  // |      Backing Store (Register Stack)
  // |
  // |         / \
  // |          |
  // |          |
  // |          |
  // ------------------------ stack base -----
  // |      1MB
  // |      Memory Stack
  // |
  // |          |
  // |          |
  // |          |
  // |         \ /
  // |
  // ----- bottom of stack (low address) -----
  // ...

  stack_size = stack_size / 2;
#endif
  return stack_bottom + stack_size;
}

// Size of the current thread's stack, from the allocation base up to the
// stack base returned by current_stack_base().
size_t os::current_stack_size() {
  size_t sz;
  MEMORY_BASIC_INFORMATION minfo;
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
  return sz;
}

// Thread-safe wrapper around localtime(): copies the result into
// caller-provided storage. Returns NULL on conversion failure.
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  const struct tm* time_struct_ptr = localtime(clock);
  if (time_struct_ptr != NULL) {
    *res = *time_struct_ptr;
    return res;
  }
  return NULL;
}

LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);

// Thread start routine for all new Java threads
static unsigned __stdcall java_start(Thread* thread) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  _alloca(((pid ^ counter++) & 7) * 128);

  OSThread* osthr = thread->osthread();
  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }


  // Install a win32 structured exception handler around every thread created
  // by VM, so VM can generate error dump when an exception occurred in non-
  // Java thread (e.g. VM thread).
  __try {
    thread->run();
  } __except(topLevelExceptionFilter(
             (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::add code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
  }

  return 0;
}

// Wrap an already-existing Win32 thread (handle + id) in a new OSThread in
// state INITIALIZED. Returns NULL on allocation or event-creation failure.
static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Initialize support for Java interrupts
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return NULL;
  }
  osthread->set_interrupt_event(interrupt_event);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}


bool
os::create_attached_thread(JavaThread* thread) { 488 #ifdef ASSERT 489 thread->verify_not_published(); 490 #endif 491 HANDLE thread_h; 492 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(), 493 &thread_h, THREAD_ALL_ACCESS, false, 0)) { 494 fatal("DuplicateHandle failed\n"); 495 } 496 OSThread* osthread = create_os_thread(thread, thread_h, 497 (int)current_thread_id()); 498 if (osthread == NULL) { 499 return false; 500 } 501 502 // Initial thread state is RUNNABLE 503 osthread->set_state(RUNNABLE); 504 505 thread->set_osthread(osthread); 506 return true; 507 } 508 509 bool os::create_main_thread(JavaThread* thread) { 510 #ifdef ASSERT 511 thread->verify_not_published(); 512 #endif 513 if (_starting_thread == NULL) { 514 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 515 if (_starting_thread == NULL) { 516 return false; 517 } 518 } 519 520 // The primordial thread is runnable from the start) 521 _starting_thread->set_state(RUNNABLE); 522 523 thread->set_osthread(_starting_thread); 524 return true; 525 } 526 527 // Allocate and initialize a new OSThread 528 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) { 529 unsigned thread_id; 530 531 // Allocate the OSThread object 532 OSThread* osthread = new OSThread(NULL, NULL); 533 if (osthread == NULL) { 534 return false; 535 } 536 537 // Initialize support for Java interrupts 538 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 539 if (interrupt_event == NULL) { 540 delete osthread; 541 return NULL; 542 } 543 osthread->set_interrupt_event(interrupt_event); 544 osthread->set_interrupted(false); 545 546 thread->set_osthread(osthread); 547 548 if (stack_size == 0) { 549 switch (thr_type) { 550 case os::java_thread: 551 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 552 if (JavaThread::stack_size_at_create() > 0) 553 stack_size = JavaThread::stack_size_at_create(); 554 break; 555 case 
os::compiler_thread: 556 if (CompilerThreadStackSize > 0) { 557 stack_size = (size_t)(CompilerThreadStackSize * K); 558 break; 559 } // else fall through: 560 // use VMThreadStackSize if CompilerThreadStackSize is not defined 561 case os::vm_thread: 562 case os::pgc_thread: 563 case os::cgc_thread: 564 case os::watcher_thread: 565 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 566 break; 567 } 568 } 569 570 // Create the Win32 thread 571 // 572 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() 573 // does not specify stack size. Instead, it specifies the size of 574 // initially committed space. The stack size is determined by 575 // PE header in the executable. If the committed "stack_size" is larger 576 // than default value in the PE header, the stack is rounded up to the 577 // nearest multiple of 1MB. For example if the launcher has default 578 // stack size of 320k, specifying any size less than 320k does not 579 // affect the actual stack size at all, it only affects the initial 580 // commitment. On the other hand, specifying 'stack_size' larger than 581 // default value may cause significant increase in memory usage, because 582 // not only the stack space will be rounded up to MB, but also the 583 // entire space is committed upfront. 584 // 585 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' 586 // for CreateThread() that can treat 'stack_size' as stack size. However we 587 // are not supposed to call CreateThread() directly according to MSDN 588 // document because JVM uses C runtime library. The good news is that the 589 // flag appears to work with _beginthredex() as well. 
590 591 #ifndef STACK_SIZE_PARAM_IS_A_RESERVATION 592 #define STACK_SIZE_PARAM_IS_A_RESERVATION (0x10000) 593 #endif 594 595 HANDLE thread_handle = 596 (HANDLE)_beginthreadex(NULL, 597 (unsigned)stack_size, 598 (unsigned (__stdcall *)(void*)) java_start, 599 thread, 600 CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 601 &thread_id); 602 if (thread_handle == NULL) { 603 // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again 604 // without the flag. 605 thread_handle = 606 (HANDLE)_beginthreadex(NULL, 607 (unsigned)stack_size, 608 (unsigned (__stdcall *)(void*)) java_start, 609 thread, 610 CREATE_SUSPENDED, 611 &thread_id); 612 } 613 if (thread_handle == NULL) { 614 // Need to clean up stuff we've allocated so far 615 CloseHandle(osthread->interrupt_event()); 616 thread->set_osthread(NULL); 617 delete osthread; 618 return NULL; 619 } 620 621 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count); 622 623 // Store info on the Win32 thread into the OSThread 624 osthread->set_thread_handle(thread_handle); 625 osthread->set_thread_id(thread_id); 626 627 // Initial thread state is INITIALIZED, not SUSPENDED 628 osthread->set_state(INITIALIZED); 629 630 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 631 return true; 632 } 633 634 635 // Free Win32 resources related to the OSThread 636 void os::free_thread(OSThread* osthread) { 637 assert(osthread != NULL, "osthread not set"); 638 CloseHandle(osthread->thread_handle()); 639 CloseHandle(osthread->interrupt_event()); 640 delete osthread; 641 } 642 643 644 static int has_performance_count = 0; 645 static jlong first_filetime; 646 static jlong initial_performance_count; 647 static jlong performance_frequency; 648 649 650 jlong as_long(LARGE_INTEGER x) { 651 jlong result = 0; // initialization to avoid warning 652 set_high(&result, x.HighPart); 653 set_low(&result, x.LowPart); 654 return result; 655 } 656 657 658 jlong os::elapsed_counter() { 659 
  LARGE_INTEGER count;
  if (has_performance_count) {
    QueryPerformanceCounter(&count);
    return as_long(count) - initial_performance_count;
  } else {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime);
  }
}


// Ticks per second of the counter used by elapsed_counter().
jlong os::elapsed_frequency() {
  if (has_performance_count) {
    return performance_frequency;
  } else {
   // the FILETIME time is the number of 100-nanosecond intervals since January 1,1601.
   return 10000000;
  }
}


julong os::available_memory() {
  return win32::available_memory();
}

julong os::win32::available_memory() {
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);

  return (julong)ms.ullAvailPhys;
}

julong os::physical_memory() {
  return win32::physical_memory();
}

// Upper bound on memory this process can still allocate, in bytes.
bool os::has_allocatable_memory_limit(julong* limit) {
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);
#ifdef _LP64
  *limit = (julong)ms.ullAvailVirtual;
  return true;
#else
  // Limit to 1400m because of the 2gb address space wall
  *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
  return true;
#endif
}

// VC6 lacks DWORD_PTR
#if _MSC_VER < 1300
typedef UINT_PTR DWORD_PTR;
#endif

// Number of processors the VM may actually run on: either the user override
// (-XX:ActiveProcessorCount) or the popcount of the process affinity mask.
int os::active_processor_count() {
  // User has overridden the number of active processors
  if (ActiveProcessorCount > 0) {
    if (PrintActiveCpus) {
      tty->print_cr("active_processor_count: "
                    "active processor count set by user : %d",
                    ActiveProcessorCount);
    }
    return ActiveProcessorCount;
  }

  DWORD_PTR lpProcessAffinityMask = 0;
  DWORD_PTR lpSystemAffinityMask = 0;
  int proc_count = processor_count();
  // The affinity mask only covers one processor group; fall back to the raw
  // processor count when it cannot represent all processors.
  if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
      GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // Number of active processors is number of bits in process affinity mask
    int bitcount = 0;
    while (lpProcessAffinityMask != 0) {
      // Clear the lowest set bit each iteration (Kernighan's popcount).
      lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
      bitcount++;
    }
    return bitcount;
  } else {
    return proc_count;
  }
}

void os::set_native_thread_name(const char *name) {
  // Not yet implemented.
  return;
}

bool os::distribute_processes(uint length, uint* distribution) {
  // Not yet implemented.
  return false;
}

bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}

// Pick the timer source: the high-resolution performance counter if the
// hardware/OS provides one, otherwise the system FILETIME clock.
static void initialize_performance_counter() {
  LARGE_INTEGER count;
  if (QueryPerformanceFrequency(&count)) {
    has_performance_count = 1;
    performance_frequency = as_long(count);
    QueryPerformanceCounter(&count);
    initial_performance_count = as_long(count);
  } else {
    has_performance_count = 0;
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  }
}


double os::elapsedTime() {
  return (double) elapsed_counter() / (double) elapsed_frequency();
}


// Windows format:
//   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
784 // Java format: 785 // Java standards require the number of milliseconds since 1/1/1970 786 787 // Constant offset - calculated using offset() 788 static jlong _offset = 116444736000000000; 789 // Fake time counter for reproducible results when debugging 790 static jlong fake_time = 0; 791 792 #ifdef ASSERT 793 // Just to be safe, recalculate the offset in debug mode 794 static jlong _calculated_offset = 0; 795 static int _has_calculated_offset = 0; 796 797 jlong offset() { 798 if (_has_calculated_offset) return _calculated_offset; 799 SYSTEMTIME java_origin; 800 java_origin.wYear = 1970; 801 java_origin.wMonth = 1; 802 java_origin.wDayOfWeek = 0; // ignored 803 java_origin.wDay = 1; 804 java_origin.wHour = 0; 805 java_origin.wMinute = 0; 806 java_origin.wSecond = 0; 807 java_origin.wMilliseconds = 0; 808 FILETIME jot; 809 if (!SystemTimeToFileTime(&java_origin, &jot)) { 810 fatal(err_msg("Error = %d\nWindows error", GetLastError())); 811 } 812 _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime); 813 _has_calculated_offset = 1; 814 assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal"); 815 return _calculated_offset; 816 } 817 #else 818 jlong offset() { 819 return _offset; 820 } 821 #endif 822 823 jlong windows_to_java_time(FILETIME wt) { 824 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 825 return (a - offset()) / 10000; 826 } 827 828 FILETIME java_to_windows_time(jlong l) { 829 jlong a = (l * 10000) + offset(); 830 FILETIME result; 831 result.dwHighDateTime = high(a); 832 result.dwLowDateTime = low(a); 833 return result; 834 } 835 836 bool os::supports_vtime() { return true; } 837 bool os::enable_vtime() { return false; } 838 bool os::vtime_enabled() { return false; } 839 840 double os::elapsedVTime() { 841 FILETIME created; 842 FILETIME exited; 843 FILETIME kernel; 844 FILETIME user; 845 if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) { 846 // the resolution 
of windows_to_java_time() should be sufficient (ms) 847 return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS; 848 } else { 849 return elapsedTime(); 850 } 851 } 852 853 jlong os::javaTimeMillis() { 854 if (UseFakeTimers) { 855 return fake_time++; 856 } else { 857 FILETIME wt; 858 GetSystemTimeAsFileTime(&wt); 859 return windows_to_java_time(wt); 860 } 861 } 862 863 jlong os::javaTimeNanos() { 864 if (!has_performance_count) { 865 return javaTimeMillis() * NANOSECS_PER_MILLISEC; // the best we can do. 866 } else { 867 LARGE_INTEGER current_count; 868 QueryPerformanceCounter(¤t_count); 869 double current = as_long(current_count); 870 double freq = performance_frequency; 871 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC); 872 return time; 873 } 874 } 875 876 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 877 if (!has_performance_count) { 878 // javaTimeMillis() doesn't have much percision, 879 // but it is not going to wrap -- so all 64 bits 880 info_ptr->max_value = ALL_64_BITS; 881 882 // this is a wall clock timer, so may skip 883 info_ptr->may_skip_backward = true; 884 info_ptr->may_skip_forward = true; 885 } else { 886 jlong freq = performance_frequency; 887 if (freq < NANOSECS_PER_SEC) { 888 // the performance counter is 64 bits and we will 889 // be multiplying it -- so no wrap in 64 bits 890 info_ptr->max_value = ALL_64_BITS; 891 } else if (freq > NANOSECS_PER_SEC) { 892 // use the max value the counter can reach to 893 // determine the max value which could be returned 894 julong max_counter = (julong)ALL_64_BITS; 895 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC)); 896 } else { 897 // the performance counter is 64 bits and we will 898 // be using it directly -- so no wrap in 64 bits 899 info_ptr->max_value = ALL_64_BITS; 900 } 901 902 // using a counter, so no skipping 903 info_ptr->may_skip_backward = false; 904 info_ptr->may_skip_forward = false; 905 } 906 info_ptr->kind = 
JVMTI_TIMER_ELAPSED; // elapsed not CPU time 907 } 908 909 char* os::local_time_string(char *buf, size_t buflen) { 910 SYSTEMTIME st; 911 GetLocalTime(&st); 912 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 913 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); 914 return buf; 915 } 916 917 bool os::getTimesSecs(double* process_real_time, 918 double* process_user_time, 919 double* process_system_time) { 920 HANDLE h_process = GetCurrentProcess(); 921 FILETIME create_time, exit_time, kernel_time, user_time; 922 BOOL result = GetProcessTimes(h_process, 923 &create_time, 924 &exit_time, 925 &kernel_time, 926 &user_time); 927 if (result != 0) { 928 FILETIME wt; 929 GetSystemTimeAsFileTime(&wt); 930 jlong rtc_millis = windows_to_java_time(wt); 931 jlong user_millis = windows_to_java_time(user_time); 932 jlong system_millis = windows_to_java_time(kernel_time); 933 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS); 934 *process_user_time = ((double) user_millis) / ((double) MILLIUNITS); 935 *process_system_time = ((double) system_millis) / ((double) MILLIUNITS); 936 return true; 937 } else { 938 return false; 939 } 940 } 941 942 void os::shutdown() { 943 944 // allow PerfMemory to attempt cleanup of any persistent resources 945 perfMemory_exit(); 946 947 // flush buffered output, finish log files 948 ostream_abort(); 949 950 // Check for abort hook 951 abort_hook_t abort_hook = Arguments::abort_hook(); 952 if (abort_hook != NULL) { 953 abort_hook(); 954 } 955 } 956 957 958 static BOOL (WINAPI *_MiniDumpWriteDump) ( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION, 959 PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION); 960 961 void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) { 962 HINSTANCE dbghelp; 963 EXCEPTION_POINTERS ep; 964 MINIDUMP_EXCEPTION_INFORMATION mei; 965 MINIDUMP_EXCEPTION_INFORMATION* pmei; 966 967 HANDLE hProcess = 
GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  HANDLE dumpFile;
  MINIDUMP_TYPE dumpType;
  static const char* cwd;

// Default is to always create dump for debug builds, on product builds only dump on server versions of Windows.
#ifndef ASSERT
  // If running on a client version of Windows and user has not explicitly enabled dumping
  if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false);
    return;
    // If running on a server version of Windows and user has explicitly disabled dumping
  } else if (os::win32::is_windows_server() && !FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#else
  // Debug build: dump unless the user explicitly disabled it on the command line.
  if (!FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#endif

  // dbghelp.dll is loaded lazily so a missing/broken DLL only disables
  // minidumps instead of preventing VM startup.
  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);

  if (dbghelp == NULL) {
    VMError::report_coredump_status("Failed to load dbghelp.dll", false);
    return;
  }

  // Resolve MiniDumpWriteDump dynamically; older systems may not export it.
  _MiniDumpWriteDump = CAST_TO_FN_PTR(
    BOOL(WINAPI *)( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
    PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION),
    GetProcAddress(dbghelp, "MiniDumpWriteDump"));

  if (_MiniDumpWriteDump == NULL) {
    VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false);
    return;
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData);

// Older versions of dbghelp.h doesn't contain all the dumptypes we want, dbghelp.h with
// API_VERSION_NUMBER 11 or higher contains the ones we want though
#if API_VERSION_NUMBER >= 11
  dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo |
    MiniDumpWithUnloadedModules);
#endif

  // Write the dump next to the hs_err file: <cwd>\hs_err_pid<pid>.mdmp
  cwd = get_current_directory(NULL, 0);
  jio_snprintf(buffer, bufferSize, "%s\\hs_err_pid%u.mdmp",cwd, current_process_id());
  dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);

  if (dumpFile == INVALID_HANDLE_VALUE) {
    VMError::report_coredump_status("Failed to create file for dumping", false);
    return;
  }
  // Only pass exception information through when both records are available;
  // otherwise MiniDumpWriteDump gets a NULL exception parameter.
  if (exceptionRecord != NULL && contextRecord != NULL) {
    ep.ContextRecord = (PCONTEXT) contextRecord;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }


// Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
// the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
    // Both attempts failed: build a human-readable failure message for the
    // error report, preferring the system-formatted text when available.
    DWORD error = GetLastError();
    LPTSTR msgbuf = NULL;

    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) {

      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
      LocalFree(msgbuf);   // FormatMessage allocated msgbuf via LocalAlloc
    } else {
      // Call to FormatMessage failed, just include the result from GetLastError
      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
    }
    VMError::report_coredump_status(buffer, false);
  } else {
    VMError::report_coredump_status(buffer, true);
  }

  CloseHandle(dumpFile);
}



// VM-requested termination after error reporting.  The dump_core request is
// ignored here: there is no POSIX-style core dump on Windows (the minidump
// machinery above is used instead).
void os::abort(bool dump_core)
{
  os::shutdown();
  // no core dump on Windows
  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  _exit(-1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
// * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

/* Caller must have already run dirname through JVM_NativePath, which removes
   duplicate slashes and converts all instances of '/' into '\\'.
*/

// Open a directory stream for |dirname|.  Returns a heap-allocated DIR on
// success.  On failure returns 0 with errno set to ENOMEM, ENOENT, ENOTDIR
// or EACCES, mirroring the POSIX opendir() contract.
DIR *
os::opendir(const char *dirname)
{
  assert(dirname != NULL, "just checking");      // hotspot change

  DIR *dir = (DIR *)malloc(sizeof(DIR), mtInternal);
  if (dir == 0) {
    errno = ENOMEM;
    return 0;
  }

  /*
   * Win32 accepts "\" in its POSIX stat(), but refuses to treat it
   * as a directory in FindFirstFile().  Detect that case here and
   * substitute the root of the current drive (e.g. "C:\").
   */
  char drive_root[4] = { 0, 0, 0, 0 };
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    drive_root[0] = _getdrive() + 'A' - 1;
    drive_root[1] = ':';
    drive_root[2] = '\\';
    drive_root[3] = '\0';
    dirname = drive_root;
  }

  /* Room for the name plus "\\*.*" and the terminating NUL. */
  dir->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dir->path == 0) {
    free(dir, mtInternal);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dir->path, dirname);

  DWORD attrs = GetFileAttributes(dir->path);  // hotspot change
  if (attrs == 0xffffffff) {
    free(dir->path, mtInternal);
    free(dir, mtInternal);
    errno = ENOENT;
    return 0;
  }
  if ((attrs & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dir->path, mtInternal);
    free(dir, mtInternal);
    errno = ENOTDIR;
    return 0;
  }

  /* Append the wildcard pattern: "Z:" and "Z:\" need no extra '\\'. */
  const bool at_drive_root =
      dir->path[1] == ':' &&
      (dir->path[2] == '\0' ||
       (dir->path[2] == '\\' && dir->path[3] == '\0'));
  strcat(dir->path, at_drive_root ? "*.*" : "\\*.*");

  dir->handle = FindFirstFile(dir->path, &dir->find_data);
  if (dir->handle == INVALID_HANDLE_VALUE &&
      GetLastError() != ERROR_FILE_NOT_FOUND) {
    /* Anything other than "empty directory" is treated as a failure. */
    free(dir->path, mtInternal);
    free(dir, mtInternal);
    errno = EACCES;
    return 0;
  }
  return dir;
}

/* parameter dbuf unused on Windows */
// Return the next directory entry from |dirp|, or 0 when the stream is
// exhausted or on error (errno = EBADF if the find handle went stale).
struct dirent *
os::readdir(DIR *dirp, dirent *dbuf)
{
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    return 0;
  }

  // The current entry was fetched by the previous FindFirstFile/FindNextFile;
  // copy it out before advancing.
  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return 0;
    }
    // No more entries: close the handle now; the next readdir returns 0.
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}

// Close the directory stream and release the DIR and its path buffer.
// Returns 0 on success, -1 (errno = EBADF) if FindClose fails.
int
os::closedir(DIR *dirp)
{
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path, mtInternal);
  free(dirp, mtInternal);
  return 0;
}

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
// NOTE(review): if GetTempPath needs more than MAX_PATH characters it returns
// the required size (> 0) without filling the buffer — that case is not
// distinguished here; verify against callers if long temp paths matter.
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf)>0)
    return path_buf;
  else{
    path_buf[0]='\0';
    return path_buf;
  }
}

// True if |filename| names an existing file-system object (any attribute type).
static bool file_exists(const char* filename) {
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
}

// Build a full DLL path "<pname>\<fname>.dll" into |buffer|.  |pname| may be
// empty, a single directory, or a path-separator-delimited list (each element
// is probed with file_exists until one matches).  Returns true if a name was
// produced, false on overflow or when no list element contains the file.
bool os::dll_build_name(char *buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  const size_t pnamelen = pname ? strlen(pname) : 0;
  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;

  // Return error on buffer overflow.
  // The 10 covers the separator, the ".dll" suffix and the NUL.
  if (pnamelen + strlen(fname) + 10 > buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    jio_snprintf(buffer, buflen, "%s.dll", fname);
    retval = true;
  } else if (c == ':' || c == '\\') {
    // pname already ends with a separator; no extra '\\' needed.
    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // pname is a search-path list: probe each element in order.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0 ; i < n ; i++) {
      char* path = pelements[i];
      // Really shouldn't be NULL, but check can't hurt
      size_t plen = (path == NULL) ? 0 : strlen(path);
      if (plen == 0) {
        continue; // skip the empty path values
      }
      const char lastchar = path[plen - 1];
      if (lastchar == ':' || lastchar == '\\') {
        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
      } else {
        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
      }
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0 ; i < n ; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
    retval = true;
  }
  return retval;
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
const char* os::get_current_directory(char *buf, size_t buflen) {
  // _getcwd takes an int; clamp oversized buffers to INT_MAX.
  int n = static_cast<int>(buflen);
  if (buflen > INT_MAX)  n = INT_MAX;
  return _getcwd(buf, n);
}

//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if address in
// within the NTDLL address space.
//
static bool _addr_in_ntdll( address addr )
{
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if ( hmod == NULL ) return false;
  if ( !os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod,
                               &minfo, sizeof(MODULEINFO)) )
    return false;

  // In range [base, base + SizeOfImage)?
  if ( (addr >= minfo.lpBaseOfDll) &&
       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
    return true;
  else
    return false;
}
#endif


// Enumerate all modules for a given process ID
//
// Notice that Windows 95/98/Me and Windows NT/2000/XP have
// different API for doing this. We use PSAPI.DLL on NT based
// Windows and ToolHelp on 95/98/Me.

// Callback function that is called by enumerate_modules() on
// every DLL module.
// Input parameters:
//    int       pid,
//    char*     module_file_name,
//    address   module_base_addr,
//    unsigned  module_size,
//    void*     param
// A non-zero return value stops the enumeration.
typedef int (*EnumModulesCallbackFunc)(int, char *, address, unsigned, void *);

// enumerate_modules for Windows NT, using PSAPI
static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void * param)
{
  HANDLE   hProcess ;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[ MAX_PATH ];   // shared scratch; this path is not re-entrant
  int         result = 0;

  if (!os::PSApiDll::PSApiAvailable()) {
    return 0;
  }

  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid ) ;
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
                           sizeof(modules), &size_needed)) {
    CloseHandle( hProcess );
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  // Anything beyond MAX_NUM_MODULES is silently ignored.
  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if(!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
                             filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
                             &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = func(pid, filename, (address)modinfo.lpBaseOfDll,
                  modinfo.SizeOfImage, param);
    if (result) break;
  }

  CloseHandle( hProcess ) ;
  return result;
}


// enumerate_modules for Windows 95/98/ME, using TOOLHELP
static int _enumerate_modules_windows( int pid, EnumModulesCallbackFunc func, void *param)
{
  HANDLE                hSnapShot ;
  static MODULEENTRY32  modentry ;   // shared scratch; this path is not re-entrant
  int                   result = 0;

  if (!os::Kernel32Dll::HelpToolsAvailable()) {
    return 0;
  }

  // Get a handle to a Toolhelp snapshot of the system
  hSnapShot = os::Kernel32Dll::CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid ) ;
  if( hSnapShot == INVALID_HANDLE_VALUE ) {
    return FALSE ;
  }

  // iterate through all modules
  modentry.dwSize = sizeof(MODULEENTRY32) ;
  bool not_done = os::Kernel32Dll::Module32First( hSnapShot, &modentry ) != 0;

  while( not_done ) {
    // invoke the callback
    result=func(pid, modentry.szExePath, (address)modentry.modBaseAddr,
                modentry.modBaseSize, param);
    if (result) break;

    // dwSize must be reset before each Module32Next call.
    modentry.dwSize = sizeof(MODULEENTRY32) ;
    not_done = os::Kernel32Dll::Module32Next( hSnapShot, &modentry ) != 0;
  }

  CloseHandle(hSnapShot);
  return result;
}

// Dispatch to the NT (PSAPI) or 9x/ME (ToolHelp) implementation.
int enumerate_modules( int pid, EnumModulesCallbackFunc func, void * param )
{
  // Get current process ID if caller doesn't provide it.
  if (!pid) pid = os::current_process_id();

  if (os::win32::is_nt()) return _enumerate_modules_winnt  (pid, func, param);
  else                    return _enumerate_modules_windows(pid, func, param);
}

// In/out parameter record for _locate_module_by_addr.
struct _modinfo {
  address addr;        // in:  address to locate
  char*   full_path;   // point to a char buffer
  int     buflen;      // size of the buffer
  address base_addr;   // out: base of the module containing addr
};

// enumerate_modules() callback: stop (return 1) at the module whose address
// range contains pmod->addr, recording its base and optionally its path.
static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr,
                                  unsigned size, void * param) {
  struct _modinfo *pmod = (struct _modinfo *)param;
  if (!pmod) return -1;

  if (base_addr     <= pmod->addr &&
      base_addr+size > pmod->addr) {
    // if a buffer is provided, copy path name to the buffer
    if (pmod->full_path) {
      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
    }
    pmod->base_addr = base_addr;
    return 1;
  }
  return 0;
}

// Resolve |addr| to the library containing it; copies the path into |buf|
// and, when |offset| is non-NULL, sets the offset from the module base.
// On failure buf is emptied and *offset (if given) is set to -1.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

// NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
//       return the full path to the DLL file, sometimes it returns path
//       to the corresponding PDB file (debug info); sometimes it only
//       returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr      = addr;
  mi.full_path = buf;
  mi.buflen    = buflen;
  int pid = os::current_process_id();
  if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

// Resolve |addr| to a symbol name via the Decoder; same in/out contract as
// dll_address_to_library_name.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset)) {
    return true;
  }
  if (offset != NULL)  *offset  = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
// (uses the address of this very function as a probe known to be in jvm.dll)
static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr,
                    unsigned size, void * param) {
  if (!param) return -1;

  if (base_addr     <= (address)_locate_jvm_dll &&
      base_addr+size > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = base_addr + size;
    return 1;
  }
  return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
bool os::address_is_in_vm(address addr) {
  // Lazily discover jvm.dll's address range on first use.
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    int pid = os::current_process_id();
    if (!enumerate_modules(pid, _locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(int pid, char* fname, address base,
                         unsigned size, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  address end_addr
 = base + size;
  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base, end_addr, fname);
  return 0;   // 0 = keep enumerating
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
void * os::dll_load(const char *name, char *ebuf, int ebuflen)
{
  void * result = LoadLibrary(name);
  if (result != NULL)
  {
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen-1);
    ebuf[ebuflen-1]='\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen-1]='\0';
  int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0);
  if (file_descriptor<0)
  {
    return NULL;
  }

  // Probe the PE header by hand to extract the target architecture.
  uint32_t signature_offset;
  uint16_t lib_arch=0;
  bool failed_to_get_lib_arch=
  (
    //Go to position 3c in the dll
    (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0)
    ||
    // Read location of signature
    (sizeof(signature_offset)!=
      (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset))))
    ||
    //Go to COFF File Header in dll
    //that is located after"signature" (4 bytes long)
    (os::seek_to_file_offset(file_descriptor,
      signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0)
    ||
    //Read field that contains code of architecture
    // that dll was build for
    (sizeof(lib_arch)!=
      (os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch))))
  );

  ::close(file_descriptor);
  if (failed_to_get_lib_arch)
  {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  // Map COFF machine codes to printable architecture names.
  typedef struct
  {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[]={
    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
  };
  #if   (defined _M_IA64)
    static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64;
  #elif (defined _M_AMD64)
    static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64;
  #elif (defined _M_IX86)
    static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386;
  #else
    #error Method os::dll_load requires that one of following \
           is defined :_M_IA64,_M_AMD64 or _M_IX86
  #endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str=NULL,*lib_arch_str=NULL;
  for (unsigned int i=0;i<ARRAY_SIZE(arch_array);i++)
  {
    if (lib_arch==arch_array[i].arch_code)
      lib_arch_str=arch_array[i].arch_name;
    if (running_arch==arch_array[i].arch_code)
      running_arch_str=arch_array[i].arch_name;
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch)
  {
    return NULL;
  }

  if (lib_arch_str!=NULL)
  {
    ::_snprintf(ebuf, ebuflen-1,
      "Can't load %s-bit .dll on a %s-bit platform",
      lib_arch_str,running_arch_str);
  }
  else
  {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen-1,
      "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
      lib_arch,running_arch_str);
  }

  return NULL;
}


// Print one line per loaded module to |st| via the enumeration callback.
void os::print_dll_info(outputStream *st) {
  int pid = os::current_process_id();
  st->print_cr("Dynamic libraries:");
  enumerate_modules(pid, _print_module, (void *)st);
}

void os::print_os_info_brief(outputStream* st) {
  // The full report is short enough to serve as the brief report too.
  os::print_os_info(st);
}

void os::print_os_info(outputStream* st) {
  st->print("OS:");

  os::win32::print_windows_version(st);
}

// Print the Windows product name and build, derived from kernel32.dll's
// file-version resource rather than GetVersionEx (see comment below).
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  VS_FIXEDFILEINFO *file_info;
  TCHAR kernel32_path[MAX_PATH];
  UINT len, ret;

  // Use the GetVersionEx information to see if we're on a server or
  // workstation edition of Windows. Starting with Windows 8.1 we can't
  // trust the OS version information returned by this API.
  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("Call to GetVersionEx failed");
    return;
  }
  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);

  // Get the full path to \Windows\System32\kernel32.dll and use that for
  // determining what version of Windows we're running on.
  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
  ret = GetSystemDirectory(kernel32_path, len);
  if (ret == 0 || ret > len) {
    st->print_cr("Call to GetSystemDirectory failed");
    return;
  }
  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);

  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
  if (version_size == 0) {
    st->print_cr("Call to GetFileVersionInfoSize failed");
    return;
  }

  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
  if (version_info == NULL) {
    st->print_cr("Failed to allocate version_info");
    return;
  }

  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
    os::free(version_info);
    st->print_cr("Call to GetFileVersionInfo failed");
    return;
  }

  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
    os::free(version_info);
    st->print_cr("Call to VerQueryValue failed");
    return;
  }

  // dwProductVersionMS/LS pack major.minor and build.minor-build as
  // HIWORD/LOWORD pairs.
  int major_version = HIWORD(file_info->dwProductVersionMS);
  int minor_version = LOWORD(file_info->dwProductVersionMS);
  int build_number = HIWORD(file_info->dwProductVersionLS);
  int build_minor = LOWORD(file_info->dwProductVersionLS);
  int os_vers = major_version * 1000 + minor_version;
  os::free(version_info);

  st->print(" Windows ");
  switch (os_vers) {

  case 6000:
    if (is_workstation) {
      st->print("Vista");
    } else {
      st->print("Server 2008");
    }
    break;

  case 6001:
    if (is_workstation) {
      st->print("7");
    } else {
      st->print("Server 2008 R2");
    }
    break;

  case 6002:
    if (is_workstation) {
      st->print("8");
    } else {
      st->print("Server 2012");
    }
    break;

  case 6003:
    if (is_workstation) {
      st->print("8.1");
    } else {
      st->print("Server 2012 R2");
    }
    break;

  case 6004:
1757 if (is_workstation) { 1758 st->print("10"); 1759 } else { 1760 st->print("Server 2016"); 1761 } 1762 break; 1763 1764 default: 1765 // Unrecognized windows, print out its major and minor versions 1766 st->print("%d.%d", major_version, minor_version); 1767 break; 1768 } 1769 1770 // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could 1771 // find out whether we are running on 64 bit processor or not 1772 SYSTEM_INFO si; 1773 ZeroMemory(&si, sizeof(SYSTEM_INFO)); 1774 os::Kernel32Dll::GetNativeSystemInfo(&si); 1775 if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) { 1776 st->print(" , 64 bit"); 1777 } 1778 1779 st->print(" Build %d", build_number); 1780 st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor); 1781 st->cr(); 1782 } 1783 1784 void os::pd_print_cpu_info(outputStream* st) { 1785 // Nothing to do for now. 1786 } 1787 1788 void os::print_memory_info(outputStream* st) { 1789 st->print("Memory:"); 1790 st->print(" %dk page", os::vm_page_size()>>10); 1791 1792 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 1793 // value if total memory is larger than 4GB 1794 MEMORYSTATUSEX ms; 1795 ms.dwLength = sizeof(ms); 1796 GlobalMemoryStatusEx(&ms); 1797 1798 st->print(", physical %uk", os::physical_memory() >> 10); 1799 st->print("(%uk free)", os::available_memory() >> 10); 1800 1801 st->print(", swap %uk", ms.ullTotalPageFile >> 10); 1802 st->print("(%uk free)", ms.ullAvailPageFile >> 10); 1803 st->cr(); 1804 } 1805 1806 void os::print_siginfo(outputStream *st, void *siginfo) { 1807 EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo; 1808 st->print("siginfo:"); 1809 st->print(" ExceptionCode=0x%x", er->ExceptionCode); 1810 1811 if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 1812 er->NumberParameters >= 2) { 1813 switch (er->ExceptionInformation[0]) { 1814 case 0: st->print(", reading address"); break; 1815 case 1: st->print(", writing address"); break; 1816 default: 
      st->print(", ExceptionInformation=" INTPTR_FORMAT,
                er->ExceptionInformation[0]);
    }
    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR &&
             er->NumberParameters >= 2 && UseSharedSpaces) {
    // A paging error inside the CDS archive usually means the mapped file
    // became unreadable (disk/network trouble), so say so explicitly.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) {
      st->print("\n\nError accessing class data sharing archive." \
                " Mapped file inaccessible during execution, " \
                " possible disk/network problem.");
    }
  } else {
    // Unknown exception code: dump all raw parameters.
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}


// C99-conforming vsnprintf wrapper: always NUL-terminates (when len > 0) and
// returns the length the full output would have had.
int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
#if _MSC_VER >= 1900
  // Starting with Visual Studio 2015, vsnprintf is C99 compliant.
  int result = ::vsnprintf(buf, len, fmt, args);
  // If an encoding error occurred (result < 0) then it's not clear
  // whether the buffer is NUL terminated, so ensure it is.
  if ((result < 0) && (len > 0)) {
    buf[len - 1] = '\0';
  }
  return result;
#else
  // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
  // _vsnprintf, whose behavior seems to be *mostly* consistent across
  // versions.  However, when len == 0, avoid _vsnprintf too, and just
  // go straight to _vscprintf.  The output is going to be truncated in
  // that case, except in the unusual case of empty output.  More
  // importantly, the documentation for various versions of Visual Studio
  // are inconsistent about the behavior of _vsnprintf when len == 0,
  // including it possibly being an error.
  int result = -1;
  if (len > 0) {
    result = _vsnprintf(buf, len, fmt, args);
    // If output (including NUL terminator) is truncated, the buffer
    // won't be NUL terminated.  Add the trailing NUL specified by C99.
    if ((result < 0) || (result >= (int) len)) {
      buf[len - 1] = '\0';
    }
  }
  if (result < 0) {
    // Truncated (or len == 0): compute the would-be length per C99.
    result = _vscprintf(fmt, args);
  }
  return result;
#endif // _MSC_VER dispatch
}

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}

// Cached result of os::jvm_path(); filled on first call.
static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::created_by_gamma_launcher()) {
     // Support for the gamma launcher. Check for an
     // JAVA_HOME environment variable
     // and fix up the path so it looks like
     // libjvm.so is installed there (append a fake suffix
     // hotspot/libjvm.so).
     char* java_home_var = ::getenv("JAVA_HOME");
     if (java_home_var != NULL && java_home_var[0] != 0 &&
         strlen(java_home_var) < (size_t)buflen) {

        strncpy(buf, java_home_var, buflen);

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        size_t len = strlen(buf);
        char* jrebin_p = buf + len;
        jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
        if (0 != _access(buf, 0)) {
          jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
        }
        len = strlen(buf);
        jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
     }
  }

  // Fall back to asking the OS where this module was loaded from.
  if(buf[0] == '\0') {
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  strncpy(saved_jvm_path, buf, MAX_PATH);
}


void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  // 32-bit __stdcall decoration: leading underscore.
  st->print("_");
#endif
}


void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  // 32-bit __stdcall decoration: @<bytes of arguments>.
  st->print("@%d", args_size * sizeof(int));
#endif
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c

// Copy a message for the last OS (GetLastError) or C-runtime (errno) error
// into |buf|; returns the message length, or 0 if there was no error.
size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
          FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
          NULL,
          errval,
          0,
          buf,
          (DWORD)len,
          NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}

// Last error as an int: GetLastError() if set, otherwise errno.
int os::get_last_error() {
  DWORD error = GetLastError();
  if (error == 0)
    error = errno;
  return (int)error;
}

// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;

static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}

void* os::user_handler() {
  return (void*) UserHandler;
}

// Install |handler| for |signal_number|, returning the previous handler.
// SIGBREAK is intercepted (unless -Xrs) and routed through sigbreakHandler
// rather than the C runtime - see the kernel-bug note above.
void* os::signal(int signal_number, void* handler) {
  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
    void (*oldHandler)(int) = sigbreakHandler;
    sigbreakHandler = (void (*)(int)) handler;
    return (void*) oldHandler;
  } else {
    return (void*)::signal(signal_number, (void (*)(int))handler);
  }
}

void os::signal_raise(int signal_number) {
  raise(signal_number);
}

// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events.  We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
  switch(event) {
    case CTRL_C_EVENT:
      if (is_error_reported()) {
        // Ctrl-C is pressed during error reporting, likely because the error
        // handler fails to abort. Let VM die immediately.
2030 os::die(); 2031 } 2032 2033 os::signal_raise(SIGINT); 2034 return TRUE; 2035 break; 2036 case CTRL_BREAK_EVENT: 2037 if (sigbreakHandler != NULL) { 2038 (*sigbreakHandler)(SIGBREAK); 2039 } 2040 return TRUE; 2041 break; 2042 case CTRL_LOGOFF_EVENT: { 2043 // Don't terminate JVM if it is running in a non-interactive session, 2044 // such as a service process. 2045 USEROBJECTFLAGS flags; 2046 HANDLE handle = GetProcessWindowStation(); 2047 if (handle != NULL && 2048 GetUserObjectInformation(handle, UOI_FLAGS, &flags, 2049 sizeof( USEROBJECTFLAGS), NULL)) { 2050 // If it is a non-interactive session, let next handler to deal 2051 // with it. 2052 if ((flags.dwFlags & WSF_VISIBLE) == 0) { 2053 return FALSE; 2054 } 2055 } 2056 } 2057 case CTRL_CLOSE_EVENT: 2058 case CTRL_SHUTDOWN_EVENT: 2059 os::signal_raise(SIGTERM); 2060 return TRUE; 2061 break; 2062 default: 2063 break; 2064 } 2065 return FALSE; 2066 } 2067 2068 /* 2069 * The following code is moved from os.cpp for making this 2070 * code platform specific, which it is by its very nature. 2071 */ 2072 2073 // Return maximum OS signal used + 1 for internal use only 2074 // Used as exit signal for signal_thread 2075 int os::sigexitnum_pd(){ 2076 return NSIG; 2077 } 2078 2079 // a counter for each possible signal value, including signal_thread exit signal 2080 static volatile jint pending_signals[NSIG+1] = { 0 }; 2081 static HANDLE sig_sem = NULL; 2082 2083 void os::signal_init_pd() { 2084 // Initialize signal structures 2085 memset((void*)pending_signals, 0, sizeof(pending_signals)); 2086 2087 sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL); 2088 2089 // Programs embedding the VM do not want it to attempt to receive 2090 // events like CTRL_LOGOFF_EVENT, which are used to implement the 2091 // shutdown hooks mechanism introduced in 1.3. 
For example, when 2092 // the VM is run as part of a Windows NT service (i.e., a servlet 2093 // engine in a web server), the correct behavior is for any console 2094 // control handler to return FALSE, not TRUE, because the OS's 2095 // "final" handler for such events allows the process to continue if 2096 // it is a service (while terminating it if it is not a service). 2097 // To make this behavior uniform and the mechanism simpler, we 2098 // completely disable the VM's usage of these console events if -Xrs 2099 // (=ReduceSignalUsage) is specified. This means, for example, that 2100 // the CTRL-BREAK thread dump mechanism is also disabled in this 2101 // case. See bugs 4323062, 4345157, and related bugs. 2102 2103 if (!ReduceSignalUsage) { 2104 // Add a CTRL-C handler 2105 SetConsoleCtrlHandler(consoleHandler, TRUE); 2106 } 2107 } 2108 2109 void os::signal_notify(int signal_number) { 2110 BOOL ret; 2111 if (sig_sem != NULL) { 2112 Atomic::inc(&pending_signals[signal_number]); 2113 ret = ::ReleaseSemaphore(sig_sem, 1, NULL); 2114 assert(ret != 0, "ReleaseSemaphore() failed"); 2115 } 2116 } 2117 2118 static int check_pending_signals(bool wait_for_signal) { 2119 DWORD ret; 2120 while (true) { 2121 for (int i = 0; i < NSIG + 1; i++) { 2122 jint n = pending_signals[i]; 2123 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) { 2124 return i; 2125 } 2126 } 2127 if (!wait_for_signal) { 2128 return -1; 2129 } 2130 2131 JavaThread *thread = JavaThread::current(); 2132 2133 ThreadBlockInVM tbivm(thread); 2134 2135 bool threadIsSuspended; 2136 do { 2137 thread->set_suspend_equivalent(); 2138 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 2139 ret = ::WaitForSingleObject(sig_sem, INFINITE); 2140 assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed"); 2141 2142 // were we externally suspended while we were waiting? 
2143 threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); 2144 if (threadIsSuspended) { 2145 // 2146 // The semaphore has been incremented, but while we were waiting 2147 // another thread suspended us. We don't want to continue running 2148 // while suspended because that would surprise the thread that 2149 // suspended us. 2150 // 2151 ret = ::ReleaseSemaphore(sig_sem, 1, NULL); 2152 assert(ret != 0, "ReleaseSemaphore() failed"); 2153 2154 thread->java_suspend_self(); 2155 } 2156 } while (threadIsSuspended); 2157 } 2158 } 2159 2160 int os::signal_lookup() { 2161 return check_pending_signals(false); 2162 } 2163 2164 int os::signal_wait() { 2165 return check_pending_signals(true); 2166 } 2167 2168 // Implicit OS exception handling 2169 2170 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) { 2171 JavaThread* thread = JavaThread::current(); 2172 // Save pc in thread 2173 #ifdef _M_IA64 2174 // Do not blow up if no thread info available. 2175 if (thread) { 2176 // Saving PRECISE pc (with slot information) in thread. 2177 uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress; 2178 // Convert precise PC into "Unix" format 2179 precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2); 2180 thread->set_saved_exception_pc((address)precise_pc); 2181 } 2182 // Set pc to handler 2183 exceptionInfo->ContextRecord->StIIP = (DWORD64)handler; 2184 // Clear out psr.ri (= Restart Instruction) in order to continue 2185 // at the beginning of the target bundle. 2186 exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF; 2187 assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!"); 2188 #else 2189 #ifdef _M_AMD64 2190 // Do not blow up if no thread info available. 
2191 if (thread) { 2192 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip); 2193 } 2194 // Set pc to handler 2195 exceptionInfo->ContextRecord->Rip = (DWORD64)handler; 2196 #else 2197 // Do not blow up if no thread info available. 2198 if (thread) { 2199 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip); 2200 } 2201 // Set pc to handler 2202 exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler; 2203 #endif 2204 #endif 2205 2206 // Continue the execution 2207 return EXCEPTION_CONTINUE_EXECUTION; 2208 } 2209 2210 2211 // Used for PostMortemDump 2212 extern "C" void safepoints(); 2213 extern "C" void find(int x); 2214 extern "C" void events(); 2215 2216 // According to Windows API documentation, an illegal instruction sequence should generate 2217 // the 0xC000001C exception code. However, real world experience shows that occasionnaly 2218 // the execution of an illegal instruction can generate the exception code 0xC000001E. This 2219 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems). 2220 2221 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E 2222 2223 // From "Execution Protection in the Windows Operating System" draft 0.35 2224 // Once a system header becomes available, the "real" define should be 2225 // included or copied here. 2226 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08 2227 2228 // Handle NAT Bit consumption on IA64. 2229 #ifdef _M_IA64 2230 #define EXCEPTION_REG_NAT_CONSUMPTION STATUS_REG_NAT_CONSUMPTION 2231 #endif 2232 2233 // Windows Vista/2008 heap corruption check 2234 #define EXCEPTION_HEAP_CORRUPTION 0xC0000374 2235 2236 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual 2237 // C++ compiler contain this error code. Because this is a compiler-generated 2238 // error, the code is not listed in the Win32 API header files. 
2239 // The code is actually a cryptic mnemonic device, with the initial "E" 2240 // standing for "exception" and the final 3 bytes (0x6D7363) representing the 2241 // ASCII values of "msc". 2242 2243 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363 2244 2245 #define def_excpt(val) { #val, (val) } 2246 2247 static const struct { char* name; uint number; } exceptlabels[] = { 2248 def_excpt(EXCEPTION_ACCESS_VIOLATION), 2249 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), 2250 def_excpt(EXCEPTION_BREAKPOINT), 2251 def_excpt(EXCEPTION_SINGLE_STEP), 2252 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), 2253 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND), 2254 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO), 2255 def_excpt(EXCEPTION_FLT_INEXACT_RESULT), 2256 def_excpt(EXCEPTION_FLT_INVALID_OPERATION), 2257 def_excpt(EXCEPTION_FLT_OVERFLOW), 2258 def_excpt(EXCEPTION_FLT_STACK_CHECK), 2259 def_excpt(EXCEPTION_FLT_UNDERFLOW), 2260 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO), 2261 def_excpt(EXCEPTION_INT_OVERFLOW), 2262 def_excpt(EXCEPTION_PRIV_INSTRUCTION), 2263 def_excpt(EXCEPTION_IN_PAGE_ERROR), 2264 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION), 2265 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2), 2266 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION), 2267 def_excpt(EXCEPTION_STACK_OVERFLOW), 2268 def_excpt(EXCEPTION_INVALID_DISPOSITION), 2269 def_excpt(EXCEPTION_GUARD_PAGE), 2270 def_excpt(EXCEPTION_INVALID_HANDLE), 2271 def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION), 2272 def_excpt(EXCEPTION_HEAP_CORRUPTION) 2273 #ifdef _M_IA64 2274 , def_excpt(EXCEPTION_REG_NAT_CONSUMPTION) 2275 #endif 2276 }; 2277 2278 const char* os::exception_name(int exception_code, char *buf, size_t size) { 2279 uint code = static_cast<uint>(exception_code); 2280 for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) { 2281 if (exceptlabels[i].number == code) { 2282 jio_snprintf(buf, size, "%s", exceptlabels[i].name); 2283 return buf; 2284 } 2285 } 2286 2287 return NULL; 2288 } 2289 2290 
//-----------------------------------------------------------------------------
// Fix up the register state after a hardware trap caused by 'idiv' so that
// execution can resume past the instruction with the architecturally-defined
// Java result (min_jint / -1 == min_jint, remainder 0).
LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by idiv; should only happen for -MinInt/-1
  // (division by zero is handled explicitly)
#ifdef _M_IA64
  assert(0, "Fix Handle_IDiv_Exception");
#else
  #ifdef _M_AMD64
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Rip;
  assert(pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  assert(ctx->Rax == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
  ctx->Rax = (DWORD64)min_jint;      // result
  ctx->Rdx = (DWORD64)0;             // remainder
  // Continue the execution
  #else
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Eip;
  assert(pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  assert(ctx->Eax == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Eip = (DWORD)pc + 2;          // idiv reg, reg  is 2 bytes
  ctx->Eax = (DWORD)min_jint;        // result
  ctx->Edx = (DWORD)0;               // remainder
  // Continue the execution
  #endif
#endif
  return EXCEPTION_CONTINUE_EXECUTION;
}

#ifndef _WIN64
//-----------------------------------------------------------------------------
// If a native method clobbered the x87 control word, restore the VM's
// standard FPCW (masking FLT exceptions) and resume; otherwise defer to the
// previously-installed UnhandledExceptionFilter, if any.
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by native method modifying control word
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;

  // All FLT_* codes share the single recovery path below (deliberate
  // fall-through through the case labels).
  switch (exception_code) {
    case EXCEPTION_FLT_DENORMAL_OPERAND:
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
    case EXCEPTION_FLT_INEXACT_RESULT:
    case EXCEPTION_FLT_INVALID_OPERATION:
    case EXCEPTION_FLT_OVERFLOW:
    case EXCEPTION_FLT_STACK_CHECK:
    case EXCEPTION_FLT_UNDERFLOW:
      jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
      if (fp_control_word != ctx->FloatSave.ControlWord) {
        // Restore FPCW and mask out FLT exceptions
        ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
        // Mask out pending FLT exceptions
        ctx->FloatSave.StatusWord &= 0xffffff00;
        return EXCEPTION_CONTINUE_EXECUTION;
      }
  }

  if (prev_uef_handler != NULL) {
    // We didn't handle this exception so pass it to the previous
    // UnhandledExceptionFilter.
    return (prev_uef_handler)(exceptionInfo);
  }

  return EXCEPTION_CONTINUE_SEARCH;
}
#else //_WIN64
/*
  On Windows, the mxcsr control bits are non-volatile across calls
  See also CR 6192333
  If EXCEPTION_FLT_* happened after some native method modified
  mxcsr - it is not a jvm fault.
  However should we decide to restore of mxcsr after a faulty
  native method we can uncomment following code
      jint MxCsr = INITIAL_MXCSR;
        // we can't use StubRoutines::addr_mxcsr_std()
        // because in Win64 mxcsr is not saved there
      if (MxCsr != ctx->MxCsr) {
        ctx->MxCsr = MxCsr;
        return EXCEPTION_CONTINUE_EXECUTION;
      }

*/
#endif // _WIN64


// Hand the failure off to the VM error reporter (hs_err file, etc.).
// Normally does not return; see the UseOsErrorReporting note below.
static inline void report_error(Thread* t, DWORD exception_code,
                                address addr, void* siginfo, void* context) {
  VMError err(t, exception_code, addr, siginfo, context);
  err.report_and_die();

  // If UseOsErrorReporting, this will return here and save the error file
  // somewhere where we can find it in the minidump.
}

//-----------------------------------------------------------------------------
// The VM's top-level Win32 structured exception filter. Classifies hardware
// traps (access violations, stack overflow, idiv overflow, FLT exceptions,
// illegal instructions) and either redirects execution to a VM handler stub
// via Handle_Exception(), resumes execution, or reports a fatal error.
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#ifdef _M_IA64
  // On Itanium, we need the "precise pc", which has the slot number coded
  // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
  address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
  // Convert the pc to "Unix format", which has the slot number coded
  // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
  // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
  // information is saved in the Unix format.
  address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
#else
  #ifdef _M_AMD64
  address pc = (address) exceptionInfo->ContextRecord->Rip;
  #else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
  #endif
#endif
  Thread* t = ThreadLocalStorage::get_thread_slow();          // slow & steady

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      bool instr_spans_page_boundary =
        (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                         (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start =
            (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          if (PrintMiscellaneous && Verbose) {
            char buf[256];
            jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                         "at " INTPTR_FORMAT
                         ", unguarding " INTPTR_FORMAT ": %s", addr,
                         page_start, (res ? "success" : strerror(errno)));
            tty->print_raw_cr(buf);
          }

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here.  Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM.  Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop.  This condition is probably even more unlikely
          // than the first.
          //
          // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are unnecessary complication:
          // this handler is a best-effort safety net, not a complete solution.
          // It is disabled by default and should only be used as a workaround
          // in case we missed any no-execute-unsafe VM code.

          last_addr = addr;

          return EXCEPTION_CONTINUE_EXECUTION;
        }
      }

      // Last unguard failed or not unguarding
      tty->print_raw_cr("Execution protection violation");
      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    }
  }
#endif // _WIN64

  // Check to see if we caught the safepoint code in the
  // process of write protecting the memory serialization page.
  // It write enables the page immediately after protecting it
  // so just return.
  if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
    JavaThread* thread = (JavaThread*) t;
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];
    if ( os::is_memory_serialize_page(thread, addr) ) {
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
      VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
  }

  if (t != NULL && t->is_Java_thread()) {
    JavaThread* thread = (JavaThread*) t;
    bool in_java = thread->thread_state() == _thread_in_Java;

    // Handle potential stack overflows up front.
    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
      if (os::uses_stack_guard_pages()) {
#ifdef _M_IA64
        // Use guard page for register stack.
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        // Check for a register stack overflow on Itanium
        if (thread->addr_inside_register_stack_red_zone(addr)) {
          // Fatal red zone violation happens if the Java program
          // catches a StackOverflow error and does so much processing
          // that it runs beyond the unprotected yellow guard zone. As
          // a result, we are out of here.
          fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
        } else if(thread->addr_inside_register_stack(addr)) {
          // Disable the yellow zone which sets the state that
          // we've got a stack overflow problem.
          if (thread->stack_yellow_zone_enabled()) {
            thread->disable_stack_yellow_zone();
          }
          // Give us some room to process the exception.
          thread->disable_register_stack_guard();
          // Tracing with +Verbose.
          if (Verbose) {
            tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
            tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
            tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
            tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
                          thread->register_stack_base(),
                          thread->register_stack_base() + thread->stack_size());
          }

          // Reguard the permanent register stack red zone just to be sure.
          // We saw Windows silently disabling this without telling us.
          thread->enable_register_stack_red_zone();

          return Handle_Exception(exceptionInfo,
            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
#endif
        if (thread->stack_yellow_zone_enabled()) {
          // Yellow zone violation.  The o/s has unprotected the first yellow
          // zone page for us.  Note:  must call disable_stack_yellow_zone to
          // update the enabled status, even if the zone contains only one page.
          thread->disable_stack_yellow_zone();
          // If not in java code, return and hope for the best.
          return in_java ? Handle_Exception(exceptionInfo,
            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
            : EXCEPTION_CONTINUE_EXECUTION;
        } else {
          // Fatal red zone violation.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
          report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                       exceptionInfo->ContextRecord);
          return EXCEPTION_CONTINUE_SEARCH;
        }
      } else if (in_java) {
        // JVM-managed guard pages cannot be used on win95/98.  The o/s provides
        // a one-time-only guard page, which it has released to us.  The next
        // stack overflow on this thread will result in an ACCESS_VIOLATION.
        return Handle_Exception(exceptionInfo,
          SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
      } else {
        // Can only return and hope for the best.  Further stack growth will
        // result in an ACCESS_VIOLATION.
        return EXCEPTION_CONTINUE_EXECUTION;
      }
    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
      // Either stack overflow or null pointer exception.
      if (in_java) {
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        address stack_end = thread->stack_base() - thread->stack_size();
        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
          // Stack overflow.
          assert(!os::uses_stack_guard_pages(),
                 "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        //
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        //
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          if (os::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
        {
#ifdef _WIN64
          //
          // If it's a legal stack address map the entire region in
          //
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) {
            addr = (address)((uintptr_t)addr &
                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
            os::commit_memory((char *)addr, thread->stack_base() - addr,
                              !ExecMem);
            return EXCEPTION_CONTINUE_EXECUTION;
          }
          else
#endif
          {
            // Null pointer exception.
#ifdef _M_IA64
            // Process implicit null checks in compiled code. Note: Implicit null checks
            // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
            if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
              CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
              // Handle implicit null check in UEP method entry
              if (cb && (cb->is_frame_complete_at(pc) ||
                  (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
                if (Verbose) {
                  intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
                  tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
                  tty->print_cr("      to addr " INTPTR_FORMAT, addr);
                  tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
                                *(bundle_start + 1), *bundle_start);
                }
                return Handle_Exception(exceptionInfo,
                  SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
              }
            }

            // Implicit null checks were processed above.  Hence, we should not reach
            // here in the usual case => die!
            if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;

#else // !IA64

            // Windows 98 reports faulting addresses incorrectly
            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
                !os::win32::is_nt()) {
              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
            }
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;
#endif
          }
        }
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
        address addr = JNI_FastGetField::find_slowcase_pc(pc);
        if (addr != (address)-1) {
          return Handle_Exception(exceptionInfo, addr);
        }
      }
#endif

      // Stack overflow or null pointer exception in native code.
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#if defined _M_IA64
    else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
              exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
      M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);

      // Compiled method patched to be non entrant? Following conditions must apply:
      // 1. must be first instruction in bundle
      // 2. must be a break instruction with appropriate code
      if((((uint64_t) pc & 0x0F) == 0) &&
         (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
        return Handle_Exception(exceptionInfo,
                                (address)SharedRuntime::get_handle_wrong_method_stub());
      }
    } // /EXCEPTION_ILLEGAL_INSTRUCTION
#endif


    if (in_java) {
      switch (exception_code) {
      case EXCEPTION_INT_DIVIDE_BY_ZERO:
        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

      case EXCEPTION_INT_OVERFLOW:
        return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }
#ifndef _WIN64
    if (((thread->thread_state() == _thread_in_Java) ||
         (thread->thread_state() == _thread_in_native)) &&
        exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION)
    {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
#endif //_WIN64
  }

  if (exception_code != EXCEPTION_BREAKPOINT) {
    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                 exceptionInfo->ContextRecord);
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

#ifndef _WIN64
// Special care for fast JNI accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
// the heap gets shrunk before the field access.
// Need to install our own structured exception handler since native code may
// install its own.
// SEH filter for the fast JNI accessor wrappers below: on an access
// violation inside an accessor stub, resume at the stub's slow-case pc.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Wrap each generated fast accessor in a Win32 structured exception handler
// so that a trap inside it is redirected by the filter above rather than
// crashing the process.
#define DEFINE_FAST_GETFIELD(Return,Fieldname,Result) \
Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) { \
  __try { \
    return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, obj, fieldID); \
  } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { \
  } \
  return 0; \
}

DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
DEFINE_FAST_GETFIELD(jchar,    char,   Char)
DEFINE_FAST_GETFIELD(jshort,   short,  Short)
DEFINE_FAST_GETFIELD(jint,     int,    Int)
DEFINE_FAST_GETFIELD(jlong,    long,   Long)
DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
DEFINE_FAST_GETFIELD(jdouble,  double, Double)

// Return the SEH-wrapped fast accessor for the given primitive type.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
    case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
    case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
    case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
    case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
    case T_INT:     return (address)jni_fast_GetIntField_wrapper;
    case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
    case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
    case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
    default:        ShouldNotReachHere();
  }
  return (address)-1;
}
#endif

void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
  // Install a win32 structured exception handler around the test
  // function call so the VM can generate an error dump if needed.
  __try {
    (*funcPtr)();
  } __except(topLevelExceptionFilter(
             (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }
}

// Virtual Memory

int os::vm_page_size() { return os::win32::vm_page_size(); }
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}

// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
//   + select Control Panel -> Administrative Tools -> Local Security Policy
//   + select Local Policies -> User Rights Assignment
//   + double click "Lock pages in memory", add users and/or groups
//   + reboot
// Note the above steps are needed for administrator as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment it only uses large page if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future, if so the code below needs to be revisited.
#ifndef MEM_LARGE_PAGES
#define MEM_LARGE_PAGES 0x20000000
#endif

// Process and token handles opened by request_lock_memory_privilege() and
// closed by cleanup_after_large_page_init(); only live during large page init.
static HANDLE _hProcess;
static HANDLE _hToken;

// Container for NUMA node list info.
// Records which NUMA nodes are covered by this process's affinity mask.
// build() must be called after the Kernel32 NUMA function pointers have been
// resolved (see the note in the constructor).
class NUMANodeListHolder {
 private:
  int *_numa_used_node_list;  // allocated below
  int _numa_used_node_count;

  // Release the node array, if any (C-heap allocated in build()).
  void free_node_list() {
    if (_numa_used_node_list != NULL) {
      FREE_C_HEAP_ARRAY(int, _numa_used_node_list, mtInternal);
    }
  }

 public:
  NUMANodeListHolder() {
    _numa_used_node_count = 0;
    _numa_used_node_list = NULL;
    // do rest of initialization in build routine (after function pointers are set up)
  }

  ~NUMANodeListHolder() {
    free_node_list();
  }

  // Populate the node list from the process affinity mask and the OS NUMA
  // topology. Returns true only if more than one node is usable (NUMA
  // interleaving is pointless on a single node).
  bool build() {
    DWORD_PTR proc_aff_mask;
    DWORD_PTR sys_aff_mask;
    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
    ULONG highest_node_number;
    if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false;
    free_node_list();
    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
    for (unsigned int i = 0; i <= highest_node_number; i++) {
      ULONGLONG proc_mask_numa_node;
      if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
      // keep node i only if the process can actually run on one of its CPUs
      if ((proc_aff_mask & proc_mask_numa_node) != 0) {
        _numa_used_node_list[_numa_used_node_count++] = i;
      }
    }
    return (_numa_used_node_count > 1);
  }

  int get_count() { return _numa_used_node_count; }

  int get_node_list_entry(int n) {
    // for indexes out of range, returns -1
    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
  }

} numa_node_list_holder;



// Large page size chosen by os::large_page_init(); 0 until initialized.
static size_t _large_page_size = 0;

// True if all Kernel32/Advapi32 entry points needed for large page setup
// were resolved.
static bool resolve_functions_for_large_page_init() {
  return os::Kernel32Dll::GetLargePageMinimumAvailable() &&
    os::Advapi32Dll::AdvapiAvailable();
}

// Try to enable SeLockMemoryPrivilege for the current process (required to
// allocate MEM_LARGE_PAGES memory). Leaves _hProcess/_hToken open for
// cleanup_after_large_page_init() regardless of the outcome.
static bool request_lock_memory_privilege() {
  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
                          os::current_process_id());

  LUID luid;
  if (_hProcess != NULL &&
      os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
      os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

    TOKEN_PRIVILEGES tp;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Luid = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See MSDN document.
    if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
        (GetLastError() == ERROR_SUCCESS)) {
      return true;
    }
  }

  return false;
}

// Close the handles opened by request_lock_memory_privilege().
static void cleanup_after_large_page_init() {
  if (_hProcess) CloseHandle(_hProcess);
  _hProcess = NULL;
  if (_hToken) CloseHandle(_hToken);
  _hToken = NULL;
}

// Initialize NUMA interleaving support. Returns true (and leaves
// numa_node_list_holder populated) only if the OS exposes the NUMA calls and
// the process spans more than one node. Also rounds NUMAInterleaveGranularity
// up to a legal value as a side effect.
static bool numa_interleaving_init() {
  bool success = false;
  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

  // print a warning if UseNUMAInterleaving flag is specified on command line
  bool warn_on_failure = use_numa_interleaving_specified;
# define WARN(msg) if (warn_on_failure) { warning(msg); }

  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);

  if (os::Kernel32Dll::NumaCallsAvailable()) {
    if (numa_node_list_holder.build()) {
      if (PrintMiscellaneous && Verbose) {
        tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
        for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
          tty->print("%d ", numa_node_list_holder.get_node_list_entry(i));
        }
        tty->print("\n");
      }
      success = true;
    } else {
      WARN("Process does not cover multiple NUMA nodes.");
    }
  } else {
    WARN("NUMA Interleaving is not supported by the operating system.");
  }
  if (!success) {
    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
  }
  return success;
#undef WARN
}

// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
//  * UseNUMAInterleaving requires a separate node for each piece
// Returns the base of the allocated range, or NULL on any failure (in which
// case any partially committed pieces have been released).
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, DWORD prot,
                                         bool should_inject_error = false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) VirtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  // NMT: record the probe reservation, then immediately release it again --
  // we only wanted to find a legal contiguous address range.
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = (char *) align_size_up((size_t)p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t bytes_remaining = bytes;
  // An overflow of align_size_up() would have been caught above
  // in the calculation of size_of_reserve.
  char * next_alloc_addr = p_buf;
  HANDLE hProc = GetCurrentProcess();

#ifdef ASSERT
  // Variable for the failure injection
  long ran_num = os::random();
  size_t fail_after = ran_num % bytes;
#endif

  int count = 0;
  while (bytes_remaining) {
    // select bytes_to_rq to get to the next chunk_size boundary

    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
    // Note allocate and commit
    char * p_new;

#ifdef ASSERT
    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
#else
    const bool inject_error_now = false;
#endif

    if (inject_error_now) {
      p_new = NULL;
    } else {
      if (!UseNUMAInterleaving) {
        p_new = (char *) VirtualAlloc(next_alloc_addr,
                                      bytes_to_rq,
                                      flags,
                                      prot);
      } else {
        // get the next node to use from the used_node_list
        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
        p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc,
                                                            next_alloc_addr,
                                                            bytes_to_rq,
                                                            flags,
                                                            prot,
                                                            node);
      }
    }

    if (p_new == NULL) {
      // Free any allocated pages
      if (next_alloc_addr > p_buf) {
        // Some memory was committed so release it.
        size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // need to create a dummy 'reserve' record to match
        // the release.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        if (TracePageSizes && Verbose) {
          tty->print_cr("Reserving pages individually failed.");
        }
      }
#endif
      return NULL;
    }

    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}

// Determine whether large pages can be used, acquire the required privilege,
// and pick _large_page_size. On exit UseLargePages reflects the outcome and
// _page_sizes[] is updated if a large page size was established.
void os::large_page_init() {
  if (!UseLargePages) return;

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  bool success = false;

# define WARN(msg) if (warn_on_failure) { warning(msg); }
  if (resolve_functions_for_large_page_init()) {
    if (request_lock_memory_privilege()) {
      size_t s = os::Kernel32Dll::GetLargePageMinimum();
      if (s) {
#if defined(IA32) || defined(AMD64)
        if (s > 4*M || LargePageSizeInBytes > 4*M) {
          WARN("JVM cannot use large pages bigger than 4mb.");
        } else {
#endif
          // honor a user-requested size only if it is a multiple of the
          // OS minimum large page size
          if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
            _large_page_size = LargePageSizeInBytes;
          } else {
            _large_page_size = s;
          }
          success = true;
#if defined(IA32) || defined(AMD64)
        }
#endif
      } else {
        WARN("Large page is not supported by the processor.");
      }
    } else {
      WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
    }
  } else {
    WARN("Large page is not supported by the operating system.");
  }
#undef WARN

  const size_t default_page_size = (size_t) vm_page_size();
  if (success && _large_page_size > default_page_size) {
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = default_page_size;
    _page_sizes[2] = 0;
  }

  cleanup_after_large_page_init();
  UseLargePages = success;
}

// On win32, one cannot release just a part of reserved memory, it's an
// all or nothing deal.  When we split a reservation, we must break the
// reservation into two reservations.
void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
                                  bool realloc) {
  if (size > 0) {
    release_memory(base, size);
    if (realloc) {
      reserve_memory(split, base);
    }
    if (size != split) {
      reserve_memory(size - split, base + split);
    }
  }
}

// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
// Reserve 'size' bytes aligned to 'alignment' by over-reserving, releasing,
// and re-reserving at the aligned address; loops because another thread may
// grab the released range before we re-reserve it (see comment above).
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);

    os::release_memory(extra_base, extra_size);

    // retry from scratch if someone else reserved this range in the window
    // between release and re-reserve
    aligned_base = os::reserve_memory(size, aligned_base);

  } while (aligned_base == NULL);

  return aligned_base;
}

// Reserve (but do not commit) 'bytes' of address space, optionally at 'addr'.
// alignment_hint is unused here; NUMA-interleaved reservations go through
// allocate_pages_individually().
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if( Verbose && PrintMiscellaneous ) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if( Verbose && PrintMiscellaneous ) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available.
  return reserve_memory(bytes, requested_addr);
}

size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

bool os::can_execute_large_page_memory() {
  return true;
}

// Reserve AND commit 'bytes' of large page memory. Returns NULL (fall back
// to small pages) if the size/alignment constraints are not met or the
// allocation fails.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) {
  assert(UseLargePages, "only for large pages");

  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
    return NULL; // Fallback to small pages.
  }

  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;

  // with large pages, there are two cases where we need to use Individual Allocation
  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages individually.");
    }
    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
    if (p_buf == NULL) {
      // give an appropriate warning message
      if (UseNUMAInterleaving) {
        warning("NUMA large page allocation failed, UseLargePages flag ignored");
      }
      if (UseLargePagesIndividualAllocation) {
        warning("Individually allocated large pages failed, "
                "use -XX:-UseLargePagesIndividualAllocation to turn off");
      }
      return NULL;
    }

    return p_buf;

  } else {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages in a single large chunk.");
    }
    // normal policy just allocate it all at once
    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
    if (res != NULL) {
      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
    }

    return res;
  }
}

bool os::release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  return release_memory(base, bytes);
}

void os::print_statistics() {
}

// Emit a diagnostic warning with the OS error text after a failed commit.
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}

// Commit previously reserved pages; with 'exec' the pages are additionally
// made executable via VirtualProtect. Returns false on failure.
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

// Commit or terminate the VM with an out-of-memory error carrying 'mesg'.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  }
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
  return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
}

bool os::pd_release_memory(char* addr, size_t bytes) {
  // MEM_RELEASE frees the entire reservation; 'bytes' must be 0 per the
  // VirtualFree contract, so the parameter is intentionally unused.
  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}

// Set protections specified
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
  case MEM_PROT_READ: p = PAGE_READONLY; break;
  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
  default:
    ShouldNotReachHere();
  }

  DWORD old_status;

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed) {
    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
                          "cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard page
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
  //
  // Pages in the region become guard pages. Any attempt to access a guard page
  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
  // the guard page status. Guard pages thus act as a one-time access alarm.
  return VirtualProtect(addr, bytes, p, &old_status) != 0;
}

bool os::guard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}

bool os::unguard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}

// The following NUMA/paging hints are no-ops or trivial on Windows.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
bool os::numa_topology_changed() { return false; }
size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id() { return 0; }

// Fill 'ids' with up to 'size' NUMA node ids; reports a single node 0 on
// UMA systems (empty node list). Returns the number of entries written.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // Provide an answer for UMA systems
    ids[0] = 0;
    return 1;
  } else {
    // check for size bigger than actual groups_num
    size = MIN2(size, numa_get_groups_num());
    for (int i = 0; i < (int)size; i++) {
      ids[i] = numa_node_list_holder.get_node_list_entry(i);
    }
    return size;
  }
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}

#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL

void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}

// RAII wrapper that temporarily raises the system timer resolution (via
// timeBeginPeriod/timeEndPeriod) for sleeps that are not a multiple of the
// default 10ms tick; the destructor restores the previous resolution.
class HighResolutionInterval : public CHeapObj<mtThread> {
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
private:
  jlong resolution;
public:
  HighResolutionInterval(jlong ms) {
    resolution = ms % 10L;
    if (resolution != 0) {
      MMRESULT result = timeBeginPeriod(1L);  // return value intentionally unused
    }
  }
  ~HighResolutionInterval() {
    if (resolution != 0) {
      MMRESULT result = timeEndPeriod(1L);  // return value intentionally unused
    }
    resolution = 0L;
  }
};

// Sleep for 'ms' milliseconds. For interruptable sleeps (Java threads only)
// waits on the thread's interrupt event and returns OS_INTRPT if interrupted,
// OS_TIMEOUT otherwise. Sleeps longer than MAXDWORD ms are split into
// recursive pieces.
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  while (ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT)
      return res;
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    HighResolutionInterval *phri = NULL;
    if (!ForceTimeHighResolution)
      phri = new HighResolutionInterval( ms );
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}

//
// Short sleep, direct OS call.
3575 // 3576 // ms = 0, means allow others (if any) to run. 3577 // 3578 void os::naked_short_sleep(jlong ms) { 3579 assert(ms < 1000, "Un-interruptable sleep, short time use only"); 3580 Sleep(ms); 3581 } 3582 3583 // Sleep forever; naked call to OS-specific sleep; use with CAUTION 3584 void os::infinite_sleep() { 3585 while (true) { // sleep forever ... 3586 Sleep(100000); // ... 100 seconds at a time 3587 } 3588 } 3589 3590 typedef BOOL (WINAPI * STTSignature)(void) ; 3591 3592 os::YieldResult os::NakedYield() { 3593 // Use either SwitchToThread() or Sleep(0) 3594 // Consider passing back the return value from SwitchToThread(). 3595 if (os::Kernel32Dll::SwitchToThreadAvailable()) { 3596 return SwitchToThread() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY ; 3597 } else { 3598 Sleep(0); 3599 } 3600 return os::YIELD_UNKNOWN ; 3601 } 3602 3603 void os::yield() { os::NakedYield(); } 3604 3605 void os::yield_all(int attempts) { 3606 // Yields to all threads, including threads with lower priorities 3607 Sleep(1); 3608 } 3609 3610 // Win32 only gives you access to seven real priorities at a time, 3611 // so we compress Java's ten down to seven. It would be better 3612 // if we dynamically adjusted relative priorities. 
// Mapping from Java thread priorities (0..CriticalPriority) to Win32 thread
// priorities. This default table tops out at THREAD_PRIORITY_HIGHEST.
int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,          // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,        // 1  MinPriority
  THREAD_PRIORITY_LOWEST,        // 2
  THREAD_PRIORITY_BELOW_NORMAL,  // 3
  THREAD_PRIORITY_BELOW_NORMAL,  // 4
  THREAD_PRIORITY_NORMAL,        // 5  NormPriority
  THREAD_PRIORITY_NORMAL,        // 6
  THREAD_PRIORITY_ABOVE_NORMAL,  // 7
  THREAD_PRIORITY_ABOVE_NORMAL,  // 8
  THREAD_PRIORITY_HIGHEST,       // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,       // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST        // 11 CriticalPriority
};

// Alternative, more aggressive mapping installed when ThreadPriorityPolicy
// is 1 (see prio_init()); uses THREAD_PRIORITY_TIME_CRITICAL at the top.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,          // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,        // 1  MinPriority
  THREAD_PRIORITY_LOWEST,        // 2
  THREAD_PRIORITY_BELOW_NORMAL,  // 3
  THREAD_PRIORITY_BELOW_NORMAL,  // 4
  THREAD_PRIORITY_NORMAL,        // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,  // 6
  THREAD_PRIORITY_ABOVE_NORMAL,  // 7
  THREAD_PRIORITY_HIGHEST,       // 8
  THREAD_PRIORITY_HIGHEST,       // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL, // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL  // 11 CriticalPriority
};

// Select the active priority table based on flags; the return value is unused
// (kept for the call-site idiom).
static int prio_init() {
  // If ThreadPriorityPolicy is 1, switch tables
  if (ThreadPriorityPolicy == 1) {
    int i;
    for (i = 0; i < CriticalPriority + 1; i++) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  if (UseCriticalJavaThreadPriority) {
    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority] ;
  }
  return 0;
}

// Apply 'priority' (already an OS-level value) to the thread; a no-op success
// when UseThreadPriorities is off.
OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) return OS_OK;
  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
  return ret ? OS_OK : OS_ERR;
}

// Read the thread's current OS priority into *priority_ptr; reports
// NormPriority's mapping when UseThreadPriorities is off.
OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) {
  if ( !UseThreadPriorities ) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

// Post an interrupt to 'thread': set the interrupted flag, signal the
// interrupt event, and unpark both the JSR166 parker and the ParkEvent.
void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread())
    ((JavaThread*)thread)->parker()->unpark();

  ParkEvent * ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

}


// Query (and optionally clear) the thread's interrupted state.
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. By checking thread interrupt event to see
  // if the thread gets real interrupt thus prevent spurious wakeup.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}

// Gets a pc (hint) for a running thread. Currently used only for profiling.
ExtendedPC os::get_thread_pc(Thread* thread) {
  CONTEXT context;
  context.ContextFlags = CONTEXT_CONTROL;
  HANDLE handle = thread->osthread()->thread_handle();
#ifdef _M_IA64
  assert(0, "Fix get_thread_pc");
  return ExtendedPC(NULL);
#else
  if (GetThreadContext(handle, &context)) {
#ifdef _M_AMD64
    return ExtendedPC((address) context.Rip);
#else
    return ExtendedPC((address) context.Eip);
#endif
  } else {
    return ExtendedPC(NULL);
  }
#endif
}

// GetCurrentThreadId() returns DWORD
intx os::current_thread_id()  { return GetCurrentThreadId(); }

// Cached pid; 0 until set (falls back to _getpid() while unset).
static int _initial_pid = 0;

int os::current_process_id()
{
  return (_initial_pid ? _initial_pid : _getpid());
}

// Static state cached by initialize_system_info() below.
int    os::win32::_vm_page_size              = 0;
int    os::win32::_vm_allocation_granularity = 0;
int    os::win32::_processor_type            = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int    os::win32::_processor_level           = 0;
julong os::win32::_physical_memory           = 0;
size_t os::win32::_default_stack_size        = 0;

intx os::win32::_os_thread_limit             = 0;
volatile intx os::win32::_os_thread_count    = 0;

bool   os::win32::_is_nt                     = false;
bool   os::win32::_is_windows_2003           = false;
bool   os::win32::_is_windows_server         = false;

// Query the OS once at startup for page size, processor info, physical
// memory, OS version/flavor and default stack size, caching the results in
// the statics above.
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size    = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type  = si.dwProcessorType;
  _processor_level = si.wProcessorLevel;
  set_processor_count(si.dwNumberOfProcessors);

  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);

  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatusEx(&ms);
  _physical_memory = ms.ullTotalPhys;

  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch (oi.dwPlatformId) {
    case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
    case VER_PLATFORM_WIN32_NT:
      _is_nt = true;
      {
        // 5002 == major 5, minor 2 == Windows Server 2003 / XP x64
        int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
        if (os_vers == 5002) {
          _is_windows_2003 = true;
        }
        if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
            oi.wProductType == VER_NT_SERVER) {
          _is_windows_server = true;
        }
      }
      break;
    default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
         "stack size not a multiple of page size");

  initialize_performance_counter();

  // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is
  // known to deadlock the system, if the VM issues to thread operations with
  // a too high frequency, e.g., such as changing the priorities.
  // The 6000 seems to work well - no deadlocks has been notices on the test
  // programs that we have seen experience this problem.
  if (!os::win32::is_nt()) {
    StarvationMonitorInterval = 6000;
  }
}


// Load a system DLL by bare name, trying the System directory and then the
// Windows directory. Path components in 'name' are rejected. On failure an
// error message is written to 'ebuf' and NULL is returned.
HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, int ebuflen) {
  char path[MAX_PATH];
  DWORD size;
  DWORD pathLen = (DWORD)sizeof(path);
  HINSTANCE result = NULL;

  // only allow library name without path component
  assert(strchr(name, '\\') == NULL, "path not allowed");
  assert(strchr(name, ':') == NULL, "path not allowed");
  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
    jio_snprintf(ebuf, ebuflen,
                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
    return NULL;
  }

  // search system directory
  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
    strcat(path, "\\");
    strcat(path, name);
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // try Windows directory
  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
    strcat(path, "\\");
    strcat(path, name);
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  jio_snprintf(ebuf, ebuflen,
               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
  return NULL;
}

// Put the standard streams into binary mode so the VM controls all newline
// translation itself.
void os::win32::setmode_streams() {
  _setmode(_fileno(stdin), _O_BINARY);
  _setmode(_fileno(stdout),
_O_BINARY);
  _setmode(_fileno(stderr), _O_BINARY);
}


// True if a native debugger is attached to this process.
bool os::is_debugger_attached() {
  return IsDebuggerPresent() ? true : false;
}


// If PauseAtExit is set, block at VM exit until the user presses a key.
void os::wait_for_keypress_at_exit(void) {
  if (PauseAtExit) {
    fprintf(stderr, "Press any key to continue...\n");
    fgetc(stdin);
  }
}


// Show a modal Yes/No message box; returns true iff the user chose Yes.
int os::message_box(const char* title, const char* message) {
  int result = MessageBox(NULL, message, title,
                          MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
  return result == IDYES;
}

// Thin wrappers over the Win32 TLS API.
int os::allocate_thread_local_storage() {
  return TlsAlloc();
}


void os::free_thread_local_storage(int index) {
  TlsFree(index);
}


void os::thread_local_storage_at_put(int index, void* value) {
  TlsSetValue(index, value);
  assert(thread_local_storage_at(index) == value, "Just checking");
}


void* os::thread_local_storage_at(int index) {
  return TlsGetValue(index);
}


#ifndef PRODUCT
#ifndef _WIN64
// Helpers to check whether NX protection is enabled
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      pex->ExceptionRecord->NumberParameters > 0 &&
      pex->ExceptionRecord->ExceptionInformation[0] ==
      EXCEPTION_INFO_EXEC_VIOLATION) {
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 }; // ret
  void *code_ptr = (void *)code;
  __try {
    __asm call code_ptr
  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
    tty->print_raw_cr("NX protection detected.");
  }
}
#endif // _WIN64
#endif // PRODUCT

// this is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = _getpid();

  init_random(1234567);

  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // For better scalability on MP systems (must be called after initialize_system_info)
#ifndef PRODUCT
  if (is_MP()) {
    NoYieldsInMicrolock = true;
  }
#endif
  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
                os::win32::is_windows_2003());

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();
}

// To install functions for atexit processing
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

static jint initSock();

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // Allocate a single page and mark it as readable for safepoint polling
  address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
  guarantee( polling_page != NULL, "Reserve Failed for polling page");

  address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
  guarantee( return_page != NULL, "Commit Failed for polling page");

  os::set_polling_page( polling_page );

#ifndef PRODUCT
  if( Verbose && PrintMiscellaneous )
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    address mem_serialize_page =
(address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
    guarantee( mem_serialize_page != NULL, "Reserve Failed for memory serialize page");

    return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
    guarantee( return_page != NULL, "Commit Failed for memory serialize page");

    os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
    if(Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  // Setup Windows Exceptions

  // for debugging float code generation bugs
  if (ForceFloatExceptions) {
#ifndef _WIN64
    static long fp_control_word = 0;
    __asm { fstcw fp_control_word }
    // see Intel PPro Manual, Vol. 2, p 7-16
    const long precision = 0x20;
    const long underflow = 0x10;
    const long overflow  = 0x08;
    const long zero_div  = 0x04;
    const long denorm    = 0x02;
    const long invalid   = 0x01;
    fp_control_word |= invalid;
    __asm { fldcw fp_control_word }
#endif
  }

  // If stack_commit_size is 0, windows will reserve the default size,
  // but only commit a small portion of it.
  size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
  size_t default_reserve_size = os::win32::default_stack_size();
  size_t actual_reserve_size = stack_commit_size;
  if (stack_commit_size < default_reserve_size) {
    // If stack_commit_size == 0, we want this too
    actual_reserve_size = default_reserve_size;
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size.  Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  size_t min_stack_allowed =
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
            2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
  if (actual_reserve_size < min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, "
                  "Specify at least %dk",
                  min_stack_allowed / K);
    return JNI_ERR;
  }

  JavaThread::set_stack_size_at_create(stack_commit_size);

  // Calculate theoretical max. size of Threads to guard gainst artifical
  // out-of-memory situations, where all available address-space has been
  // reserved by thread stacks.
  assert(actual_reserve_size != 0, "Must have a stack");

  // Calculate the thread limit when we should start doing Virtual Memory
  // banging. Currently when the threads will have used all but 200Mb of space.
  //
  // TODO: consider performing a similar calculation for commit size instead
  // as reserve size, since on a 64-bit platform we'll run into that more
  // often than running out of virtual memory space.  We can use the
  // lower value of the two calculations as the os_thread_limit.
  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);

  // at exit methods are called in the reverse order of their registration.
  // there is no limit to the number of functions registered. atexit does
  // not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

#ifndef _WIN64
  // Print something if NX is enabled (win32 on AMD64)
  NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
#endif

  // initialize thread priority policy
  prio_init();

  if (UseNUMA && !ForceNUMA) {
    UseNUMA = false; // We don't fully support this yet
  }

  if (UseNUMAInterleaving) {
    // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
    bool success = numa_interleaving_init();
    if (!success) UseNUMAInterleaving = false;
  }

  if (initSock() != JNI_OK) {
    return JNI_ERR;
  }

  return JNI_OK;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  DWORD old_status;
  if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status) )
    fatal("Could not disable polling page");
};

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  DWORD old_status;
  if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status) )
    fatal("Could not enable polling page");
};


// stat() with the path converted to native form; optionally normalizes
// st_mtime to be timezone-independent (see comment below).
int os::stat(const char *path, struct stat *sbuf) {
  char pathbuf[MAX_PATH];
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  int ret = ::stat(pathbuf, sbuf);
  if (sbuf != NULL && UseUTCFileTimestamp) {
    // Fix for 6539723.
    // st_mtime returned from stat() is dependent on
    // the system timezone and so can return different values for the
    // same file if/when daylight savings time changes.  This adjustment
    // makes sure the same timestamp is returned regardless of the TZ.
    //
    // See:
    // http://msdn.microsoft.com/library/
    //   default.asp?url=/library/en-us/sysinfo/base/
    //   time_zone_information_str.asp
    // and
    // http://msdn.microsoft.com/library/default.asp?url=
    //   /library/en-us/sysinfo/base/settimezoneinformation.asp
    //
    // NOTE: there is a insidious bug here:  If the timezone is changed
    // after the call to stat() but before 'GetTimeZoneInformation()', then
    // the adjustment we do here will be wrong and we'll return the wrong
    // value (which will likely end up creating an invalid class data
    // archive).  Absent a better API for this, or some time zone locking
    // mechanism, we'll have to live with this risk.
    TIME_ZONE_INFORMATION tz;
    DWORD tzid = GetTimeZoneInformation(&tz);
    int daylightBias =
      (tzid == TIME_ZONE_ID_DAYLIGHT) ?  tz.DaylightBias : tz.StandardBias;
    sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
  }
  return ret;
}


// Combine a FILETIME's two 32-bit halves into a signed 64-bit value
// (100-nanosecond units).
#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))


// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.

// current_thread_cpu_time() is not optimized for Windows yet
jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
}

jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns.
  return os::thread_cpu_time(thread, true /* user+sys */);
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
}

// Returns CPU time in nanoseconds: user+kernel when user_sys_cpu_time,
// else user only.  GetThreadTimes reports in 100ns units, hence * 100.
jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
  // This code is copy from clasic VM -> hpi::sysThreadCPUTime
  // If this function changes, os::is_thread_cpu_time_supported() should too
  if (os::win32::is_nt()) {
    FILETIME CreationTime;
    FILETIME ExitTime;
    FILETIME KernelTime;
    FILETIME UserTime;

    if ( GetThreadTimes(thread->osthread()->thread_handle(),
                    &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
      return -1;
    else
      if (user_sys_cpu_time) {
        return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
      } else {
        return FT2INT64(UserTime) * 100;
      }
  } else {
    // Non-NT fallback: wall-clock-based approximation (ms -> ns).
    return (jlong) timeGetTime() * 1000000;
  }
}

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}

// Mirrors the capability check implied by os::thread_cpu_time above.
bool os::is_thread_cpu_time_supported() {
  // see os::thread_cpu_time
  if (os::win32::is_nt()) {
    FILETIME CreationTime;
    FILETIME ExitTime;
    FILETIME KernelTime;
    FILETIME UserTime;

    if ( GetThreadTimes(GetCurrentThread(),
                    &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
      return false;
    else
      return true;
  } else {
    return false;
  }
}

// Windows does't provide a loadavg primitive so this is stubbed out for now.
// It does have primitives (PDH API) to get CPU usage and run queue length.
// "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
// If we wanted to implement loadavg on Windows, we have a few options:
//
// a) Query CPU usage and run queue length and "fake" an answer by
//    returning the CPU usage if it's under 100%, and the run queue
//    length otherwise.  It turns out that querying is pretty slow
//    on Windows, on the order of 200 microseconds on a fast machine.
//    Note that on the Windows the CPU usage value is the % usage
//    since the last time the API was called (and the first call
//    returns 100%), so we'd have to deal with that as well.
//
// b) Sample the "fake" answer using a sampling thread and store
//    the answer in a global variable.  The call to loadavg would
//    just return the value of the global, avoiding the slow query.
//
// c) Sample a better answer using exponential decay to smooth the
//    value.  This is basically the algorithm used by UNIX kernels.
//
// Note that sampling thread starvation could affect both (b) and (c).
int os::loadavg(double loadavg[], int nelem) {
  // Not implemented on Windows; see discussion above.
  return -1;
}


// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
bool os::dont_yield() {
  return DontYieldALot;
}

// This method is a slightly reworked copy of JDK's sysOpen
// from src/windows/hpi/src/sys_api_md.c

int os::open(const char *path, int oflag, int mode) {
  char pathbuf[MAX_PATH];

  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
          return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
}

FILE* os::open(int fd, const char* mode) {
  return ::_fdopen(fd, mode);
}

// Is a (classpath) directory empty?
// NOTE(review): FindFirstFile is called with the directory path itself, not
// a "path\\*" wildcard pattern; verify this matches the callers' intent, as
// without a wildcard the match is against the directory entry itself.
bool os::dir_is_empty(const char* path) {
  WIN32_FIND_DATA fd;
  HANDLE f = FindFirstFile(path, &fd);
  if (f == INVALID_HANDLE_VALUE) {
    return true;
  }
  FindClose(f);
  return false;
}

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
  if (!rewrite_existing) {
    oflags |= _O_EXCL;
  }
  return ::open(path, oflags, _S_IREAD | _S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}


jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::_lseeki64(fd, offset, whence);
}

// This method is a slightly reworked copy of JDK's sysNativePath
// from src/windows/hpi/src/path_md.c

/* Convert a pathname to native format.  On win32, this involves forcing all
   separators to be '\\' rather than '/' (both are legal inputs, but Win95
   sometimes rejects '/') and removing redundant separators.  The input path is
   assumed to have been converted into the character encoding used by the local
   system.  Because this might be a double-byte encoding, care is taken to
   treat double-byte lead characters correctly.

   This procedure modifies the given path in place, as the result is never
   longer than the original.  There is no error return; this operation always
   succeeds. */
char * os::native_path(char *path) {
  char *src = path, *dst = path, *end = path;
  char *colon = NULL;           /* If a drive specifier is found, this will
                                   point to the colon following the drive
                                   letter */

  /* Assumption: '/', '\\', ':', and drive letters are never lead bytes */
  assert(((!::IsDBCSLeadByte('/'))
    && (!::IsDBCSLeadByte('\\'))
    && (!::IsDBCSLeadByte(':'))),
    "Illegal lead byte");

  /* Check for leading separators */
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    /* Remove leading separators if followed by drive specifier.  This
       hack is necessary to support file URLs containing drive
       specifiers (e.g., "file://c:/path").  As a side effect,
       "/c:/path" can be used as an alternative to "c:/path". */
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      /* UNC pathname: Retain first separator; leave src pointed at
         second separator so that further separators will be collapsed
         into the second separator.  The result will be a pathname
         beginning with "\\\\" followed (most likely) by a host name. */
      src = dst = path + 1;
      path[0] = '\\';     /* Force first separator to '\\' */
    }
  }

  end = dst;

  /* Remove redundant separators from remainder of path, forcing all
     separators to be '\\' rather than '/'. Also, single byte space
     characters are removed from the end of the path because those
     are not legal ending characters on this operating system.
  */
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      while (isfilesep(*src)) src++;
      if (*src == '\0') {
        /* Check for trailing separator */
        end = dst;
        if (colon == dst - 2) break;                      /* "z:\\" */
        if (dst == path + 1) break;                       /* "\\" */
        if (dst == path + 2 && isfilesep(path[0])) {
          /* "\\\\" is not collapsed to "\\" because "\\\\" marks the
             beginning of a UNC pathname.  Even though it is not, by
             itself, a valid UNC pathname, we leave it as is in order
             to be consistent with the path canonicalizer as well
             as the win32 APIs, which treat this case as an invalid
             UNC pathname rather than as an alias for the root
             directory of the current drive. */
          break;
        }
        end = --dst;  /* Path does not denote a root directory, so
                         remove trailing separator */
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) { /* Copy a double-byte character */
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else {  /* Copy a single-byte character */
        char c = *src++;
        *dst++ = c;
        /* Space is not a legal ending character */
        if (c != ' ') end = dst;
      }
    }
  }

  *end = '\0';

  /* For "z:", add "." to work around a bug in the C runtime library */
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}

// This code is a copy of JDK's sysSetLength
// from src/windows/hpi/src/sys_api_md.c

int os::ftruncate(int fd, jlong length) {
  HANDLE h = (HANDLE)::_get_osfhandle(fd);
  long high = (long)(length >> 32);
  DWORD ret;

  if (h == (HANDLE)(-1)) {
    return -1;
  }

  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
      return -1;
  }

  if (::SetEndOfFile(h) == FALSE) {
    return -1;
  }

  return 0;
}


// This code is a copy of JDK's sysSync
// from src/windows/hpi/src/sys_api_md.c
// except for the legacy workaround for a bug in Win 98

int os::fsync(int fd) {
  HANDLE handle = (HANDLE)::_get_osfhandle(fd);

  if ( (!::FlushFileBuffers(handle)) &&
         (GetLastError() != ERROR_ACCESS_DENIED) ) {
    /* from winerror.h */
    return -1;
  }
  return 0;
}

static int nonSeekAvailable(int, long *);
static int stdinAvailable(int, long *);

#define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
#define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)

// This code is a copy of JDK's sysAvailable
// from src/windows/hpi/src/sys_api_md.c

int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      int ret;
      long lpbytes;
      if (fd == 0) {
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if
((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) { 4507 return FALSE; 4508 } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) { 4509 return FALSE; 4510 } 4511 *bytes = end - cur; 4512 return TRUE; 4513 } else { 4514 return FALSE; 4515 } 4516 } 4517 4518 // This code is a copy of JDK's nonSeekAvailable 4519 // from src/windows/hpi/src/sys_api_md.c 4520 4521 static int nonSeekAvailable(int fd, long *pbytes) { 4522 /* This is used for available on non-seekable devices 4523 * (like both named and anonymous pipes, such as pipes 4524 * connected to an exec'd process). 4525 * Standard Input is a special case. 4526 * 4527 */ 4528 HANDLE han; 4529 4530 if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) { 4531 return FALSE; 4532 } 4533 4534 if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) { 4535 /* PeekNamedPipe fails when at EOF. In that case we 4536 * simply make *pbytes = 0 which is consistent with the 4537 * behavior we get on Solaris when an fd is at EOF. 4538 * The only alternative is to raise an Exception, 4539 * which isn't really warranted. 
4540 */ 4541 if (::GetLastError() != ERROR_BROKEN_PIPE) { 4542 return FALSE; 4543 } 4544 *pbytes = 0; 4545 } 4546 return TRUE; 4547 } 4548 4549 #define MAX_INPUT_EVENTS 2000 4550 4551 // This code is a copy of JDK's stdinAvailable 4552 // from src/windows/hpi/src/sys_api_md.c 4553 4554 static int stdinAvailable(int fd, long *pbytes) { 4555 HANDLE han; 4556 DWORD numEventsRead = 0; /* Number of events read from buffer */ 4557 DWORD numEvents = 0; /* Number of events in buffer */ 4558 DWORD i = 0; /* Loop index */ 4559 DWORD curLength = 0; /* Position marker */ 4560 DWORD actualLength = 0; /* Number of bytes readable */ 4561 BOOL error = FALSE; /* Error holder */ 4562 INPUT_RECORD *lpBuffer; /* Pointer to records of input events */ 4563 4564 if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) { 4565 return FALSE; 4566 } 4567 4568 /* Construct an array of input records in the console buffer */ 4569 error = ::GetNumberOfConsoleInputEvents(han, &numEvents); 4570 if (error == 0) { 4571 return nonSeekAvailable(fd, pbytes); 4572 } 4573 4574 /* lpBuffer must fit into 64K or else PeekConsoleInput fails */ 4575 if (numEvents > MAX_INPUT_EVENTS) { 4576 numEvents = MAX_INPUT_EVENTS; 4577 } 4578 4579 lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal); 4580 if (lpBuffer == NULL) { 4581 return FALSE; 4582 } 4583 4584 error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead); 4585 if (error == 0) { 4586 os::free(lpBuffer, mtInternal); 4587 return FALSE; 4588 } 4589 4590 /* Examine input records for the number of bytes available */ 4591 for(i=0; i<numEvents; i++) { 4592 if (lpBuffer[i].EventType == KEY_EVENT) { 4593 4594 KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *) 4595 &(lpBuffer[i].Event); 4596 if (keyRecord->bKeyDown == TRUE) { 4597 CHAR *keyPressed = (CHAR *) &(keyRecord->uChar); 4598 curLength++; 4599 if (*keyPressed == '\r') { 4600 actualLength = curLength; 4601 } 4602 } 4603 } 4604 } 4605 4606 if(lpBuffer != 
NULL) { 4607 os::free(lpBuffer, mtInternal); 4608 } 4609 4610 *pbytes = (long) actualLength; 4611 return TRUE; 4612 } 4613 4614 // Map a block of memory. 4615 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 4616 char *addr, size_t bytes, bool read_only, 4617 bool allow_exec) { 4618 HANDLE hFile; 4619 char* base; 4620 4621 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL, 4622 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); 4623 if (hFile == NULL) { 4624 if (PrintMiscellaneous && Verbose) { 4625 DWORD err = GetLastError(); 4626 tty->print_cr("CreateFile() failed: GetLastError->%ld.", err); 4627 } 4628 return NULL; 4629 } 4630 4631 if (allow_exec) { 4632 // CreateFileMapping/MapViewOfFileEx can't map executable memory 4633 // unless it comes from a PE image (which the shared archive is not.) 4634 // Even VirtualProtect refuses to give execute access to mapped memory 4635 // that was not previously executable. 4636 // 4637 // Instead, stick the executable region in anonymous memory. Yuck. 4638 // Penalty is that ~4 pages will not be shareable - in the future 4639 // we might consider DLLizing the shared archive with a proper PE 4640 // header so that mapping executable + sharing is possible. 4641 4642 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE, 4643 PAGE_READWRITE); 4644 if (base == NULL) { 4645 if (PrintMiscellaneous && Verbose) { 4646 DWORD err = GetLastError(); 4647 tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err); 4648 } 4649 CloseHandle(hFile); 4650 return NULL; 4651 } 4652 4653 DWORD bytes_read; 4654 OVERLAPPED overlapped; 4655 overlapped.Offset = (DWORD)file_offset; 4656 overlapped.OffsetHigh = 0; 4657 overlapped.hEvent = NULL; 4658 // ReadFile guarantees that if the return value is true, the requested 4659 // number of bytes were read before returning. 
4660 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; 4661 if (!res) { 4662 if (PrintMiscellaneous && Verbose) { 4663 DWORD err = GetLastError(); 4664 tty->print_cr("ReadFile() failed: GetLastError->%ld.", err); 4665 } 4666 release_memory(base, bytes); 4667 CloseHandle(hFile); 4668 return NULL; 4669 } 4670 } else { 4671 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, 4672 NULL /*file_name*/); 4673 if (hMap == NULL) { 4674 if (PrintMiscellaneous && Verbose) { 4675 DWORD err = GetLastError(); 4676 tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err); 4677 } 4678 CloseHandle(hFile); 4679 return NULL; 4680 } 4681 4682 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; 4683 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, 4684 (DWORD)bytes, addr); 4685 if (base == NULL) { 4686 if (PrintMiscellaneous && Verbose) { 4687 DWORD err = GetLastError(); 4688 tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err); 4689 } 4690 CloseHandle(hMap); 4691 CloseHandle(hFile); 4692 return NULL; 4693 } 4694 4695 if (CloseHandle(hMap) == 0) { 4696 if (PrintMiscellaneous && Verbose) { 4697 DWORD err = GetLastError(); 4698 tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err); 4699 } 4700 CloseHandle(hFile); 4701 return base; 4702 } 4703 } 4704 4705 if (allow_exec) { 4706 DWORD old_protect; 4707 DWORD exec_access = read_only ? 
PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; 4708 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; 4709 4710 if (!res) { 4711 if (PrintMiscellaneous && Verbose) { 4712 DWORD err = GetLastError(); 4713 tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err); 4714 } 4715 // Don't consider this a hard error, on IA32 even if the 4716 // VirtualProtect fails, we should still be able to execute 4717 CloseHandle(hFile); 4718 return base; 4719 } 4720 } 4721 4722 if (CloseHandle(hFile) == 0) { 4723 if (PrintMiscellaneous && Verbose) { 4724 DWORD err = GetLastError(); 4725 tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err); 4726 } 4727 return base; 4728 } 4729 4730 return base; 4731 } 4732 4733 4734 // Remap a block of memory. 4735 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4736 char *addr, size_t bytes, bool read_only, 4737 bool allow_exec) { 4738 // This OS does not allow existing memory maps to be remapped so we 4739 // have to unmap the memory before we remap it. 4740 if (!os::unmap_memory(addr, bytes)) { 4741 return NULL; 4742 } 4743 4744 // There is a very small theoretical window between the unmap_memory() 4745 // call above and the map_memory() call below where a thread in native 4746 // code may be able to access an address that is no longer mapped. 4747 4748 return os::map_memory(fd, file_name, file_offset, addr, bytes, 4749 read_only, allow_exec); 4750 } 4751 4752 4753 // Unmap a block of memory. 4754 // Returns true=success, otherwise false. 
4755 4756 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4757 BOOL result = UnmapViewOfFile(addr); 4758 if (result == 0) { 4759 if (PrintMiscellaneous && Verbose) { 4760 DWORD err = GetLastError(); 4761 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err); 4762 } 4763 return false; 4764 } 4765 return true; 4766 } 4767 4768 void os::pause() { 4769 char filename[MAX_PATH]; 4770 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4771 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4772 } else { 4773 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4774 } 4775 4776 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4777 if (fd != -1) { 4778 struct stat buf; 4779 ::close(fd); 4780 while (::stat(filename, &buf) == 0) { 4781 Sleep(100); 4782 } 4783 } else { 4784 jio_fprintf(stderr, 4785 "Could not open pause file '%s', continuing immediately.\n", filename); 4786 } 4787 } 4788 4789 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() { 4790 assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread"); 4791 } 4792 4793 /* 4794 * See the caveats for this class in os_windows.hpp 4795 * Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4796 * into this method and returns false. If no OS EXCEPTION was raised, returns 4797 * true. 4798 * The callback is supposed to provide the method that should be protected. 
4799 */ 4800 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) { 4801 assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread"); 4802 assert(!WatcherThread::watcher_thread()->has_crash_protection(), 4803 "crash_protection already set?"); 4804 4805 bool success = true; 4806 __try { 4807 WatcherThread::watcher_thread()->set_crash_protection(this); 4808 cb.call(); 4809 } __except(EXCEPTION_EXECUTE_HANDLER) { 4810 // only for protection, nothing to do 4811 success = false; 4812 } 4813 WatcherThread::watcher_thread()->set_crash_protection(NULL); 4814 return success; 4815 } 4816 4817 // An Event wraps a win32 "CreateEvent" kernel handle. 4818 // 4819 // We have a number of choices regarding "CreateEvent" win32 handle leakage: 4820 // 4821 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle 4822 // field, and call CloseHandle() on the win32 event handle. Unpark() would 4823 // need to be modified to tolerate finding a NULL (invalid) win32 event handle. 4824 // In addition, an unpark() operation might fetch the handle field, but the 4825 // event could recycle between the fetch and the SetEvent() operation. 4826 // SetEvent() would either fail because the handle was invalid, or inadvertently work, 4827 // as the win32 handle value had been recycled. In an ideal world calling SetEvent() 4828 // on an stale but recycled handle would be harmless, but in practice this might 4829 // confuse other non-Sun code, so it's not a viable approach. 4830 // 4831 // 2: Once a win32 event handle is associated with an Event, it remains associated 4832 // with the Event. The event handle is never closed. This could be construed 4833 // as handle leakage, but only up to the maximum # of threads that have been extant 4834 // at any one time. This shouldn't be an issue, as windows platforms typically 4835 // permit a process to have hundreds of thousands of open handles. 
4836 // 4837 // 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList 4838 // and release unused handles. 4839 // 4840 // 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle. 4841 // It's not clear, however, that we wouldn't be trading one type of leak for another. 4842 // 4843 // 5. Use an RCU-like mechanism (Read-Copy Update). 4844 // Or perhaps something similar to Maged Michael's "Hazard pointers". 4845 // 4846 // We use (2). 4847 // 4848 // TODO-FIXME: 4849 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation. 4850 // 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks 4851 // to recover from (or at least detect) the dreaded Windows 841176 bug. 4852 // 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent 4853 // into a single win32 CreateEvent() handle. 4854 // 4855 // _Event transitions in park() 4856 // -1 => -1 : illegal 4857 // 1 => 0 : pass - return immediately 4858 // 0 => -1 : block 4859 // 4860 // _Event serves as a restricted-range semaphore : 4861 // -1 : thread is blocked 4862 // 0 : neutral - thread is running or ready 4863 // 1 : signaled - thread is running or ready 4864 // 4865 // Another possible encoding of _Event would be 4866 // with explicit "PARKED" and "SIGNALED" bits. 4867 4868 int os::PlatformEvent::park (jlong Millis) { 4869 guarantee (_ParkHandle != NULL , "Invariant") ; 4870 guarantee (Millis > 0 , "Invariant") ; 4871 int v ; 4872 4873 // CONSIDER: defer assigning a CreateEvent() handle to the Event until 4874 // the initial park() operation. 4875 4876 for (;;) { 4877 v = _Event ; 4878 if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ; 4879 } 4880 guarantee ((v == 0) || (v == 1), "invariant") ; 4881 if (v != 0) return OS_OK ; 4882 4883 // Do this the hard way by blocking ... 
4884 // TODO: consider a brief spin here, gated on the success of recent 4885 // spin attempts by this thread. 4886 // 4887 // We decompose long timeouts into series of shorter timed waits. 4888 // Evidently large timo values passed in WaitForSingleObject() are problematic on some 4889 // versions of Windows. See EventWait() for details. This may be superstition. Or not. 4890 // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time 4891 // with os::javaTimeNanos(). Furthermore, we assume that spurious returns from 4892 // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend 4893 // to happen early in the wait interval. Specifically, after a spurious wakeup (rv == 4894 // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate 4895 // for the already waited time. This policy does not admit any new outcomes. 4896 // In the future, however, we might want to track the accumulated wait time and 4897 // adjust Millis accordingly if we encounter a spurious wakeup. 4898 4899 const int MAXTIMEOUT = 0x10000000 ; 4900 DWORD rv = WAIT_TIMEOUT ; 4901 while (_Event < 0 && Millis > 0) { 4902 DWORD prd = Millis ; // set prd = MAX (Millis, MAXTIMEOUT) 4903 if (Millis > MAXTIMEOUT) { 4904 prd = MAXTIMEOUT ; 4905 } 4906 rv = ::WaitForSingleObject (_ParkHandle, prd) ; 4907 assert (rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed") ; 4908 if (rv == WAIT_TIMEOUT) { 4909 Millis -= prd ; 4910 } 4911 } 4912 v = _Event ; 4913 _Event = 0 ; 4914 // see comment at end of os::PlatformEvent::park() below: 4915 OrderAccess::fence() ; 4916 // If we encounter a nearly simultanous timeout expiry and unpark() 4917 // we return OS_OK indicating we awoke via unpark(). 4918 // Implementor's license -- returning OS_TIMEOUT would be equally valid, however. 4919 return (v >= 0) ? 
OS_OK : OS_TIMEOUT ; 4920 } 4921 4922 void os::PlatformEvent::park () { 4923 guarantee (_ParkHandle != NULL, "Invariant") ; 4924 // Invariant: Only the thread associated with the Event/PlatformEvent 4925 // may call park(). 4926 int v ; 4927 for (;;) { 4928 v = _Event ; 4929 if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ; 4930 } 4931 guarantee ((v == 0) || (v == 1), "invariant") ; 4932 if (v != 0) return ; 4933 4934 // Do this the hard way by blocking ... 4935 // TODO: consider a brief spin here, gated on the success of recent 4936 // spin attempts by this thread. 4937 while (_Event < 0) { 4938 DWORD rv = ::WaitForSingleObject (_ParkHandle, INFINITE) ; 4939 assert (rv == WAIT_OBJECT_0, "WaitForSingleObject failed") ; 4940 } 4941 4942 // Usually we'll find _Event == 0 at this point, but as 4943 // an optional optimization we clear it, just in case can 4944 // multiple unpark() operations drove _Event up to 1. 4945 _Event = 0 ; 4946 OrderAccess::fence() ; 4947 guarantee (_Event >= 0, "invariant") ; 4948 } 4949 4950 void os::PlatformEvent::unpark() { 4951 guarantee (_ParkHandle != NULL, "Invariant") ; 4952 4953 // Transitions for _Event: 4954 // 0 :=> 1 4955 // 1 :=> 1 4956 // -1 :=> either 0 or 1; must signal target thread 4957 // That is, we can safely transition _Event from -1 to either 4958 // 0 or 1. Forcing 1 is slightly more efficient for back-to-back 4959 // unpark() calls. 4960 // See also: "Semaphores in Plan 9" by Mullender & Cox 4961 // 4962 // Note: Forcing a transition from "-1" to "1" on an unpark() means 4963 // that it will take two back-to-back park() calls for the owning 4964 // thread to block. This has the benefit of forcing a spurious return 4965 // from the first park() call after an unpark() call which will help 4966 // shake out uses of park() and unpark() without condition variables. 
4967 4968 if (Atomic::xchg(1, &_Event) >= 0) return; 4969 4970 ::SetEvent(_ParkHandle); 4971 } 4972 4973 4974 // JSR166 4975 // ------------------------------------------------------- 4976 4977 /* 4978 * The Windows implementation of Park is very straightforward: Basic 4979 * operations on Win32 Events turn out to have the right semantics to 4980 * use them directly. We opportunistically resuse the event inherited 4981 * from Monitor. 4982 */ 4983 4984 4985 void Parker::park(bool isAbsolute, jlong time) { 4986 guarantee (_ParkEvent != NULL, "invariant") ; 4987 // First, demultiplex/decode time arguments 4988 if (time < 0) { // don't wait 4989 return; 4990 } 4991 else if (time == 0 && !isAbsolute) { 4992 time = INFINITE; 4993 } 4994 else if (isAbsolute) { 4995 time -= os::javaTimeMillis(); // convert to relative time 4996 if (time <= 0) // already elapsed 4997 return; 4998 } 4999 else { // relative 5000 time /= 1000000; // Must coarsen from nanos to millis 5001 if (time == 0) // Wait for the minimal time unit if zero 5002 time = 1; 5003 } 5004 5005 JavaThread* thread = (JavaThread*)(Thread::current()); 5006 assert(thread->is_Java_thread(), "Must be JavaThread"); 5007 JavaThread *jt = (JavaThread *)thread; 5008 5009 // Don't wait if interrupted or already triggered 5010 if (Thread::is_interrupted(thread, false) || 5011 WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) { 5012 ResetEvent(_ParkEvent); 5013 return; 5014 } 5015 else { 5016 ThreadBlockInVM tbivm(jt); 5017 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */); 5018 jt->set_suspend_equivalent(); 5019 5020 WaitForSingleObject(_ParkEvent, time); 5021 ResetEvent(_ParkEvent); 5022 5023 // If externally suspended while waiting, re-suspend 5024 if (jt->handle_special_suspend_equivalent_condition()) { 5025 jt->java_suspend_self(); 5026 } 5027 } 5028 } 5029 5030 void Parker::unpark() { 5031 guarantee (_ParkEvent != NULL, "invariant") ; 5032 SetEvent(_ParkEvent); 5033 } 5034 5035 // Run the 
specified command in a separate process. Return its exit value, 5036 // or -1 on failure (e.g. can't create a new process). 5037 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) { 5038 STARTUPINFO si; 5039 PROCESS_INFORMATION pi; 5040 5041 memset(&si, 0, sizeof(si)); 5042 si.cb = sizeof(si); 5043 memset(&pi, 0, sizeof(pi)); 5044 BOOL rslt = CreateProcess(NULL, // executable name - use command line 5045 cmd, // command line 5046 NULL, // process security attribute 5047 NULL, // thread security attribute 5048 TRUE, // inherits system handles 5049 0, // no creation flags 5050 NULL, // use parent's environment block 5051 NULL, // use parent's starting directory 5052 &si, // (in) startup information 5053 &pi); // (out) process information 5054 5055 if (rslt) { 5056 // Wait until child process exits. 5057 WaitForSingleObject(pi.hProcess, INFINITE); 5058 5059 DWORD exit_code; 5060 GetExitCodeProcess(pi.hProcess, &exit_code); 5061 5062 // Close process and thread handles. 5063 CloseHandle(pi.hProcess); 5064 CloseHandle(pi.hThread); 5065 5066 return (int)exit_code; 5067 } else { 5068 return -1; 5069 } 5070 } 5071 5072 //-------------------------------------------------------------------------------------------------- 5073 // Non-product code 5074 5075 static int mallocDebugIntervalCounter = 0; 5076 static int mallocDebugCounter = 0; 5077 bool os::check_heap(bool force) { 5078 if (++mallocDebugCounter < MallocVerifyStart && !force) return true; 5079 if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) { 5080 // Note: HeapValidate executes two hardware breakpoints when it finds something 5081 // wrong; at these points, eax contains the address of the offending block (I think). 5082 // To get to the exlicit error message(s) below, just continue twice. 
5083 HANDLE heap = GetProcessHeap(); 5084 { HeapLock(heap); 5085 PROCESS_HEAP_ENTRY phe; 5086 phe.lpData = NULL; 5087 while (HeapWalk(heap, &phe) != 0) { 5088 if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) && 5089 !HeapValidate(heap, 0, phe.lpData)) { 5090 tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter); 5091 tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData); 5092 fatal("corrupted C heap"); 5093 } 5094 } 5095 DWORD err = GetLastError(); 5096 if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) { 5097 fatal(err_msg("heap walk aborted with error %d", err)); 5098 } 5099 HeapUnlock(heap); 5100 } 5101 mallocDebugIntervalCounter = 0; 5102 } 5103 return true; 5104 } 5105 5106 5107 bool os::find(address addr, outputStream* st) { 5108 // Nothing yet 5109 return false; 5110 } 5111 5112 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) { 5113 DWORD exception_code = e->ExceptionRecord->ExceptionCode; 5114 5115 if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) { 5116 JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow(); 5117 PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord; 5118 address addr = (address) exceptionRecord->ExceptionInformation[1]; 5119 5120 if (os::is_memory_serialize_page(thread, addr)) 5121 return EXCEPTION_CONTINUE_EXECUTION; 5122 } 5123 5124 return EXCEPTION_CONTINUE_SEARCH; 5125 } 5126 5127 // We don't build a headless jre for Windows 5128 bool os::is_headless_jre() { return false; } 5129 5130 static jint initSock() { 5131 WSADATA wsadata; 5132 5133 if (!os::WinSock2Dll::WinSock2Available()) { 5134 jio_fprintf(stderr, "Could not load Winsock (error: %d)\n", 5135 ::GetLastError()); 5136 return JNI_ERR; 5137 } 5138 5139 if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) { 5140 jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n", 5141 ::GetLastError()); 5142 return JNI_ERR; 5143 } 5144 return JNI_OK; 
5145 } 5146 5147 struct hostent* os::get_host_by_name(char* name) { 5148 return (struct hostent*)os::WinSock2Dll::gethostbyname(name); 5149 } 5150 5151 int os::socket_close(int fd) { 5152 return ::closesocket(fd); 5153 } 5154 5155 int os::socket_available(int fd, jint *pbytes) { 5156 int ret = ::ioctlsocket(fd, FIONREAD, (u_long*)pbytes); 5157 return (ret < 0) ? 0 : 1; 5158 } 5159 5160 int os::socket(int domain, int type, int protocol) { 5161 return ::socket(domain, type, protocol); 5162 } 5163 5164 int os::listen(int fd, int count) { 5165 return ::listen(fd, count); 5166 } 5167 5168 int os::connect(int fd, struct sockaddr* him, socklen_t len) { 5169 return ::connect(fd, him, len); 5170 } 5171 5172 int os::accept(int fd, struct sockaddr* him, socklen_t* len) { 5173 return ::accept(fd, him, len); 5174 } 5175 5176 int os::sendto(int fd, char* buf, size_t len, uint flags, 5177 struct sockaddr* to, socklen_t tolen) { 5178 5179 return ::sendto(fd, buf, (int)len, flags, to, tolen); 5180 } 5181 5182 int os::recvfrom(int fd, char *buf, size_t nBytes, uint flags, 5183 sockaddr* from, socklen_t* fromlen) { 5184 5185 return ::recvfrom(fd, buf, (int)nBytes, flags, from, fromlen); 5186 } 5187 5188 int os::recv(int fd, char* buf, size_t nBytes, uint flags) { 5189 return ::recv(fd, buf, (int)nBytes, flags); 5190 } 5191 5192 int os::send(int fd, char* buf, size_t nBytes, uint flags) { 5193 return ::send(fd, buf, (int)nBytes, flags); 5194 } 5195 5196 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) { 5197 return ::send(fd, buf, (int)nBytes, flags); 5198 } 5199 5200 int os::timeout(int fd, long timeout) { 5201 fd_set tbl; 5202 struct timeval t; 5203 5204 t.tv_sec = timeout / 1000; 5205 t.tv_usec = (timeout % 1000) * 1000; 5206 5207 tbl.fd_count = 1; 5208 tbl.fd_array[0] = fd; 5209 5210 return ::select(1, &tbl, 0, 0, &t); 5211 } 5212 5213 int os::get_host_name(char* name, int namelen) { 5214 return ::gethostname(name, namelen); 5215 } 5216 5217 int 
os::socket_shutdown(int fd, int howto) { 5218 return ::shutdown(fd, howto); 5219 } 5220 5221 int os::bind(int fd, struct sockaddr* him, socklen_t len) { 5222 return ::bind(fd, him, len); 5223 } 5224 5225 int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) { 5226 return ::getsockname(fd, him, len); 5227 } 5228 5229 int os::get_sock_opt(int fd, int level, int optname, 5230 char* optval, socklen_t* optlen) { 5231 return ::getsockopt(fd, level, optname, optval, optlen); 5232 } 5233 5234 int os::set_sock_opt(int fd, int level, int optname, 5235 const char* optval, socklen_t optlen) { 5236 return ::setsockopt(fd, level, optname, optval, optlen); 5237 } 5238 5239 // WINDOWS CONTEXT Flags for THREAD_SAMPLING 5240 #if defined(IA32) 5241 # define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS) 5242 #elif defined (AMD64) 5243 # define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT) 5244 #endif 5245 5246 // returns true if thread could be suspended, 5247 // false otherwise 5248 static bool do_suspend(HANDLE* h) { 5249 if (h != NULL) { 5250 if (SuspendThread(*h) != ~0) { 5251 return true; 5252 } 5253 } 5254 return false; 5255 } 5256 5257 // resume the thread 5258 // calling resume on an active thread is a no-op 5259 static void do_resume(HANDLE* h) { 5260 if (h != NULL) { 5261 ResumeThread(*h); 5262 } 5263 } 5264 5265 // retrieve a suspend/resume context capable handle 5266 // from the tid. Caller validates handle return value. 
5267 void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) { 5268 if (h != NULL) { 5269 *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid); 5270 } 5271 } 5272 5273 // 5274 // Thread sampling implementation 5275 // 5276 void os::SuspendedThreadTask::internal_do_task() { 5277 CONTEXT ctxt; 5278 HANDLE h = NULL; 5279 5280 // get context capable handle for thread 5281 get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id()); 5282 5283 // sanity 5284 if (h == NULL || h == INVALID_HANDLE_VALUE) { 5285 return; 5286 } 5287 5288 // suspend the thread 5289 if (do_suspend(&h)) { 5290 ctxt.ContextFlags = sampling_context_flags; 5291 // get thread context 5292 GetThreadContext(h, &ctxt); 5293 SuspendedThreadTaskContext context(_thread, &ctxt); 5294 // pass context to Thread Sampling impl 5295 do_task(context); 5296 // resume thread 5297 do_resume(&h); 5298 } 5299 5300 // close handle 5301 CloseHandle(h); 5302 } 5303 5304 5305 // Kernel32 API 5306 typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void); 5307 typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD); 5308 typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG); 5309 typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG); 5310 typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG); 5311 5312 GetLargePageMinimum_Fn os::Kernel32Dll::_GetLargePageMinimum = NULL; 5313 VirtualAllocExNuma_Fn os::Kernel32Dll::_VirtualAllocExNuma = NULL; 5314 GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL; 5315 GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL; 5316 RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL; 5317 5318 5319 BOOL os::Kernel32Dll::initialized = FALSE; 5320 SIZE_T os::Kernel32Dll::GetLargePageMinimum() { 5321 assert(initialized && _GetLargePageMinimum != 
NULL, 5322 "GetLargePageMinimumAvailable() not yet called"); 5323 return _GetLargePageMinimum(); 5324 } 5325 5326 BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() { 5327 if (!initialized) { 5328 initialize(); 5329 } 5330 return _GetLargePageMinimum != NULL; 5331 } 5332 5333 BOOL os::Kernel32Dll::NumaCallsAvailable() { 5334 if (!initialized) { 5335 initialize(); 5336 } 5337 return _VirtualAllocExNuma != NULL; 5338 } 5339 5340 LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr, SIZE_T bytes, DWORD flags, DWORD prot, DWORD node) { 5341 assert(initialized && _VirtualAllocExNuma != NULL, 5342 "NUMACallsAvailable() not yet called"); 5343 5344 return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node); 5345 } 5346 5347 BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) { 5348 assert(initialized && _GetNumaHighestNodeNumber != NULL, 5349 "NUMACallsAvailable() not yet called"); 5350 5351 return _GetNumaHighestNodeNumber(ptr_highest_node_number); 5352 } 5353 5354 BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node, PULONGLONG proc_mask) { 5355 assert(initialized && _GetNumaNodeProcessorMask != NULL, 5356 "NUMACallsAvailable() not yet called"); 5357 5358 return _GetNumaNodeProcessorMask(node, proc_mask); 5359 } 5360 5361 USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip, 5362 ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) { 5363 if (!initialized) { 5364 initialize(); 5365 } 5366 5367 if (_RtlCaptureStackBackTrace != NULL) { 5368 return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture, 5369 BackTrace, BackTraceHash); 5370 } else { 5371 return 0; 5372 } 5373 } 5374 5375 void os::Kernel32Dll::initializeCommon() { 5376 if (!initialized) { 5377 HMODULE handle = ::GetModuleHandle("Kernel32.dll"); 5378 assert(handle != NULL, "Just check"); 5379 _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum"); 5380 _VirtualAllocExNuma = 
(VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma"); 5381 _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber"); 5382 _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask"); 5383 _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace"); 5384 initialized = TRUE; 5385 } 5386 } 5387 5388 5389 5390 #ifndef JDK6_OR_EARLIER 5391 5392 void os::Kernel32Dll::initialize() { 5393 initializeCommon(); 5394 } 5395 5396 5397 // Kernel32 API 5398 inline BOOL os::Kernel32Dll::SwitchToThread() { 5399 return ::SwitchToThread(); 5400 } 5401 5402 inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() { 5403 return true; 5404 } 5405 5406 // Help tools 5407 inline BOOL os::Kernel32Dll::HelpToolsAvailable() { 5408 return true; 5409 } 5410 5411 inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) { 5412 return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId); 5413 } 5414 5415 inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) { 5416 return ::Module32First(hSnapshot, lpme); 5417 } 5418 5419 inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) { 5420 return ::Module32Next(hSnapshot, lpme); 5421 } 5422 5423 inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) { 5424 ::GetNativeSystemInfo(lpSystemInfo); 5425 } 5426 5427 // PSAPI API 5428 inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) { 5429 return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded); 5430 } 5431 5432 inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) { 5433 return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize); 5434 } 5435 5436 inline BOOL os::PSApiDll::GetModuleInformation(HANDLE 
hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) { 5437 return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb); 5438 } 5439 5440 inline BOOL os::PSApiDll::PSApiAvailable() { 5441 return true; 5442 } 5443 5444 5445 // WinSock2 API 5446 inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) { 5447 return ::WSAStartup(wVersionRequested, lpWSAData); 5448 } 5449 5450 inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) { 5451 return ::gethostbyname(name); 5452 } 5453 5454 inline BOOL os::WinSock2Dll::WinSock2Available() { 5455 return true; 5456 } 5457 5458 // Advapi API 5459 inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle, 5460 BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength, 5461 PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) { 5462 return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState, 5463 BufferLength, PreviousState, ReturnLength); 5464 } 5465 5466 inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess, 5467 PHANDLE TokenHandle) { 5468 return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle); 5469 } 5470 5471 inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) { 5472 return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid); 5473 } 5474 5475 inline BOOL os::Advapi32Dll::AdvapiAvailable() { 5476 return true; 5477 } 5478 5479 void* os::get_default_process_handle() { 5480 return (void*)GetModuleHandle(NULL); 5481 } 5482 5483 // Builds a platform dependent Agent_OnLoad_<lib_name> function name 5484 // which is used to find statically linked in agents. 5485 // Additionally for windows, takes into account __stdcall names. 5486 // Parameters: 5487 // sym_name: Symbol in library we are looking for 5488 // lib_name: Name of library to look in, NULL for shared libs. 
5489 // is_absolute_path == true if lib_name is absolute path to agent 5490 // such as "C:/a/b/L.dll" 5491 // == false if only the base name of the library is passed in 5492 // such as "L" 5493 char* os::build_agent_function_name(const char *sym_name, const char *lib_name, 5494 bool is_absolute_path) { 5495 char *agent_entry_name; 5496 size_t len; 5497 size_t name_len; 5498 size_t prefix_len = strlen(JNI_LIB_PREFIX); 5499 size_t suffix_len = strlen(JNI_LIB_SUFFIX); 5500 const char *start; 5501 5502 if (lib_name != NULL) { 5503 len = name_len = strlen(lib_name); 5504 if (is_absolute_path) { 5505 // Need to strip path, prefix and suffix 5506 if ((start = strrchr(lib_name, *os::file_separator())) != NULL) { 5507 lib_name = ++start; 5508 } else { 5509 // Need to check for drive prefix 5510 if ((start = strchr(lib_name, ':')) != NULL) { 5511 lib_name = ++start; 5512 } 5513 } 5514 if (len <= (prefix_len + suffix_len)) { 5515 return NULL; 5516 } 5517 lib_name += prefix_len; 5518 name_len = strlen(lib_name) - suffix_len; 5519 } 5520 } 5521 len = (lib_name != NULL ? 
name_len : 0) + strlen(sym_name) + 2; 5522 agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread); 5523 if (agent_entry_name == NULL) { 5524 return NULL; 5525 } 5526 if (lib_name != NULL) { 5527 const char *p = strrchr(sym_name, '@'); 5528 if (p != NULL && p != sym_name) { 5529 // sym_name == _Agent_OnLoad@XX 5530 strncpy(agent_entry_name, sym_name, (p - sym_name)); 5531 agent_entry_name[(p-sym_name)] = '\0'; 5532 // agent_entry_name == _Agent_OnLoad 5533 strcat(agent_entry_name, "_"); 5534 strncat(agent_entry_name, lib_name, name_len); 5535 strcat(agent_entry_name, p); 5536 // agent_entry_name == _Agent_OnLoad_lib_name@XX 5537 } else { 5538 strcpy(agent_entry_name, sym_name); 5539 strcat(agent_entry_name, "_"); 5540 strncat(agent_entry_name, lib_name, name_len); 5541 } 5542 } else { 5543 strcpy(agent_entry_name, sym_name); 5544 } 5545 return agent_entry_name; 5546 } 5547 5548 #else 5549 // Kernel32 API 5550 typedef BOOL (WINAPI* SwitchToThread_Fn)(void); 5551 typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD,DWORD); 5552 typedef BOOL (WINAPI* Module32First_Fn)(HANDLE,LPMODULEENTRY32); 5553 typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE,LPMODULEENTRY32); 5554 typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO); 5555 5556 SwitchToThread_Fn os::Kernel32Dll::_SwitchToThread = NULL; 5557 CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL; 5558 Module32First_Fn os::Kernel32Dll::_Module32First = NULL; 5559 Module32Next_Fn os::Kernel32Dll::_Module32Next = NULL; 5560 GetNativeSystemInfo_Fn os::Kernel32Dll::_GetNativeSystemInfo = NULL; 5561 5562 void os::Kernel32Dll::initialize() { 5563 if (!initialized) { 5564 HMODULE handle = ::GetModuleHandle("Kernel32.dll"); 5565 assert(handle != NULL, "Just check"); 5566 5567 _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread"); 5568 _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn) 5569 ::GetProcAddress(handle, "CreateToolhelp32Snapshot"); 
5570 _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First"); 5571 _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next"); 5572 _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo"); 5573 initializeCommon(); // resolve the functions that always need resolving 5574 5575 initialized = TRUE; 5576 } 5577 } 5578 5579 BOOL os::Kernel32Dll::SwitchToThread() { 5580 assert(initialized && _SwitchToThread != NULL, 5581 "SwitchToThreadAvailable() not yet called"); 5582 return _SwitchToThread(); 5583 } 5584 5585 5586 BOOL os::Kernel32Dll::SwitchToThreadAvailable() { 5587 if (!initialized) { 5588 initialize(); 5589 } 5590 return _SwitchToThread != NULL; 5591 } 5592 5593 // Help tools 5594 BOOL os::Kernel32Dll::HelpToolsAvailable() { 5595 if (!initialized) { 5596 initialize(); 5597 } 5598 return _CreateToolhelp32Snapshot != NULL && 5599 _Module32First != NULL && 5600 _Module32Next != NULL; 5601 } 5602 5603 HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) { 5604 assert(initialized && _CreateToolhelp32Snapshot != NULL, 5605 "HelpToolsAvailable() not yet called"); 5606 5607 return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId); 5608 } 5609 5610 BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) { 5611 assert(initialized && _Module32First != NULL, 5612 "HelpToolsAvailable() not yet called"); 5613 5614 return _Module32First(hSnapshot, lpme); 5615 } 5616 5617 inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) { 5618 assert(initialized && _Module32Next != NULL, 5619 "HelpToolsAvailable() not yet called"); 5620 5621 return _Module32Next(hSnapshot, lpme); 5622 } 5623 5624 5625 BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() { 5626 if (!initialized) { 5627 initialize(); 5628 } 5629 return _GetNativeSystemInfo != NULL; 5630 } 5631 5632 void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) { 5633 
assert(initialized && _GetNativeSystemInfo != NULL, 5634 "GetNativeSystemInfoAvailable() not yet called"); 5635 5636 _GetNativeSystemInfo(lpSystemInfo); 5637 } 5638 5639 // PSAPI API 5640 5641 5642 typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD); 5643 typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);; 5644 typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD); 5645 5646 EnumProcessModules_Fn os::PSApiDll::_EnumProcessModules = NULL; 5647 GetModuleFileNameEx_Fn os::PSApiDll::_GetModuleFileNameEx = NULL; 5648 GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL; 5649 BOOL os::PSApiDll::initialized = FALSE; 5650 5651 void os::PSApiDll::initialize() { 5652 if (!initialized) { 5653 HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0); 5654 if (handle != NULL) { 5655 _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle, 5656 "EnumProcessModules"); 5657 _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle, 5658 "GetModuleFileNameExA"); 5659 _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle, 5660 "GetModuleInformation"); 5661 } 5662 initialized = TRUE; 5663 } 5664 } 5665 5666 5667 5668 BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) { 5669 assert(initialized && _EnumProcessModules != NULL, 5670 "PSApiAvailable() not yet called"); 5671 return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded); 5672 } 5673 5674 DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) { 5675 assert(initialized && _GetModuleFileNameEx != NULL, 5676 "PSApiAvailable() not yet called"); 5677 return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize); 5678 } 5679 5680 BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) { 5681 assert(initialized && 
_GetModuleInformation != NULL, 5682 "PSApiAvailable() not yet called"); 5683 return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb); 5684 } 5685 5686 BOOL os::PSApiDll::PSApiAvailable() { 5687 if (!initialized) { 5688 initialize(); 5689 } 5690 return _EnumProcessModules != NULL && 5691 _GetModuleFileNameEx != NULL && 5692 _GetModuleInformation != NULL; 5693 } 5694 5695 5696 // WinSock2 API 5697 typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA); 5698 typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...); 5699 5700 WSAStartup_Fn os::WinSock2Dll::_WSAStartup = NULL; 5701 gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL; 5702 BOOL os::WinSock2Dll::initialized = FALSE; 5703 5704 void os::WinSock2Dll::initialize() { 5705 if (!initialized) { 5706 HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0); 5707 if (handle != NULL) { 5708 _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup"); 5709 _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname"); 5710 } 5711 initialized = TRUE; 5712 } 5713 } 5714 5715 5716 BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) { 5717 assert(initialized && _WSAStartup != NULL, 5718 "WinSock2Available() not yet called"); 5719 return _WSAStartup(wVersionRequested, lpWSAData); 5720 } 5721 5722 struct hostent* os::WinSock2Dll::gethostbyname(const char *name) { 5723 assert(initialized && _gethostbyname != NULL, 5724 "WinSock2Available() not yet called"); 5725 return _gethostbyname(name); 5726 } 5727 5728 BOOL os::WinSock2Dll::WinSock2Available() { 5729 if (!initialized) { 5730 initialize(); 5731 } 5732 return _WSAStartup != NULL && 5733 _gethostbyname != NULL; 5734 } 5735 5736 typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD); 5737 typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE); 5738 typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, LPCTSTR, PLUID); 5739 
5740 AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL; 5741 OpenProcessToken_Fn os::Advapi32Dll::_OpenProcessToken = NULL; 5742 LookupPrivilegeValue_Fn os::Advapi32Dll::_LookupPrivilegeValue = NULL; 5743 BOOL os::Advapi32Dll::initialized = FALSE; 5744 5745 void os::Advapi32Dll::initialize() { 5746 if (!initialized) { 5747 HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0); 5748 if (handle != NULL) { 5749 _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle, 5750 "AdjustTokenPrivileges"); 5751 _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle, 5752 "OpenProcessToken"); 5753 _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle, 5754 "LookupPrivilegeValueA"); 5755 } 5756 initialized = TRUE; 5757 } 5758 } 5759 5760 BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle, 5761 BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength, 5762 PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) { 5763 assert(initialized && _AdjustTokenPrivileges != NULL, 5764 "AdvapiAvailable() not yet called"); 5765 return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState, 5766 BufferLength, PreviousState, ReturnLength); 5767 } 5768 5769 BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess, 5770 PHANDLE TokenHandle) { 5771 assert(initialized && _OpenProcessToken != NULL, 5772 "AdvapiAvailable() not yet called"); 5773 return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle); 5774 } 5775 5776 BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) { 5777 assert(initialized && _LookupPrivilegeValue != NULL, 5778 "AdvapiAvailable() not yet called"); 5779 return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid); 5780 } 5781 5782 BOOL os::Advapi32Dll::AdvapiAvailable() { 5783 if (!initialized) { 5784 initialize(); 5785 } 5786 return _AdjustTokenPrivileges != NULL && 5787 
_OpenProcessToken != NULL && 5788 _LookupPrivilegeValue != NULL; 5789 } 5790 5791 #endif 5792 5793 #ifndef PRODUCT 5794 5795 // test the code path in reserve_memory_special() that tries to allocate memory in a single 5796 // contiguous memory block at a particular address. 5797 // The test first tries to find a good approximate address to allocate at by using the same 5798 // method to allocate some memory at any address. The test then tries to allocate memory in 5799 // the vicinity (not directly after it to avoid possible by-chance use of that location) 5800 // This is of course only some dodgy assumption, there is no guarantee that the vicinity of 5801 // the previously allocated memory is available for allocation. The only actual failure 5802 // that is reported is when the test tries to allocate at a particular location but gets a 5803 // different valid one. A NULL return value at this point is not considered an error but may 5804 // be legitimate. 5805 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages. 5806 void TestReserveMemorySpecial_test() { 5807 if (!UseLargePages) { 5808 if (VerboseInternalVMTests) { 5809 gclog_or_tty->print("Skipping test because large pages are disabled"); 5810 } 5811 return; 5812 } 5813 // save current value of globals 5814 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; 5815 bool old_use_numa_interleaving = UseNUMAInterleaving; 5816 5817 // set globals to make sure we hit the correct code path 5818 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; 5819 5820 // do an allocation at an address selected by the OS to get a good one. 5821 const size_t large_allocation_size = os::large_page_size() * 4; 5822 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); 5823 if (result == NULL) { 5824 if (VerboseInternalVMTests) { 5825 gclog_or_tty->print("Failed to allocate control block with size " SIZE_FORMAT ". 
Skipping remainder of test.", 5826 large_allocation_size); 5827 } 5828 } else { 5829 os::release_memory_special(result, large_allocation_size); 5830 5831 // allocate another page within the recently allocated memory area which seems to be a good location. At least 5832 // we managed to get it once. 5833 const size_t expected_allocation_size = os::large_page_size(); 5834 char* expected_location = result + os::large_page_size(); 5835 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); 5836 if (actual_location == NULL) { 5837 if (VerboseInternalVMTests) { 5838 gclog_or_tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.", 5839 expected_location, large_allocation_size); 5840 } 5841 } else { 5842 // release memory 5843 os::release_memory_special(actual_location, expected_allocation_size); 5844 // only now check, after releasing any memory to avoid any leaks. 5845 assert(actual_location == expected_location, 5846 err_msg("Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead", 5847 expected_location, expected_allocation_size, actual_location)); 5848 } 5849 } 5850 5851 // restore globals 5852 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; 5853 UseNUMAInterleaving = old_use_numa_interleaving; 5854 } 5855 #endif // PRODUCT 5856