1 /* 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // Must be at least Windows 2000 or XP to use IsDebuggerPresent 26 #define _WIN32_WINNT 0x500 27 28 // no precompiled headers 29 #include "classfile/classLoader.hpp" 30 #include "classfile/systemDictionary.hpp" 31 #include "classfile/vmSymbols.hpp" 32 #include "code/icBuffer.hpp" 33 #include "code/vtableStubs.hpp" 34 #include "compiler/compileBroker.hpp" 35 #include "compiler/disassembler.hpp" 36 #include "interpreter/interpreter.hpp" 37 #include "jvm_windows.h" 38 #include "memory/allocation.inline.hpp" 39 #include "memory/filemap.hpp" 40 #include "mutex_windows.inline.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "os_share_windows.hpp" 43 #include "prims/jniFastGetField.hpp" 44 #include "prims/jvm.h" 45 #include "prims/jvm_misc.hpp" 46 #include "runtime/arguments.hpp" 47 #include "runtime/atomic.inline.hpp" 48 #include "runtime/extendedPC.hpp" 49 #include "runtime/globals.hpp" 50 #include "runtime/interfaceSupport.hpp" 51 #include "runtime/java.hpp" 52 #include "runtime/javaCalls.hpp" 53 #include "runtime/mutexLocker.hpp" 54 #include "runtime/objectMonitor.hpp" 55 #include "runtime/orderAccess.inline.hpp" 56 #include "runtime/osThread.hpp" 57 #include "runtime/perfMemory.hpp" 58 #include "runtime/sharedRuntime.hpp" 59 #include "runtime/statSampler.hpp" 60 #include "runtime/stubRoutines.hpp" 61 #include "runtime/thread.inline.hpp" 62 #include "runtime/threadCritical.hpp" 63 #include "runtime/timer.hpp" 64 #include "services/attachListener.hpp" 65 #include "services/memTracker.hpp" 66 #include "services/runtimeService.hpp" 67 #include "utilities/decoder.hpp" 68 #include "utilities/defaultStream.hpp" 69 #include "utilities/events.hpp" 70 #include "utilities/growableArray.hpp" 71 #include "utilities/vmError.hpp" 72 73 #ifdef _DEBUG 74 #include <crtdbg.h> 75 #endif 76 77 78 #include <windows.h> 79 #include <sys/types.h> 80 #include <sys/stat.h> 81 #include <sys/timeb.h> 82 #include <objidl.h> 83 #include <shlobj.h> 84 85 #include <malloc.h> 86 
#include <signal.h> 87 #include <direct.h> 88 #include <errno.h> 89 #include <fcntl.h> 90 #include <io.h> 91 #include <process.h> // For _beginthreadex(), _endthreadex() 92 #include <imagehlp.h> // For os::dll_address_to_function_name 93 /* for enumerating dll libraries */ 94 #include <vdmdbg.h> 95 96 // for timer info max values which include all bits 97 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) 98 99 // For DLL loading/load error detection 100 // Values of PE COFF 101 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c 102 #define IMAGE_FILE_SIGNATURE_LENGTH 4 103 104 static HANDLE main_process; 105 static HANDLE main_thread; 106 static int main_thread_id; 107 108 static FILETIME process_creation_time; 109 static FILETIME process_exit_time; 110 static FILETIME process_user_time; 111 static FILETIME process_kernel_time; 112 113 #ifdef _M_IA64 114 #define __CPU__ ia64 115 #elif _M_AMD64 116 #define __CPU__ amd64 117 #else 118 #define __CPU__ i486 119 #endif 120 121 // save DLL module handle, used by GetModuleFileName 122 123 HINSTANCE vm_lib_handle; 124 125 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) { 126 switch (reason) { 127 case DLL_PROCESS_ATTACH: 128 vm_lib_handle = hinst; 129 if (ForceTimeHighResolution) 130 timeBeginPeriod(1L); 131 break; 132 case DLL_PROCESS_DETACH: 133 if (ForceTimeHighResolution) 134 timeEndPeriod(1L); 135 136 // Workaround for issue when a custom launcher doesn't call 137 // DestroyJavaVM and NMT is trying to track memory when free is 138 // called from a static destructor 139 if (MemTracker::is_on()) { 140 MemTracker::shutdown(MemTracker::NMT_normal); 141 } 142 break; 143 default: 144 break; 145 } 146 return true; 147 } 148 149 static inline double fileTimeAsDouble(FILETIME* time) { 150 const double high = (double) ((unsigned int) ~0); 151 const double split = 10000000.0; 152 double result = (time->dwLowDateTime / split) + 153 time->dwHighDateTime * (high/split); 154 return result; 155 } 156 157 // Implementation of 
// Copies the value of environment variable 'name' into 'buffer'.
// Returns true only if the variable exists and fits entirely in 'len' bytes.
bool os::getenv(const char* name, char* buffer, int len) {
  int result = GetEnvironmentVariable(name, buffer, len);
  return result > 0 && result < len;
}


// No setuid programs under Windows.
bool os::have_special_privileges() {
  return false;
}


// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here.
// For Windows at the moment does nothing
void os::run_periodic_checks() {
  return;
}

// previous UnhandledExceptionFilter, if there is one
static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;

LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);

// Computes java.home, the dll dir, boot class path, java.library.path and
// the extension/endorsed directories, and publishes them via Arguments.
void os::init_system_properties_values() {
  /* sysclasspath, java_home, dll_dir */
  {
    char *home_path;
    char *dll_path;
    char *pslash;
    char *bin = "\\bin";
    char home_dir[MAX_PATH];

    if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) {
      os::jvm_path(home_dir, sizeof(home_dir));
      // Found the full path to jvm.dll.
      // Now cut the path to <java_home>/jre if we can.
      *(strrchr(home_dir, '\\')) = '\0';  /* get rid of \jvm.dll */
      pslash = strrchr(home_dir, '\\');
      if (pslash != NULL) {
        *pslash = '\0';                   /* get rid of \{client|server} */
        pslash = strrchr(home_dir, '\\');
        if (pslash != NULL)
          *pslash = '\0';                 /* get rid of \bin */
      }
    }

    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
    if (home_path == NULL)
      return;
    strcpy(home_path, home_dir);
    Arguments::set_java_home(home_path);

    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal);
    if (dll_path == NULL)
      return;
    strcpy(dll_path, home_dir);
    strcat(dll_path, bin);
    Arguments::set_dll_dir(dll_path);

    if (!set_boot_path('\\', ';'))
      return;
  }

  /* library_path */
#define EXT_DIR "\\lib\\ext"
#define BIN_DIR "\\bin"
#define PACKAGE_DIR "\\Sun\\Java"
  {
    /* Win32 library search order (See the documentation for LoadLibrary):
     *
     * 1. The directory from which application is loaded.
     * 2. The system wide Java Extensions directory (Java only)
     * 3. System directory (GetSystemDirectory)
     * 4. Windows directory (GetWindowsDirectory)
     * 5. The PATH environment variable
     * 6. The current directory
     */

    char *library_path;
    char tmp[MAX_PATH];
    char *path_str = ::getenv("PATH");

    // Buffer sized for five MAX_PATH components plus separators and PATH.
    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
                                    sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);

    library_path[0] = '\0';

    GetModuleFileName(NULL, tmp, sizeof(tmp));
    *(strrchr(tmp, '\\')) = '\0';  // strip the executable name, keep its dir
    strcat(library_path, tmp);

    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);
    strcat(library_path, PACKAGE_DIR BIN_DIR);

    GetSystemDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    if (path_str) {
      strcat(library_path, ";");
      strcat(library_path, path_str);
    }

    strcat(library_path, ";.");

    Arguments::set_library_path(library_path);
    // set_library_path() keeps its own copy, so the scratch buffer can go.
    FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
  }

  /* Default extensions directory */
  {
    char path[MAX_PATH];
    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
    GetWindowsDirectory(path, MAX_PATH);
    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
        path, PACKAGE_DIR, EXT_DIR);
    Arguments::set_ext_dirs(buf);
  }
#undef EXT_DIR
#undef BIN_DIR
#undef PACKAGE_DIR

  /* Default endorsed standards directory. */
  {
#define ENDORSED_DIR "\\lib\\endorsed"
    size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR);
    char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal);
    sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR);
    Arguments::set_endorsed_dirs(buf);
#undef ENDORSED_DIR
  }

#ifndef _WIN64
  // set our UnhandledExceptionFilter and save any previous one
  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif

  // Done
  return;
}

void os::breakpoint() {
  DebugBreak();
}

// Invoked from the BREAKPOINT Macro
extern "C" void breakpoint() {
  os::breakpoint();
}

/*
 * RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 * So far, this method is only used by Native Memory Tracking, which is
 * only supported on Windows XP or later.
 */
address os::get_caller_pc(int n) {
#ifdef _NMT_NOINLINE_
  n++;  // skip the extra frame introduced when NMT inlining is disabled
#endif
  address pc;
  if (os::Kernel32Dll::RtlCaptureStackBackTrace(n + 1, 1, (PVOID*)&pc, NULL) == 1) {
    return pc;
  }
  return NULL;
}


// os::current_stack_base()
//
// Returns the base of the stack, which is the stack's
// starting address. This function must be called
// while running on the stack of the thread being queried.

address os::current_stack_base() {
  MEMORY_BASIC_INFORMATION minfo;
  address stack_bottom;
  size_t stack_size;

  // Query the region containing a local (i.e. the current stack region).
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  stack_bottom = (address)minfo.AllocationBase;
  stack_size = minfo.RegionSize;

  // Add up the sizes of all the regions with the same
  // AllocationBase.
  while (1)
  {
    VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
    if (stack_bottom == (address)minfo.AllocationBase)
      stack_size += minfo.RegionSize;
    else
      break;
  }

#ifdef _M_IA64
  // IA64 has memory and register stacks
  //
  // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
  // at thread creation (1MB backing store growing upwards, 1MB memory stack
  // growing downwards, 2MB summed up)
  //
  // ...
  // ------- top of stack (high address) -----
  // |
  // |      1MB
  // |      Backing Store (Register Stack)
  // |
  // |         / \
  // |          |
  // |          |
  // |          |
  // ------------------------ stack base -----
  // |      1MB
  // |      Memory Stack
  // |
  // |          |
  // |          |
  // |          |
  // |         \ /
  // |
  // ----- bottom of stack (low address) -----
  // ...

  stack_size = stack_size / 2;
#endif
  return stack_bottom + stack_size;
}
// Size of the committed span from the stack's allocation base up to the
// stack base (see os::current_stack_base()).
size_t os::current_stack_size() {
  size_t sz;
  MEMORY_BASIC_INFORMATION minfo;
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
  return sz;
}

// localtime() replacement that copies the (static) result into the
// caller-supplied 'res' buffer; returns NULL on conversion failure.
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  const struct tm* time_struct_ptr = localtime(clock);
  if (time_struct_ptr != NULL) {
    *res = *time_struct_ptr;
    return res;
  }
  return NULL;
}

LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);

// Thread start routine for all new Java threads
static unsigned __stdcall java_start(Thread* thread) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  // Shift the stack pointer by 0..7 * 128 bytes before running the thread.
  _alloca(((pid ^ counter++) & 7) * 128);

  OSThread* osthr = thread->osthread();
  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }


  // Install a win32 structured exception handler around every thread created
  // by VM, so VM can generate error dump when an exception occurred in non-
  // Java thread (e.g. VM thread).
  __try {
    thread->run();
  } __except(topLevelExceptionFilter(
             (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::add code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
  }

  return 0;
}

// Wraps an already-existing Win32 thread (handle + id) in a new OSThread.
// Returns NULL if allocation or interrupt-event creation fails.
static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Initialize support for Java interrupts
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return NULL;
  }
  osthread->set_interrupt_event(interrupt_event);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}
os::create_attached_thread(JavaThread* thread) { 485 #ifdef ASSERT 486 thread->verify_not_published(); 487 #endif 488 HANDLE thread_h; 489 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(), 490 &thread_h, THREAD_ALL_ACCESS, false, 0)) { 491 fatal("DuplicateHandle failed\n"); 492 } 493 OSThread* osthread = create_os_thread(thread, thread_h, 494 (int)current_thread_id()); 495 if (osthread == NULL) { 496 return false; 497 } 498 499 // Initial thread state is RUNNABLE 500 osthread->set_state(RUNNABLE); 501 502 thread->set_osthread(osthread); 503 return true; 504 } 505 506 bool os::create_main_thread(JavaThread* thread) { 507 #ifdef ASSERT 508 thread->verify_not_published(); 509 #endif 510 if (_starting_thread == NULL) { 511 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 512 if (_starting_thread == NULL) { 513 return false; 514 } 515 } 516 517 // The primordial thread is runnable from the start) 518 _starting_thread->set_state(RUNNABLE); 519 520 thread->set_osthread(_starting_thread); 521 return true; 522 } 523 524 // Allocate and initialize a new OSThread 525 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) { 526 unsigned thread_id; 527 528 // Allocate the OSThread object 529 OSThread* osthread = new OSThread(NULL, NULL); 530 if (osthread == NULL) { 531 return false; 532 } 533 534 // Initialize support for Java interrupts 535 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 536 if (interrupt_event == NULL) { 537 delete osthread; 538 return NULL; 539 } 540 osthread->set_interrupt_event(interrupt_event); 541 osthread->set_interrupted(false); 542 543 thread->set_osthread(osthread); 544 545 if (stack_size == 0) { 546 switch (thr_type) { 547 case os::java_thread: 548 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 549 if (JavaThread::stack_size_at_create() > 0) 550 stack_size = JavaThread::stack_size_at_create(); 551 break; 552 case 
os::compiler_thread: 553 if (CompilerThreadStackSize > 0) { 554 stack_size = (size_t)(CompilerThreadStackSize * K); 555 break; 556 } // else fall through: 557 // use VMThreadStackSize if CompilerThreadStackSize is not defined 558 case os::vm_thread: 559 case os::pgc_thread: 560 case os::cgc_thread: 561 case os::watcher_thread: 562 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 563 break; 564 } 565 } 566 567 // Create the Win32 thread 568 // 569 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() 570 // does not specify stack size. Instead, it specifies the size of 571 // initially committed space. The stack size is determined by 572 // PE header in the executable. If the committed "stack_size" is larger 573 // than default value in the PE header, the stack is rounded up to the 574 // nearest multiple of 1MB. For example if the launcher has default 575 // stack size of 320k, specifying any size less than 320k does not 576 // affect the actual stack size at all, it only affects the initial 577 // commitment. On the other hand, specifying 'stack_size' larger than 578 // default value may cause significant increase in memory usage, because 579 // not only the stack space will be rounded up to MB, but also the 580 // entire space is committed upfront. 581 // 582 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' 583 // for CreateThread() that can treat 'stack_size' as stack size. However we 584 // are not supposed to call CreateThread() directly according to MSDN 585 // document because JVM uses C runtime library. The good news is that the 586 // flag appears to work with _beginthredex() as well. 
587 588 #ifndef STACK_SIZE_PARAM_IS_A_RESERVATION 589 #define STACK_SIZE_PARAM_IS_A_RESERVATION (0x10000) 590 #endif 591 592 HANDLE thread_handle = 593 (HANDLE)_beginthreadex(NULL, 594 (unsigned)stack_size, 595 (unsigned (__stdcall *)(void*)) java_start, 596 thread, 597 CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 598 &thread_id); 599 if (thread_handle == NULL) { 600 // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again 601 // without the flag. 602 thread_handle = 603 (HANDLE)_beginthreadex(NULL, 604 (unsigned)stack_size, 605 (unsigned (__stdcall *)(void*)) java_start, 606 thread, 607 CREATE_SUSPENDED, 608 &thread_id); 609 } 610 if (thread_handle == NULL) { 611 // Need to clean up stuff we've allocated so far 612 CloseHandle(osthread->interrupt_event()); 613 thread->set_osthread(NULL); 614 delete osthread; 615 return NULL; 616 } 617 618 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count); 619 620 // Store info on the Win32 thread into the OSThread 621 osthread->set_thread_handle(thread_handle); 622 osthread->set_thread_id(thread_id); 623 624 // Initial thread state is INITIALIZED, not SUSPENDED 625 osthread->set_state(INITIALIZED); 626 627 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 628 return true; 629 } 630 631 632 // Free Win32 resources related to the OSThread 633 void os::free_thread(OSThread* osthread) { 634 assert(osthread != NULL, "osthread not set"); 635 CloseHandle(osthread->thread_handle()); 636 CloseHandle(osthread->interrupt_event()); 637 delete osthread; 638 } 639 640 static jlong first_filetime; 641 static jlong initial_performance_count; 642 static jlong performance_frequency; 643 644 645 jlong as_long(LARGE_INTEGER x) { 646 jlong result = 0; // initialization to avoid warning 647 set_high(&result, x.HighPart); 648 set_low(&result, x.LowPart); 649 return result; 650 } 651 652 653 jlong os::elapsed_counter() { 654 LARGE_INTEGER count; 655 if 
// Ticks-per-second of the clock used by elapsed_counter().
jlong os::elapsed_frequency() {
  if (win32::_has_performance_count) {
    return performance_frequency;
  } else {
    // the FILETIME time is the number of 100-nanosecond intervals since January 1,1601.
    return 10000000;
  }
}


julong os::available_memory() {
  return win32::available_memory();
}

julong os::win32::available_memory() {
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);

  return (julong)ms.ullAvailPhys;
}

julong os::physical_memory() {
  return win32::physical_memory();
}

// Reports the amount of virtual memory this process could still allocate.
bool os::has_allocatable_memory_limit(julong* limit) {
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);
#ifdef _LP64
  *limit = (julong)ms.ullAvailVirtual;
  return true;
#else
  // Limit to 1400m because of the 2gb address space wall
  *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
  return true;
#endif
}

// VC6 lacks DWORD_PTR
#if _MSC_VER < 1300
typedef UINT_PTR DWORD_PTR;
#endif

// Number of processors this process may actually run on, derived from the
// process affinity mask when the mask can represent all configured CPUs.
int os::active_processor_count() {
  DWORD_PTR lpProcessAffinityMask = 0;
  DWORD_PTR lpSystemAffinityMask = 0;
  int proc_count = processor_count();
  // The affinity mask only has sizeof(UINT_PTR)*8 bits; with more CPUs than
  // that we cannot trust it and fall back to the configured count.
  if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
      GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // Nof active processors is number of bits in process affinity mask
    int bitcount = 0;
    while (lpProcessAffinityMask != 0) {
      // Clear the lowest set bit each iteration (Kernighan popcount).
      lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
      bitcount++;
    }
    return bitcount;
  } else {
    return proc_count;
  }
}

void os::set_native_thread_name(const char *name) {
  // Not yet implemented.
  return;
}

bool os::distribute_processes(uint length, uint* distribution) {
  // Not yet implemented.
  return false;
}

bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}

// Capture the timing baselines used by elapsed_counter()/elapsed_frequency():
// prefer QueryPerformanceCounter, otherwise fall back to the FILETIME clock.
void os::win32::initialize_performance_counter() {
  LARGE_INTEGER count;
  if (QueryPerformanceFrequency(&count)) {
    win32::_has_performance_count = 1;
    performance_frequency = as_long(count);
    QueryPerformanceCounter(&count);
    initial_performance_count = as_long(count);
  } else {
    win32::_has_performance_count = 0;
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  }
}


// Elapsed wall-clock seconds since VM start.
double os::elapsedTime() {
  return (double) elapsed_counter() / (double) elapsed_frequency();
}


// Windows format:
// The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
// Java format:
// Java standards require the number of milliseconds since 1/1/1970

// Constant offset - calculated using offset()
static jlong _offset = 116444736000000000;
// Fake time counter for reproducible results when debugging
static jlong fake_time = 0;

#ifdef ASSERT
// Just to be safe, recalculate the offset in debug mode
static jlong _calculated_offset = 0;
static int _has_calculated_offset = 0;

// 100ns ticks between the Windows epoch (1601) and the Java epoch (1970),
// computed once via SystemTimeToFileTime and checked against _offset.
jlong offset() {
  if (_has_calculated_offset) return _calculated_offset;
  SYSTEMTIME java_origin;
  java_origin.wYear          = 1970;
  java_origin.wMonth         = 1;
  java_origin.wDayOfWeek     = 0; // ignored
  java_origin.wDay           = 1;
  java_origin.wHour          = 0;
  java_origin.wMinute        = 0;
  java_origin.wSecond        = 0;
  java_origin.wMilliseconds  = 0;
  FILETIME jot;
  if (!SystemTimeToFileTime(&java_origin, &jot)) {
    fatal(err_msg("Error = %d\nWindows error", GetLastError()));
  }
  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
  _has_calculated_offset = 1;
  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
  return _calculated_offset;
}
#else
jlong offset() {
  return _offset;
}
#endif

// FILETIME (100ns since 1601) -> Java milliseconds since 1970.
jlong windows_to_java_time(FILETIME wt) {
  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  return (a - offset()) / 10000;
}

// Java milliseconds since 1970 -> FILETIME (100ns since 1601).
FILETIME java_to_windows_time(jlong l) {
  jlong a = (l * 10000) + offset();
  FILETIME result;
  result.dwHighDateTime = high(a);
  result.dwLowDateTime  = low(a);
  return result;
}

bool os::supports_vtime() { return true; }
bool os::enable_vtime() { return false; }
bool os::vtime_enabled() { return false; }

// CPU time (kernel + user) consumed by the current thread, in seconds;
// falls back to wall-clock elapsed time if GetThreadTimes fails.
double os::elapsedVTime() {
  FILETIME created;
  FILETIME exited;
  FILETIME kernel;
  FILETIME user;
  if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
    // the resolution of windows_to_java_time() should be sufficient (ms)
    return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
  } else {
    return elapsedTime();
  }
}

jlong os::javaTimeMillis() {
  if (UseFakeTimers) {
    return fake_time++;
  } else {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    return windows_to_java_time(wt);
  }
}

// Monotonic nanosecond clock based on the performance counter when present.
jlong os::javaTimeNanos() {
  if (!win32::_has_performance_count) {
    return javaTimeMillis() * NANOSECS_PER_MILLISEC; // the best we can do.
  } else {
    LARGE_INTEGER current_count;
    QueryPerformanceCounter(&current_count);
    double current = as_long(current_count);
    double freq = performance_frequency;
    jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
    return time;
  }
}

// Describe the properties (range, skipping behavior) of javaTimeNanos().
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  if (!win32::_has_performance_count) {
    // javaTimeMillis() doesn't have much precision,
    // but it is not going to wrap -- so all 64 bits
    info_ptr->max_value = ALL_64_BITS;

    // this is a wall clock timer, so may skip
    info_ptr->may_skip_backward = true;
    info_ptr->may_skip_forward = true;
  } else {
    jlong freq = performance_frequency;
    if (freq < NANOSECS_PER_SEC) {
      // the performance counter is 64 bits and we will
      // be multiplying it -- so no wrap in 64 bits
      info_ptr->max_value = ALL_64_BITS;
    } else if (freq > NANOSECS_PER_SEC) {
      // use the max value the counter can reach to
      // determine the max value which could be returned
      julong max_counter = (julong)ALL_64_BITS;
      info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
    } else {
      // the performance counter is 64 bits and we will
      // be using it directly -- so no wrap in 64 bits
      info_ptr->max_value = ALL_64_BITS;
    }

    // using a counter, so no skipping
    info_ptr->may_skip_backward = false;
    info_ptr->may_skip_forward = false;
  }
  info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
}
info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time 892 } 893 894 char* os::local_time_string(char *buf, size_t buflen) { 895 SYSTEMTIME st; 896 GetLocalTime(&st); 897 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 898 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); 899 return buf; 900 } 901 902 bool os::getTimesSecs(double* process_real_time, 903 double* process_user_time, 904 double* process_system_time) { 905 HANDLE h_process = GetCurrentProcess(); 906 FILETIME create_time, exit_time, kernel_time, user_time; 907 BOOL result = GetProcessTimes(h_process, 908 &create_time, 909 &exit_time, 910 &kernel_time, 911 &user_time); 912 if (result != 0) { 913 FILETIME wt; 914 GetSystemTimeAsFileTime(&wt); 915 jlong rtc_millis = windows_to_java_time(wt); 916 jlong user_millis = windows_to_java_time(user_time); 917 jlong system_millis = windows_to_java_time(kernel_time); 918 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS); 919 *process_user_time = ((double) user_millis) / ((double) MILLIUNITS); 920 *process_system_time = ((double) system_millis) / ((double) MILLIUNITS); 921 return true; 922 } else { 923 return false; 924 } 925 } 926 927 void os::shutdown() { 928 929 // allow PerfMemory to attempt cleanup of any persistent resources 930 perfMemory_exit(); 931 932 // flush buffered output, finish log files 933 ostream_abort(); 934 935 // Check for abort hook 936 abort_hook_t abort_hook = Arguments::abort_hook(); 937 if (abort_hook != NULL) { 938 abort_hook(); 939 } 940 } 941 942 943 static BOOL (WINAPI *_MiniDumpWriteDump) ( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION, 944 PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION); 945 946 void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) { 947 HINSTANCE dbghelp; 948 EXCEPTION_POINTERS ep; 949 MINIDUMP_EXCEPTION_INFORMATION mei; 950 MINIDUMP_EXCEPTION_INFORMATION* pmei; 951 952 HANDLE 
hProcess = GetCurrentProcess(); 953 DWORD processId = GetCurrentProcessId(); 954 HANDLE dumpFile; 955 MINIDUMP_TYPE dumpType; 956 static const char* cwd; 957 958 // Default is to always create dump for debug builds, on product builds only dump on server versions of Windows. 959 #ifndef ASSERT 960 // If running on a client version of Windows and user has not explicitly enabled dumping 961 if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) { 962 VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false); 963 return; 964 // If running on a server version of Windows and user has explictly disabled dumping 965 } else if (os::win32::is_windows_server() && !FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) { 966 VMError::report_coredump_status("Minidump has been disabled from the command line", false); 967 return; 968 } 969 #else 970 if (!FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) { 971 VMError::report_coredump_status("Minidump has been disabled from the command line", false); 972 return; 973 } 974 #endif 975 976 dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0); 977 978 if (dbghelp == NULL) { 979 VMError::report_coredump_status("Failed to load dbghelp.dll", false); 980 return; 981 } 982 983 _MiniDumpWriteDump = CAST_TO_FN_PTR( 984 BOOL(WINAPI *)( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION, 985 PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION), 986 GetProcAddress(dbghelp, "MiniDumpWriteDump")); 987 988 if (_MiniDumpWriteDump == NULL) { 989 VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false); 990 return; 991 } 992 993 dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData); 994 995 // Older versions of dbghelp.h doesn't contain all the dumptypes we want, dbghelp.h with 996 // API_VERSION_NUMBER 11 or higher contains the ones we want though 997 #if 
API_VERSION_NUMBER >= 11 998 dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | 999 MiniDumpWithUnloadedModules); 1000 #endif 1001 1002 cwd = get_current_directory(NULL, 0); 1003 jio_snprintf(buffer, bufferSize, "%s\\hs_err_pid%u.mdmp", cwd, current_process_id()); 1004 dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); 1005 1006 if (dumpFile == INVALID_HANDLE_VALUE) { 1007 VMError::report_coredump_status("Failed to create file for dumping", false); 1008 return; 1009 } 1010 if (exceptionRecord != NULL && contextRecord != NULL) { 1011 ep.ContextRecord = (PCONTEXT) contextRecord; 1012 ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord; 1013 1014 mei.ThreadId = GetCurrentThreadId(); 1015 mei.ExceptionPointers = &ep; 1016 pmei = &mei; 1017 } else { 1018 pmei = NULL; 1019 } 1020 1021 1022 // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all 1023 // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then. 
  // Write the minidump.  If the richer dump type fails (older dbghelp.dll,
  // e.g. the one shipped with Win2003, may not support all the flags we ask
  // for), retry once with plain MiniDumpWithFullMemory before giving up.
  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
    DWORD error = GetLastError();
    LPTSTR msgbuf = NULL;

    // Ask the OS for a human-readable message for the failure code; fall back
    // to reporting just the raw error value if FormatMessage itself fails.
    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) {

      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
      LocalFree(msgbuf);
    } else {
      // Call to FormatMessage failed, just include the result from GetLastError
      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
    }
    VMError::report_coredump_status(buffer, false);
  } else {
    VMError::report_coredump_status(buffer, true);
  }

  CloseHandle(dumpFile);
}

// Terminate the VM via the normal shutdown path.  Note that the dump_core
// argument is not consulted here: this path never produces a core dump on
// Windows (minidump creation is handled separately by the error reporter).
void os::abort(bool dump_core)
{
  os::shutdown();
  // no core dump on Windows
  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  _exit(-1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
// * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

/* Caller must have already run dirname through JVM_NativePath, which removes
   duplicate slashes and converts all instances of '/' into '\\'.
*/

// Open a directory stream for use with os::readdir()/os::closedir().
// Returns a heap-allocated DIR* on success.  On failure returns NULL with
// errno set to ENOMEM, ENOENT, ENOTDIR or EACCES.
DIR *
os::opendir(const char *dirname)
{
    assert(dirname != NULL, "just checking");      // hotspot change
    DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
    DWORD fattr;                                   // hotspot change
    char alt_dirname[4] = { 0, 0, 0, 0 };

    if (dirp == 0) {
        errno = ENOMEM;
        return 0;
    }

    /*
     * Win32 accepts "\" in its POSIX stat(), but refuses to treat it
     * as a directory in FindFirstFile().  We detect this case here and
     * prepend the current drive name.
     */
    if (dirname[1] == '\0' && dirname[0] == '\\') {
        alt_dirname[0] = _getdrive() + 'A' - 1;
        alt_dirname[1] = ':';
        alt_dirname[2] = '\\';
        alt_dirname[3] = '\0';
        dirname = alt_dirname;
    }

    // +5 leaves room for the "\\*.*" wildcard suffix appended below.
    dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
    if (dirp->path == 0) {
        free(dirp, mtInternal);
        errno = ENOMEM;
        return 0;
    }
    strcpy(dirp->path, dirname);

    fattr = GetFileAttributes(dirp->path);
    // Use the named constant instead of the magic 0xffffffff (same value);
    // file_exists() below already uses INVALID_FILE_ATTRIBUTES.
    if (fattr == INVALID_FILE_ATTRIBUTES) {
        free(dirp->path, mtInternal);
        free(dirp, mtInternal);
        errno = ENOENT;
        return 0;
    } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
        free(dirp->path, mtInternal);
        free(dirp, mtInternal);
        errno = ENOTDIR;
        return 0;
    }

    /* Append "*.*", or possibly "\\*.*", to path */
    if (dirp->path[1] == ':'
        && (dirp->path[2] == '\0'
            || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
        /* No '\\' needed for cases like "Z:" or "Z:\" */
        strcat(dirp->path, "*.*");
    } else {
        strcat(dirp->path, "\\*.*");
    }

    dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
    if (dirp->handle == INVALID_HANDLE_VALUE) {
        // An empty directory is not an error: readdir() will simply see the
        // invalid handle and report end-of-stream.  Any other failure is.
        if (GetLastError() != ERROR_FILE_NOT_FOUND) {
            free(dirp->path, mtInternal);
            free(dirp, mtInternal);
            errno = EACCES;
            return 0;
        }
    }
    return dirp;
}

/* parameter dbuf unused on Windows */

struct dirent * 1142 os::readdir(DIR *dirp, dirent *dbuf) 1143 { 1144 assert(dirp != NULL, "just checking"); // hotspot change 1145 if (dirp->handle == INVALID_HANDLE_VALUE) { 1146 return 0; 1147 } 1148 1149 strcpy(dirp->dirent.d_name, dirp->find_data.cFileName); 1150 1151 if (!FindNextFile(dirp->handle, &dirp->find_data)) { 1152 if (GetLastError() == ERROR_INVALID_HANDLE) { 1153 errno = EBADF; 1154 return 0; 1155 } 1156 FindClose(dirp->handle); 1157 dirp->handle = INVALID_HANDLE_VALUE; 1158 } 1159 1160 return &dirp->dirent; 1161 } 1162 1163 int 1164 os::closedir(DIR *dirp) 1165 { 1166 assert(dirp != NULL, "just checking"); // hotspot change 1167 if (dirp->handle != INVALID_HANDLE_VALUE) { 1168 if (!FindClose(dirp->handle)) { 1169 errno = EBADF; 1170 return -1; 1171 } 1172 dirp->handle = INVALID_HANDLE_VALUE; 1173 } 1174 free(dirp->path, mtInternal); 1175 free(dirp, mtInternal); 1176 return 0; 1177 } 1178 1179 // This must be hard coded because it's the system's temporary 1180 // directory not the java application's temp directory, ala java.io.tmpdir. 1181 const char* os::get_temp_directory() { 1182 static char path_buf[MAX_PATH]; 1183 if (GetTempPath(MAX_PATH, path_buf)>0) 1184 return path_buf; 1185 else{ 1186 path_buf[0]='\0'; 1187 return path_buf; 1188 } 1189 } 1190 1191 static bool file_exists(const char* filename) { 1192 if (filename == NULL || strlen(filename) == 0) { 1193 return false; 1194 } 1195 return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES; 1196 } 1197 1198 bool os::dll_build_name(char *buffer, size_t buflen, 1199 const char* pname, const char* fname) { 1200 bool retval = false; 1201 const size_t pnamelen = pname ? strlen(pname) : 0; 1202 const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0; 1203 1204 // Return error on buffer overflow. 
1205 if (pnamelen + strlen(fname) + 10 > buflen) { 1206 return retval; 1207 } 1208 1209 if (pnamelen == 0) { 1210 jio_snprintf(buffer, buflen, "%s.dll", fname); 1211 retval = true; 1212 } else if (c == ':' || c == '\\') { 1213 jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname); 1214 retval = true; 1215 } else if (strchr(pname, *os::path_separator()) != NULL) { 1216 int n; 1217 char** pelements = split_path(pname, &n); 1218 if (pelements == NULL) { 1219 return false; 1220 } 1221 for (int i = 0; i < n; i++) { 1222 char* path = pelements[i]; 1223 // Really shouldn't be NULL, but check can't hurt 1224 size_t plen = (path == NULL) ? 0 : strlen(path); 1225 if (plen == 0) { 1226 continue; // skip the empty path values 1227 } 1228 const char lastchar = path[plen - 1]; 1229 if (lastchar == ':' || lastchar == '\\') { 1230 jio_snprintf(buffer, buflen, "%s%s.dll", path, fname); 1231 } else { 1232 jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname); 1233 } 1234 if (file_exists(buffer)) { 1235 retval = true; 1236 break; 1237 } 1238 } 1239 // release the storage 1240 for (int i = 0; i < n; i++) { 1241 if (pelements[i] != NULL) { 1242 FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal); 1243 } 1244 } 1245 if (pelements != NULL) { 1246 FREE_C_HEAP_ARRAY(char*, pelements, mtInternal); 1247 } 1248 } else { 1249 jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname); 1250 retval = true; 1251 } 1252 return retval; 1253 } 1254 1255 // Needs to be in os specific directory because windows requires another 1256 // header file <direct.h> 1257 const char* os::get_current_directory(char *buf, size_t buflen) { 1258 int n = static_cast<int>(buflen); 1259 if (buflen > INT_MAX) n = INT_MAX; 1260 return _getcwd(buf, n); 1261 } 1262 1263 //----------------------------------------------------------- 1264 // Helper functions for fatal error handler 1265 #ifdef _WIN64 1266 // Helper routine which returns true if address in 1267 // within the NTDLL address space. 
//
// Returns true iff addr lies inside the range occupied by NTDLL.DLL in the
// current process; used by the Win64 fatal error handler.
static bool _addr_in_ntdll( address addr )
{
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if (hmod == NULL) return false;
  if (!os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod,
                                           &minfo, sizeof(MODULEINFO)) )
    return false;

  if ((addr >= minfo.lpBaseOfDll) &&
      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
    return true;
  else
    return false;
}
#endif


// Enumerate all modules for a given process ID
//
// Notice that Windows 95/98/Me and Windows NT/2000/XP have
// different API for doing this. We use PSAPI.DLL on NT based
// Windows and ToolHelp on 95/98/Me.

// Callback function that is called by enumerate_modules() on
// every DLL module.
// Input parameters:
//   int       pid,
//   char*     module_file_name,
//   address   module_base_addr,
//   unsigned  module_size,
//   void*     param
// A non-zero return value stops the enumeration.
typedef int (*EnumModulesCallbackFunc)(int, char *, address, unsigned, void *);

// enumerate_modules for Windows NT, using PSAPI.
// Returns the first non-zero callback result, or 0 if every module was
// visited (or PSAPI/the process could not be accessed).
static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void * param)
{
  HANDLE hProcess;

# define MAX_NUM_MODULES 128
  HMODULE modules[MAX_NUM_MODULES];
  // NOTE: static buffer - this helper is not reentrant/thread-safe.
  static char filename[MAX_PATH];
  int result = 0;

  if (!os::PSApiDll::PSApiAvailable()) {
    return 0;
  }

  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
                                        sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  // Anything beyond MAX_NUM_MODULES did not fit in the array and is skipped.
  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
                                           filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
                                            &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = func(pid, filename, (address)modinfo.lpBaseOfDll,
                  modinfo.SizeOfImage, param);
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
}


// enumerate_modules for Windows 95/98/ME, using TOOLHELP
static int _enumerate_modules_windows( int pid, EnumModulesCallbackFunc func, void *param)
{
  HANDLE hSnapShot;
  // NOTE: static MODULEENTRY32 - this helper is not reentrant/thread-safe.
  static MODULEENTRY32 modentry;
  int result = 0;

  if (!os::Kernel32Dll::HelpToolsAvailable()) {
    return 0;
  }

  // Get a handle to a Toolhelp snapshot of the system
  hSnapShot = os::Kernel32Dll::CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid);
  if (hSnapShot == INVALID_HANDLE_VALUE) {
    return FALSE;
  }

  // iterate through all modules
  modentry.dwSize = sizeof(MODULEENTRY32);
  bool not_done = os::Kernel32Dll::Module32First( hSnapShot, &modentry ) != 0;

  while (not_done) {
    // invoke the callback
    result=func(pid, modentry.szExePath, (address)modentry.modBaseAddr,
                modentry.modBaseSize, param);
    if (result) break;

    // dwSize must be re-initialized before every Module32Next call.
    modentry.dwSize = sizeof(MODULEENTRY32);
    not_done = os::Kernel32Dll::Module32Next( hSnapShot, &modentry ) != 0;
  }

  CloseHandle(hSnapShot);
  return result;
}

// Dispatch to the PSAPI (NT) or ToolHelp (9x/Me) implementation above.
int enumerate_modules( int pid, EnumModulesCallbackFunc func, void * param )
{
  // Get current process ID if caller doesn't provide it.
  if (!pid) pid = os::current_process_id();

  if (os::win32::is_nt()) return _enumerate_modules_winnt (pid, func, param);
  else return _enumerate_modules_windows(pid, func, param);
}

// Query/result record for _locate_module_by_addr().
struct _modinfo {
  address addr;      // in:  address to locate
  char* full_path;   // point to a char buffer
  int buflen;        // size of the buffer
  address base_addr; // out: base address of the containing module
};

// enumerate_modules() callback: stop (return 1) at the first module whose
// [base, base+size) range contains pmod->addr, recording its path and base.
static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr,
                                  unsigned size, void * param) {
  struct _modinfo *pmod = (struct _modinfo *)param;
  if (!pmod) return -1;

  if (base_addr <= pmod->addr &&
      base_addr+size > pmod->addr) {
    // if a buffer is provided, copy path name to the buffer
    if (pmod->full_path) {
      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
    }
    pmod->base_addr = base_addr;
    return 1;
  }
  return 0;
}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
  //       return the full path to the DLL file, sometimes it returns path
  //       to the corresponding PDB file (debug info); sometimes it only
  //       returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr = addr;
  mi.full_path = buf;
  mi.buflen = buflen;
  int pid = os::current_process_id();
  // Walk the loaded modules; _locate_module_by_addr fills mi on a hit.
  if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset)) {
    return true;
  }
  if (offset != NULL) *offset = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
// (uses the address of this function itself as a probe that is known to
// live inside jvm.dll)
static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr,
                           unsigned size, void * param) {
  if (!param) return -1;

  if (base_addr <= (address)_locate_jvm_dll &&
      base_addr+size > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = base_addr + size;
    return 1;
  }
  return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
bool os::address_is_in_vm(address addr) {
  // Lazily resolve jvm.dll's address range on first use.
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    int pid = os::current_process_id();
    if (!enumerate_modules(pid, _locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(int pid, char* fname, address base,
                         unsigned size, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  address end_addr = base + size;
  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base, end_addr, fname);
  // Always continue the enumeration.
  return 0;
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
void * os::dll_load(const char *name, char *ebuf, int ebuflen)
{
  void * result = LoadLibrary(name);
  if (result != NULL)
  {
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen-1);
    ebuf[ebuflen-1]='\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen-1]='\0';
  int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0);
  if (file_descriptor<0)
  {
    return NULL;
  }

  // Read just enough of the PE image to learn its target architecture.
  uint32_t signature_offset;
  uint16_t lib_arch=0;
  bool failed_to_get_lib_arch=
  (
    //Go to position 3c in the dll
    (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0)
    ||
    // Read loacation of signature
    (sizeof(signature_offset)!=
      (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset))))
    ||
    //Go to COFF File Header in dll
    //that is located after"signature" (4 bytes long)
    (os::seek_to_file_offset(file_descriptor,
      signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0)
    ||
    //Read field that contains code of architecture
    // that dll was build for
    (sizeof(lib_arch)!=
      (os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch))))
  );

  ::close(file_descriptor);
  if (failed_to_get_lib_arch)
  {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  typedef struct
  {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[]={
    {IMAGE_FILE_MACHINE_I386,  (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_IA64,  (char*)"IA 64"}
  };
  #if (defined _M_IA64)
    static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64;
  #elif (defined _M_AMD64)
    static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64;
  #elif (defined _M_IX86)
    static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386;
  #else
    #error Method os::dll_load requires that one of following \
           is defined :_M_IA64,_M_AMD64 or _M_IX86
  #endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str=NULL,*lib_arch_str=NULL;
  for (unsigned int i=0;i<ARRAY_SIZE(arch_array);i++)
  {
    if (lib_arch==arch_array[i].arch_code)
      lib_arch_str=arch_array[i].arch_name;
    if (running_arch==arch_array[i].arch_code)
      running_arch_str=arch_array[i].arch_name;
  }

  assert(running_arch_str,
         "Didn't find runing architecture code in arch_array");

  // If the architure is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch)
  {
    return NULL;
  }

  if (lib_arch_str!=NULL)
  {
    ::_snprintf(ebuf, ebuflen-1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str,running_arch_str);
  }
  else
  {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen-1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch,running_arch_str);
  }

  return NULL;
}


// Print all loaded modules of the current process to st.
void os::print_dll_info(outputStream *st) {
  int pid = os::current_process_id();
  st->print_cr("Dynamic libraries:");
  enumerate_modules(pid, _print_module, (void *)st);
}

void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}

void os::print_os_info(outputStream* st) {
  st->print("OS:");

  os::win32::print_windows_version(st);
}

// Translate the OSVERSIONINFOEX major/minor version into a marketing name
// and print it (plus build number and service pack) to st.
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  SYSTEM_INFO si;

  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);

  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("N/A");
    return;
  }

  // Encode major.minor as major*1000+minor, e.g. 6.1 -> 6001.
  int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;

  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  if (os_vers >= 5002) {
    // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
    // find out whether we are running on 64 bit processor or not.
1666 if (os::Kernel32Dll::GetNativeSystemInfoAvailable()) { 1667 os::Kernel32Dll::GetNativeSystemInfo(&si); 1668 } else { 1669 GetSystemInfo(&si); 1670 } 1671 } 1672 1673 if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) { 1674 switch (os_vers) { 1675 case 3051: st->print(" Windows NT 3.51"); break; 1676 case 4000: st->print(" Windows NT 4.0"); break; 1677 case 5000: st->print(" Windows 2000"); break; 1678 case 5001: st->print(" Windows XP"); break; 1679 case 5002: 1680 if (osvi.wProductType == VER_NT_WORKSTATION && 1681 si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) { 1682 st->print(" Windows XP x64 Edition"); 1683 } else { 1684 st->print(" Windows Server 2003 family"); 1685 } 1686 break; 1687 1688 case 6000: 1689 if (osvi.wProductType == VER_NT_WORKSTATION) { 1690 st->print(" Windows Vista"); 1691 } else { 1692 st->print(" Windows Server 2008"); 1693 } 1694 break; 1695 1696 case 6001: 1697 if (osvi.wProductType == VER_NT_WORKSTATION) { 1698 st->print(" Windows 7"); 1699 } else { 1700 st->print(" Windows Server 2008 R2"); 1701 } 1702 break; 1703 1704 case 6002: 1705 if (osvi.wProductType == VER_NT_WORKSTATION) { 1706 st->print(" Windows 8"); 1707 } else { 1708 st->print(" Windows Server 2012"); 1709 } 1710 break; 1711 1712 case 6003: 1713 if (osvi.wProductType == VER_NT_WORKSTATION) { 1714 st->print(" Windows 8.1"); 1715 } else { 1716 st->print(" Windows Server 2012 R2"); 1717 } 1718 break; 1719 1720 default: // future os 1721 // Unrecognized windows, print out its major and minor versions 1722 st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion); 1723 } 1724 } else { 1725 switch (os_vers) { 1726 case 4000: st->print(" Windows 95"); break; 1727 case 4010: st->print(" Windows 98"); break; 1728 case 4090: st->print(" Windows Me"); break; 1729 default: // future windows, print out its major and minor versions 1730 st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion); 1731 } 1732 } 1733 1734 if (os_vers >= 6000 && 
si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) { 1735 st->print(" , 64 bit"); 1736 } 1737 1738 st->print(" Build %d", osvi.dwBuildNumber); 1739 st->print(" %s", osvi.szCSDVersion); // service pack 1740 st->cr(); 1741 } 1742 1743 void os::pd_print_cpu_info(outputStream* st) { 1744 // Nothing to do for now. 1745 } 1746 1747 void os::print_memory_info(outputStream* st) { 1748 st->print("Memory:"); 1749 st->print(" %dk page", os::vm_page_size()>>10); 1750 1751 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 1752 // value if total memory is larger than 4GB 1753 MEMORYSTATUSEX ms; 1754 ms.dwLength = sizeof(ms); 1755 GlobalMemoryStatusEx(&ms); 1756 1757 st->print(", physical %uk", os::physical_memory() >> 10); 1758 st->print("(%uk free)", os::available_memory() >> 10); 1759 1760 st->print(", swap %uk", ms.ullTotalPageFile >> 10); 1761 st->print("(%uk free)", ms.ullAvailPageFile >> 10); 1762 st->cr(); 1763 } 1764 1765 void os::print_siginfo(outputStream *st, void *siginfo) { 1766 EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo; 1767 st->print("siginfo:"); 1768 st->print(" ExceptionCode=0x%x", er->ExceptionCode); 1769 1770 if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 1771 er->NumberParameters >= 2) { 1772 switch (er->ExceptionInformation[0]) { 1773 case 0: st->print(", reading address"); break; 1774 case 1: st->print(", writing address"); break; 1775 default: st->print(", ExceptionInformation=" INTPTR_FORMAT, 1776 er->ExceptionInformation[0]); 1777 } 1778 st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]); 1779 } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR && 1780 er->NumberParameters >= 2 && UseSharedSpaces) { 1781 FileMapInfo* mapinfo = FileMapInfo::current_info(); 1782 if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) { 1783 st->print("\n\nError accessing class data sharing archive." 
\ 1784 " Mapped file inaccessible during execution, " \ 1785 " possible disk/network problem."); 1786 } 1787 } else { 1788 int num = er->NumberParameters; 1789 if (num > 0) { 1790 st->print(", ExceptionInformation="); 1791 for (int i = 0; i < num; i++) { 1792 st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]); 1793 } 1794 } 1795 } 1796 st->cr(); 1797 } 1798 1799 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { 1800 // do nothing 1801 } 1802 1803 static char saved_jvm_path[MAX_PATH] = {0}; 1804 1805 // Find the full path to the current module, jvm.dll 1806 void os::jvm_path(char *buf, jint buflen) { 1807 // Error checking. 1808 if (buflen < MAX_PATH) { 1809 assert(false, "must use a large-enough buffer"); 1810 buf[0] = '\0'; 1811 return; 1812 } 1813 // Lazy resolve the path to current module. 1814 if (saved_jvm_path[0] != 0) { 1815 strcpy(buf, saved_jvm_path); 1816 return; 1817 } 1818 1819 buf[0] = '\0'; 1820 if (Arguments::sun_java_launcher_is_altjvm()) { 1821 // Support for the java launcher's '-XXaltjvm=<path>' option. Check 1822 // for a JAVA_HOME environment variable and fix up the path so it 1823 // looks like jvm.dll is installed there (append a fake suffix 1824 // hotspot/jvm.dll). 
1825 char* java_home_var = ::getenv("JAVA_HOME"); 1826 if (java_home_var != NULL && java_home_var[0] != 0) { 1827 strncpy(buf, java_home_var, buflen); 1828 1829 // determine if this is a legacy image or modules image 1830 // modules image doesn't have "jre" subdirectory 1831 size_t len = strlen(buf); 1832 char* jrebin_p = buf + len; 1833 jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\"); 1834 if (0 != _access(buf, 0)) { 1835 jio_snprintf(jrebin_p, buflen-len, "\\bin\\"); 1836 } 1837 len = strlen(buf); 1838 jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll"); 1839 } 1840 } 1841 1842 if (buf[0] == '\0') { 1843 GetModuleFileName(vm_lib_handle, buf, buflen); 1844 } 1845 strcpy(saved_jvm_path, buf); 1846 } 1847 1848 1849 void os::print_jni_name_prefix_on(outputStream* st, int args_size) { 1850 #ifndef _WIN64 1851 st->print("_"); 1852 #endif 1853 } 1854 1855 1856 void os::print_jni_name_suffix_on(outputStream* st, int args_size) { 1857 #ifndef _WIN64 1858 st->print("@%d", args_size * sizeof(int)); 1859 #endif 1860 } 1861 1862 // This method is a copy of JDK's sysGetLastErrorString 1863 // from src/windows/hpi/src/system_md.c 1864 1865 size_t os::lasterror(char* buf, size_t len) { 1866 DWORD errval; 1867 1868 if ((errval = GetLastError()) != 0) { 1869 // DOS error 1870 size_t n = (size_t)FormatMessage( 1871 FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS, 1872 NULL, 1873 errval, 1874 0, 1875 buf, 1876 (DWORD)len, 1877 NULL); 1878 if (n > 3) { 1879 // Drop final '.', CR, LF 1880 if (buf[n - 1] == '\n') n--; 1881 if (buf[n - 1] == '\r') n--; 1882 if (buf[n - 1] == '.') n--; 1883 buf[n] = '\0'; 1884 } 1885 return n; 1886 } 1887 1888 if (errno != 0) { 1889 // C runtime error that has no corresponding DOS error code 1890 const char* s = strerror(errno); 1891 size_t n = strlen(s); 1892 if (n >= len) n = len - 1; 1893 strncpy(buf, s, n); 1894 buf[n] = '\0'; 1895 return n; 1896 } 1897 1898 return 0; 1899 } 1900 1901 int os::get_last_error() { 1902 DWORD error = 
GetLastError(); 1903 if (error == 0) 1904 error = errno; 1905 return (int)error; 1906 } 1907 1908 // sun.misc.Signal 1909 // NOTE that this is a workaround for an apparent kernel bug where if 1910 // a signal handler for SIGBREAK is installed then that signal handler 1911 // takes priority over the console control handler for CTRL_CLOSE_EVENT. 1912 // See bug 4416763. 1913 static void (*sigbreakHandler)(int) = NULL; 1914 1915 static void UserHandler(int sig, void *siginfo, void *context) { 1916 os::signal_notify(sig); 1917 // We need to reinstate the signal handler each time... 1918 os::signal(sig, (void*)UserHandler); 1919 } 1920 1921 void* os::user_handler() { 1922 return (void*) UserHandler; 1923 } 1924 1925 void* os::signal(int signal_number, void* handler) { 1926 if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) { 1927 void (*oldHandler)(int) = sigbreakHandler; 1928 sigbreakHandler = (void (*)(int)) handler; 1929 return (void*) oldHandler; 1930 } else { 1931 return (void*)::signal(signal_number, (void (*)(int))handler); 1932 } 1933 } 1934 1935 void os::signal_raise(int signal_number) { 1936 raise(signal_number); 1937 } 1938 1939 // The Win32 C runtime library maps all console control events other than ^C 1940 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close, 1941 // logoff, and shutdown events. We therefore install our own console handler 1942 // that raises SIGTERM for the latter cases. 1943 // 1944 static BOOL WINAPI consoleHandler(DWORD event) { 1945 switch (event) { 1946 case CTRL_C_EVENT: 1947 if (is_error_reported()) { 1948 // Ctrl-C is pressed during error reporting, likely because the error 1949 // handler fails to abort. Let VM die immediately. 
      os::die();
    }

    os::signal_raise(SIGINT);
    return TRUE;
    break;
  case CTRL_BREAK_EVENT:
    if (sigbreakHandler != NULL) {
      (*sigbreakHandler)(SIGBREAK);
    }
    return TRUE;
    break;
  case CTRL_LOGOFF_EVENT: {
    // Don't terminate JVM if it is running in a non-interactive session,
    // such as a service process.
    USEROBJECTFLAGS flags;
    HANDLE handle = GetProcessWindowStation();
    if (handle != NULL &&
        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
                                 sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let next handler to deal
      // with it.
      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
        return FALSE;
      }
    }
  }
  // Interactive logoff: intentionally fall through and treat it like a
  // close/shutdown event (raise SIGTERM below).
  case CTRL_CLOSE_EVENT:
  case CTRL_SHUTDOWN_EVENT:
    os::signal_raise(SIGTERM);
    return TRUE;
    break;
  default:
    break;
  }
  return FALSE;
}

/*
 * The following code is moved from os.cpp for making this
 * code platform specific, which it is by its very nature.
 */

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Semaphore released once per pending signal; signal_wait() blocks on it.
static HANDLE sig_sem = NULL;

void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.  For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified.  This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case.  See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}

// Record one occurrence of signal_number and wake up the signal thread.
void os::signal_notify(int signal_number) {
  BOOL ret;
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[signal_number]);
    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
    assert(ret != 0, "ReleaseSemaphore() failed");
  }
}

// Scan pending_signals for a raised signal and consume it (CAS decrement).
// Returns the signal number, or -1 when none is pending and wait_for_signal
// is false; otherwise blocks on sig_sem until a signal arrives.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // cmpxchg guards against another thread consuming the same count.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
2063 threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); 2064 if (threadIsSuspended) { 2065 // 2066 // The semaphore has been incremented, but while we were waiting 2067 // another thread suspended us. We don't want to continue running 2068 // while suspended because that would surprise the thread that 2069 // suspended us. 2070 // 2071 ret = ::ReleaseSemaphore(sig_sem, 1, NULL); 2072 assert(ret != 0, "ReleaseSemaphore() failed"); 2073 2074 thread->java_suspend_self(); 2075 } 2076 } while (threadIsSuspended); 2077 } 2078 } 2079 2080 int os::signal_lookup() { 2081 return check_pending_signals(false); 2082 } 2083 2084 int os::signal_wait() { 2085 return check_pending_signals(true); 2086 } 2087 2088 // Implicit OS exception handling 2089 2090 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) { 2091 JavaThread* thread = JavaThread::current(); 2092 // Save pc in thread 2093 #ifdef _M_IA64 2094 // Do not blow up if no thread info available. 2095 if (thread) { 2096 // Saving PRECISE pc (with slot information) in thread. 2097 uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress; 2098 // Convert precise PC into "Unix" format 2099 precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2); 2100 thread->set_saved_exception_pc((address)precise_pc); 2101 } 2102 // Set pc to handler 2103 exceptionInfo->ContextRecord->StIIP = (DWORD64)handler; 2104 // Clear out psr.ri (= Restart Instruction) in order to continue 2105 // at the beginning of the target bundle. 2106 exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF; 2107 assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!"); 2108 #elif _M_AMD64 2109 // Do not blow up if no thread info available. 
2110 if (thread) { 2111 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip); 2112 } 2113 // Set pc to handler 2114 exceptionInfo->ContextRecord->Rip = (DWORD64)handler; 2115 #else 2116 // Do not blow up if no thread info available. 2117 if (thread) { 2118 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip); 2119 } 2120 // Set pc to handler 2121 exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler; 2122 #endif 2123 2124 // Continue the execution 2125 return EXCEPTION_CONTINUE_EXECUTION; 2126 } 2127 2128 2129 // Used for PostMortemDump 2130 extern "C" void safepoints(); 2131 extern "C" void find(int x); 2132 extern "C" void events(); 2133 2134 // According to Windows API documentation, an illegal instruction sequence should generate 2135 // the 0xC000001C exception code. However, real world experience shows that occasionnaly 2136 // the execution of an illegal instruction can generate the exception code 0xC000001E. This 2137 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems). 2138 2139 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E 2140 2141 // From "Execution Protection in the Windows Operating System" draft 0.35 2142 // Once a system header becomes available, the "real" define should be 2143 // included or copied here. 2144 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08 2145 2146 // Handle NAT Bit consumption on IA64. 2147 #ifdef _M_IA64 2148 #define EXCEPTION_REG_NAT_CONSUMPTION STATUS_REG_NAT_CONSUMPTION 2149 #endif 2150 2151 // Windows Vista/2008 heap corruption check 2152 #define EXCEPTION_HEAP_CORRUPTION 0xC0000374 2153 2154 #define def_excpt(val) #val, val 2155 2156 struct siglabel { 2157 char *name; 2158 int number; 2159 }; 2160 2161 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual 2162 // C++ compiler contain this error code. 
Because this is a compiler-generated 2163 // error, the code is not listed in the Win32 API header files. 2164 // The code is actually a cryptic mnemonic device, with the initial "E" 2165 // standing for "exception" and the final 3 bytes (0x6D7363) representing the 2166 // ASCII values of "msc". 2167 2168 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363 2169 2170 2171 struct siglabel exceptlabels[] = { 2172 def_excpt(EXCEPTION_ACCESS_VIOLATION), 2173 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), 2174 def_excpt(EXCEPTION_BREAKPOINT), 2175 def_excpt(EXCEPTION_SINGLE_STEP), 2176 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), 2177 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND), 2178 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO), 2179 def_excpt(EXCEPTION_FLT_INEXACT_RESULT), 2180 def_excpt(EXCEPTION_FLT_INVALID_OPERATION), 2181 def_excpt(EXCEPTION_FLT_OVERFLOW), 2182 def_excpt(EXCEPTION_FLT_STACK_CHECK), 2183 def_excpt(EXCEPTION_FLT_UNDERFLOW), 2184 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO), 2185 def_excpt(EXCEPTION_INT_OVERFLOW), 2186 def_excpt(EXCEPTION_PRIV_INSTRUCTION), 2187 def_excpt(EXCEPTION_IN_PAGE_ERROR), 2188 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION), 2189 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2), 2190 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION), 2191 def_excpt(EXCEPTION_STACK_OVERFLOW), 2192 def_excpt(EXCEPTION_INVALID_DISPOSITION), 2193 def_excpt(EXCEPTION_GUARD_PAGE), 2194 def_excpt(EXCEPTION_INVALID_HANDLE), 2195 def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION), 2196 def_excpt(EXCEPTION_HEAP_CORRUPTION), 2197 #ifdef _M_IA64 2198 def_excpt(EXCEPTION_REG_NAT_CONSUMPTION), 2199 #endif 2200 NULL, 0 2201 }; 2202 2203 const char* os::exception_name(int exception_code, char *buf, size_t size) { 2204 for (int i = 0; exceptlabels[i].name != NULL; i++) { 2205 if (exceptlabels[i].number == exception_code) { 2206 jio_snprintf(buf, size, "%s", exceptlabels[i].name); 2207 return buf; 2208 } 2209 } 2210 2211 return NULL; 2212 } 2213 2214 
//----------------------------------------------------------------------------- 2215 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2216 // handle exception caused by idiv; should only happen for -MinInt/-1 2217 // (division by zero is handled explicitly) 2218 #ifdef _M_IA64 2219 assert(0, "Fix Handle_IDiv_Exception"); 2220 #elif _M_AMD64 2221 PCONTEXT ctx = exceptionInfo->ContextRecord; 2222 address pc = (address)ctx->Rip; 2223 assert(pc[0] == 0xF7, "not an idiv opcode"); 2224 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2225 assert(ctx->Rax == min_jint, "unexpected idiv exception"); 2226 // set correct result values and continue after idiv instruction 2227 ctx->Rip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2228 ctx->Rax = (DWORD)min_jint; // result 2229 ctx->Rdx = (DWORD)0; // remainder 2230 // Continue the execution 2231 #else 2232 PCONTEXT ctx = exceptionInfo->ContextRecord; 2233 address pc = (address)ctx->Eip; 2234 assert(pc[0] == 0xF7, "not an idiv opcode"); 2235 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2236 assert(ctx->Eax == min_jint, "unexpected idiv exception"); 2237 // set correct result values and continue after idiv instruction 2238 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2239 ctx->Eax = (DWORD)min_jint; // result 2240 ctx->Edx = (DWORD)0; // remainder 2241 // Continue the execution 2242 #endif 2243 return EXCEPTION_CONTINUE_EXECUTION; 2244 } 2245 2246 //----------------------------------------------------------------------------- 2247 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2248 PCONTEXT ctx = exceptionInfo->ContextRecord; 2249 #ifndef _WIN64 2250 // handle exception caused by native method modifying control word 2251 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2252 2253 switch (exception_code) { 2254 case EXCEPTION_FLT_DENORMAL_OPERAND: 2255 case EXCEPTION_FLT_DIVIDE_BY_ZERO: 2256 case 
EXCEPTION_FLT_INEXACT_RESULT: 2257 case EXCEPTION_FLT_INVALID_OPERATION: 2258 case EXCEPTION_FLT_OVERFLOW: 2259 case EXCEPTION_FLT_STACK_CHECK: 2260 case EXCEPTION_FLT_UNDERFLOW: 2261 jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std()); 2262 if (fp_control_word != ctx->FloatSave.ControlWord) { 2263 // Restore FPCW and mask out FLT exceptions 2264 ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0; 2265 // Mask out pending FLT exceptions 2266 ctx->FloatSave.StatusWord &= 0xffffff00; 2267 return EXCEPTION_CONTINUE_EXECUTION; 2268 } 2269 } 2270 2271 if (prev_uef_handler != NULL) { 2272 // We didn't handle this exception so pass it to the previous 2273 // UnhandledExceptionFilter. 2274 return (prev_uef_handler)(exceptionInfo); 2275 } 2276 #else // !_WIN64 2277 /* 2278 On Windows, the mxcsr control bits are non-volatile across calls 2279 See also CR 6192333 2280 */ 2281 jint MxCsr = INITIAL_MXCSR; 2282 // we can't use StubRoutines::addr_mxcsr_std() 2283 // because in Win64 mxcsr is not saved there 2284 if (MxCsr != ctx->MxCsr) { 2285 ctx->MxCsr = MxCsr; 2286 return EXCEPTION_CONTINUE_EXECUTION; 2287 } 2288 #endif // !_WIN64 2289 2290 return EXCEPTION_CONTINUE_SEARCH; 2291 } 2292 2293 // Fatal error reporting is single threaded so we can make this a 2294 // static and preallocated. If it's more than MAX_PATH silently ignore 2295 // it. 2296 static char saved_error_file[MAX_PATH] = {0}; 2297 2298 void os::set_error_file(const char *logfile) { 2299 if (strlen(logfile) <= MAX_PATH) { 2300 strncpy(saved_error_file, logfile, MAX_PATH); 2301 } 2302 } 2303 2304 static inline void report_error(Thread* t, DWORD exception_code, 2305 address addr, void* siginfo, void* context) { 2306 VMError err(t, exception_code, addr, siginfo, context); 2307 err.report_and_die(); 2308 2309 // If UseOsErrorReporting, this will return here and save the error file 2310 // somewhere where we can find it in the minidump. 
}

//-----------------------------------------------------------------------------
// The VM's top-level structured exception filter.  Classifies hardware
// exceptions (access violations, stack overflow, integer/FP traps, ...)
// and either redirects execution to a VM continuation stub
// (EXCEPTION_CONTINUE_EXECUTION via Handle_Exception), resumes as-is, or
// reports a fatal error and lets the OS search further handlers
// (EXCEPTION_CONTINUE_SEARCH).
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#ifdef _M_IA64
  // On Itanium, we need the "precise pc", which has the slot number coded
  // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
  address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
  // Convert the pc to "Unix format", which has the slot number coded
  // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
  // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
  // information is saved in the Unix format.
  address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
#elif _M_AMD64
  address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
  Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      bool instr_spans_page_boundary =
        (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                         (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start =
            (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          if (PrintMiscellaneous && Verbose) {
            char buf[256];
            jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                         "at " INTPTR_FORMAT
                         ", unguarding " INTPTR_FORMAT ": %s", addr,
                         page_start, (res ? "success" : strerror(errno)));
            tty->print_raw_cr(buf);
          }

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here.  Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM.  Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop.  This condition is probably even more unlikely
          // than the first.
          //
          // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are unnecessary complication:
          // this handler is a best-effort safety net, not a complete solution.
          // It is disabled by default and should only be used as a workaround
          // in case we missed any no-execute-unsafe VM code.

          last_addr = addr;

          return EXCEPTION_CONTINUE_EXECUTION;
        }
      }

      // Last unguard failed or not unguarding
      tty->print_raw_cr("Execution protection violation");
      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    }
  }
#endif // _WIN64

  // Check to see if we caught the safepoint code in the
  // process of write protecting the memory serialization page.
  // It write enables the page immediately after protecting it
  // so just return.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    JavaThread* thread = (JavaThread*) t;
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];
    if (os::is_memory_serialize_page(thread, addr)) {
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
      VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
  }

  if (t != NULL && t->is_Java_thread()) {
    JavaThread* thread = (JavaThread*) t;
    bool in_java = thread->thread_state() == _thread_in_Java;

    // Handle potential stack overflows up front.
    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
      if (os::uses_stack_guard_pages()) {
#ifdef _M_IA64
        // Use guard page for register stack.
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        // Check for a register stack overflow on Itanium
        if (thread->addr_inside_register_stack_red_zone(addr)) {
          // Fatal red zone violation happens if the Java program
          // catches a StackOverflow error and does so much processing
          // that it runs beyond the unprotected yellow guard zone. As
          // a result, we are out of here.
          fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
        } else if(thread->addr_inside_register_stack(addr)) {
          // Disable the yellow zone which sets the state that
          // we've got a stack overflow problem.
          if (thread->stack_yellow_zone_enabled()) {
            thread->disable_stack_yellow_zone();
          }
          // Give us some room to process the exception.
          thread->disable_register_stack_guard();
          // Tracing with +Verbose.
          if (Verbose) {
            tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
            tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
            tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
            tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
                          thread->register_stack_base(),
                          thread->register_stack_base() + thread->stack_size());
          }

          // Reguard the permanent register stack red zone just to be sure.
          // We saw Windows silently disabling this without telling us.
          thread->enable_register_stack_red_zone();

          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
#endif
        if (thread->stack_yellow_zone_enabled()) {
          // Yellow zone violation.  The o/s has unprotected the first yellow
          // zone page for us.  Note:  must call disable_stack_yellow_zone to
          // update the enabled status, even if the zone contains only one page.
          thread->disable_stack_yellow_zone();
          // If not in java code, return and hope for the best.
          return in_java ? Handle_Exception(exceptionInfo,
                                            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
                         : EXCEPTION_CONTINUE_EXECUTION;
        } else {
          // Fatal red zone violation.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
          report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                       exceptionInfo->ContextRecord);
          return EXCEPTION_CONTINUE_SEARCH;
        }
      } else if (in_java) {
        // JVM-managed guard pages cannot be used on win95/98.  The o/s provides
        // a one-time-only guard page, which it has released to us.  The next
        // stack overflow on this thread will result in an ACCESS_VIOLATION.
        return Handle_Exception(exceptionInfo,
                                SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
      } else {
        // Can only return and hope for the best.  Further stack growth will
        // result in an ACCESS_VIOLATION.
        return EXCEPTION_CONTINUE_EXECUTION;
      }
    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
      // Either stack overflow or null pointer exception.
      if (in_java) {
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        address stack_end = thread->stack_base() - thread->stack_size();
        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
          // Stack overflow.
          assert(!os::uses_stack_guard_pages(),
                 "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        //
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        //
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          if (os::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
        {
#ifdef _WIN64
          //
          // If it's a legal stack address map the entire region in
          //
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base()) {
            // Round down to the page containing the fault and commit
            // everything from there up to the stack base.
            addr = (address)((uintptr_t)addr &
                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
            os::commit_memory((char *)addr, thread->stack_base() - addr,
                              !ExecMem);
            return EXCEPTION_CONTINUE_EXECUTION;
          }
          else
#endif
          {
            // Null pointer exception.
#ifdef _M_IA64
            // Process implicit null checks in compiled code. Note: Implicit null checks
            // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
            if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
              CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
              // Handle implicit null check in UEP method entry
              if (cb && (cb->is_frame_complete_at(pc) ||
                  (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
                if (Verbose) {
                  intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
                  tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
                  tty->print_cr("      to addr " INTPTR_FORMAT, addr);
                  tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
                                *(bundle_start + 1), *bundle_start);
                }
                return Handle_Exception(exceptionInfo,
                                        SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
              }
            }

            // Implicit null checks were processed above.  Hence, we should not reach
            // here in the usual case => die!
            if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;

#else // !IA64

            // Windows 98 reports faulting addresses incorrectly
            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
                !os::win32::is_nt()) {
              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
            }
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;
#endif
          }
        }
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
        address addr = JNI_FastGetField::find_slowcase_pc(pc);
        if (addr != (address)-1) {
          return Handle_Exception(exceptionInfo, addr);
        }
      }
#endif

      // Stack overflow or null pointer exception in native code.
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#if defined _M_IA64
    else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
              exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
      M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);

      // Compiled method patched to be non entrant? Following conditions must apply:
      // 1. must be first instruction in bundle
      // 2. must be a break instruction with appropriate code
      if ((((uint64_t) pc & 0x0F) == 0) &&
          (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
        return Handle_Exception(exceptionInfo,
                                (address)SharedRuntime::get_handle_wrong_method_stub());
      }
    } // /EXCEPTION_ILLEGAL_INSTRUCTION
#endif


    if (in_java) {
      switch (exception_code) {
      // Integer divide traps raised by compiled Java code.
      case EXCEPTION_INT_DIVIDE_BY_ZERO:
        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

      case EXCEPTION_INT_OVERFLOW:
        // min_jint / -1 overflows the idiv instruction; patch up and resume.
        return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }
    if (((thread->thread_state() == _thread_in_Java) ||
         (thread->thread_state() == _thread_in_native)) &&
         exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION)
    {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
  }

  if (exception_code != EXCEPTION_BREAKPOINT) {
    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                 exceptionInfo->ContextRecord);
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

#ifndef _WIN64
// Special care for fast JNI accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
// the heap gets shrunk before the field access.
// Need to install our own structured exception handler since native code may
// install its own.
2668 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { 2669 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2670 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2671 address pc = (address) exceptionInfo->ContextRecord->Eip; 2672 address addr = JNI_FastGetField::find_slowcase_pc(pc); 2673 if (addr != (address)-1) { 2674 return Handle_Exception(exceptionInfo, addr); 2675 } 2676 } 2677 return EXCEPTION_CONTINUE_SEARCH; 2678 } 2679 2680 #define DEFINE_FAST_GETFIELD(Return,Fieldname,Result) \ 2681 Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) { \ 2682 __try { \ 2683 return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, obj, fieldID); \ 2684 } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { \ 2685 } \ 2686 return 0; \ 2687 } 2688 2689 DEFINE_FAST_GETFIELD(jboolean, bool, Boolean) 2690 DEFINE_FAST_GETFIELD(jbyte, byte, Byte) 2691 DEFINE_FAST_GETFIELD(jchar, char, Char) 2692 DEFINE_FAST_GETFIELD(jshort, short, Short) 2693 DEFINE_FAST_GETFIELD(jint, int, Int) 2694 DEFINE_FAST_GETFIELD(jlong, long, Long) 2695 DEFINE_FAST_GETFIELD(jfloat, float, Float) 2696 DEFINE_FAST_GETFIELD(jdouble, double, Double) 2697 2698 address os::win32::fast_jni_accessor_wrapper(BasicType type) { 2699 switch (type) { 2700 case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper; 2701 case T_BYTE: return (address)jni_fast_GetByteField_wrapper; 2702 case T_CHAR: return (address)jni_fast_GetCharField_wrapper; 2703 case T_SHORT: return (address)jni_fast_GetShortField_wrapper; 2704 case T_INT: return (address)jni_fast_GetIntField_wrapper; 2705 case T_LONG: return (address)jni_fast_GetLongField_wrapper; 2706 case T_FLOAT: return (address)jni_fast_GetFloatField_wrapper; 2707 case T_DOUBLE: return (address)jni_fast_GetDoubleField_wrapper; 2708 default: ShouldNotReachHere(); 2709 } 2710 return (address)-1; 2711 } 2712 #endif 2713 2714 void 
os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) { 2715 // Install a win32 structured exception handler around the test 2716 // function call so the VM can generate an error dump if needed. 2717 __try { 2718 (*funcPtr)(); 2719 } __except(topLevelExceptionFilter( 2720 (_EXCEPTION_POINTERS*)_exception_info())) { 2721 // Nothing to do. 2722 } 2723 } 2724 2725 // Virtual Memory 2726 2727 int os::vm_page_size() { return os::win32::vm_page_size(); } 2728 int os::vm_allocation_granularity() { 2729 return os::win32::vm_allocation_granularity(); 2730 } 2731 2732 // Windows large page support is available on Windows 2003. In order to use 2733 // large page memory, the administrator must first assign additional privilege 2734 // to the user: 2735 // + select Control Panel -> Administrative Tools -> Local Security Policy 2736 // + select Local Policies -> User Rights Assignment 2737 // + double click "Lock pages in memory", add users and/or groups 2738 // + reboot 2739 // Note the above steps are needed for administrator as well, as administrators 2740 // by default do not have the privilege to lock pages in memory. 2741 // 2742 // Note about Windows 2003: although the API supports committing large page 2743 // memory on a page-by-page basis and VirtualAlloc() returns success under this 2744 // scenario, I found through experiment it only uses large page if the entire 2745 // memory region is reserved and committed in a single VirtualAlloc() call. 2746 // This makes Windows large page support more or less like Solaris ISM, in 2747 // that the entire heap must be committed upfront. This probably will change 2748 // in the future, if so the code below needs to be revisited. 
2749 2750 #ifndef MEM_LARGE_PAGES 2751 #define MEM_LARGE_PAGES 0x20000000 2752 #endif 2753 2754 static HANDLE _hProcess; 2755 static HANDLE _hToken; 2756 2757 // Container for NUMA node list info 2758 class NUMANodeListHolder { 2759 private: 2760 int *_numa_used_node_list; // allocated below 2761 int _numa_used_node_count; 2762 2763 void free_node_list() { 2764 if (_numa_used_node_list != NULL) { 2765 FREE_C_HEAP_ARRAY(int, _numa_used_node_list, mtInternal); 2766 } 2767 } 2768 2769 public: 2770 NUMANodeListHolder() { 2771 _numa_used_node_count = 0; 2772 _numa_used_node_list = NULL; 2773 // do rest of initialization in build routine (after function pointers are set up) 2774 } 2775 2776 ~NUMANodeListHolder() { 2777 free_node_list(); 2778 } 2779 2780 bool build() { 2781 DWORD_PTR proc_aff_mask; 2782 DWORD_PTR sys_aff_mask; 2783 if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false; 2784 ULONG highest_node_number; 2785 if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false; 2786 free_node_list(); 2787 _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal); 2788 for (unsigned int i = 0; i <= highest_node_number; i++) { 2789 ULONGLONG proc_mask_numa_node; 2790 if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false; 2791 if ((proc_aff_mask & proc_mask_numa_node)!=0) { 2792 _numa_used_node_list[_numa_used_node_count++] = i; 2793 } 2794 } 2795 return (_numa_used_node_count > 1); 2796 } 2797 2798 int get_count() { return _numa_used_node_count; } 2799 int get_node_list_entry(int n) { 2800 // for indexes out of range, returns -1 2801 return (n < _numa_used_node_count ? 
_numa_used_node_list[n] : -1); 2802 } 2803 2804 } numa_node_list_holder; 2805 2806 2807 2808 static size_t _large_page_size = 0; 2809 2810 static bool resolve_functions_for_large_page_init() { 2811 return os::Kernel32Dll::GetLargePageMinimumAvailable() && 2812 os::Advapi32Dll::AdvapiAvailable(); 2813 } 2814 2815 static bool request_lock_memory_privilege() { 2816 _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, 2817 os::current_process_id()); 2818 2819 LUID luid; 2820 if (_hProcess != NULL && 2821 os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) && 2822 os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) { 2823 2824 TOKEN_PRIVILEGES tp; 2825 tp.PrivilegeCount = 1; 2826 tp.Privileges[0].Luid = luid; 2827 tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; 2828 2829 // AdjustTokenPrivileges() may return TRUE even when it couldn't change the 2830 // privilege. Check GetLastError() too. See MSDN document. 2831 if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) && 2832 (GetLastError() == ERROR_SUCCESS)) { 2833 return true; 2834 } 2835 } 2836 2837 return false; 2838 } 2839 2840 static void cleanup_after_large_page_init() { 2841 if (_hProcess) CloseHandle(_hProcess); 2842 _hProcess = NULL; 2843 if (_hToken) CloseHandle(_hToken); 2844 _hToken = NULL; 2845 } 2846 2847 static bool numa_interleaving_init() { 2848 bool success = false; 2849 bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving); 2850 2851 // print a warning if UseNUMAInterleaving flag is specified on command line 2852 bool warn_on_failure = use_numa_interleaving_specified; 2853 # define WARN(msg) if (warn_on_failure) { warning(msg); } 2854 2855 // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages) 2856 size_t min_interleave_granularity = UseLargePages ? 
_large_page_size : os::vm_allocation_granularity(); 2857 NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity); 2858 2859 if (os::Kernel32Dll::NumaCallsAvailable()) { 2860 if (numa_node_list_holder.build()) { 2861 if (PrintMiscellaneous && Verbose) { 2862 tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count()); 2863 for (int i = 0; i < numa_node_list_holder.get_count(); i++) { 2864 tty->print("%d ", numa_node_list_holder.get_node_list_entry(i)); 2865 } 2866 tty->print("\n"); 2867 } 2868 success = true; 2869 } else { 2870 WARN("Process does not cover multiple NUMA nodes."); 2871 } 2872 } else { 2873 WARN("NUMA Interleaving is not supported by the operating system."); 2874 } 2875 if (!success) { 2876 if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag."); 2877 } 2878 return success; 2879 #undef WARN 2880 } 2881 2882 // this routine is used whenever we need to reserve a contiguous VA range 2883 // but we need to make separate VirtualAlloc calls for each piece of the range 2884 // Reasons for doing this: 2885 // * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise) 2886 // * UseNUMAInterleaving requires a separate node for each piece 2887 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, DWORD prot, 2888 bool should_inject_error=false) { 2889 char * p_buf; 2890 // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size 2891 size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity(); 2892 size_t chunk_size = UseNUMAInterleaving ? 
NUMAInterleaveGranularity : page_size; 2893 2894 // first reserve enough address space in advance since we want to be 2895 // able to break a single contiguous virtual address range into multiple 2896 // large page commits but WS2003 does not allow reserving large page space 2897 // so we just use 4K pages for reserve, this gives us a legal contiguous 2898 // address space. then we will deallocate that reservation, and re alloc 2899 // using large pages 2900 const size_t size_of_reserve = bytes + chunk_size; 2901 if (bytes > size_of_reserve) { 2902 // Overflowed. 2903 return NULL; 2904 } 2905 p_buf = (char *) VirtualAlloc(addr, 2906 size_of_reserve, // size of Reserve 2907 MEM_RESERVE, 2908 PAGE_READWRITE); 2909 // If reservation failed, return NULL 2910 if (p_buf == NULL) return NULL; 2911 MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC); 2912 os::release_memory(p_buf, bytes + chunk_size); 2913 2914 // we still need to round up to a page boundary (in case we are using large pages) 2915 // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size) 2916 // instead we handle this in the bytes_to_rq computation below 2917 p_buf = (char *) align_size_up((size_t)p_buf, page_size); 2918 2919 // now go through and allocate one chunk at a time until all bytes are 2920 // allocated 2921 size_t bytes_remaining = bytes; 2922 // An overflow of align_size_up() would have been caught above 2923 // in the calculation of size_of_reserve. 
2924 char * next_alloc_addr = p_buf; 2925 HANDLE hProc = GetCurrentProcess(); 2926 2927 #ifdef ASSERT 2928 // Variable for the failure injection 2929 long ran_num = os::random(); 2930 size_t fail_after = ran_num % bytes; 2931 #endif 2932 2933 int count=0; 2934 while (bytes_remaining) { 2935 // select bytes_to_rq to get to the next chunk_size boundary 2936 2937 size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size)); 2938 // Note allocate and commit 2939 char * p_new; 2940 2941 #ifdef ASSERT 2942 bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after); 2943 #else 2944 const bool inject_error_now = false; 2945 #endif 2946 2947 if (inject_error_now) { 2948 p_new = NULL; 2949 } else { 2950 if (!UseNUMAInterleaving) { 2951 p_new = (char *) VirtualAlloc(next_alloc_addr, 2952 bytes_to_rq, 2953 flags, 2954 prot); 2955 } else { 2956 // get the next node to use from the used_node_list 2957 assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected"); 2958 DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count()); 2959 p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc, 2960 next_alloc_addr, 2961 bytes_to_rq, 2962 flags, 2963 prot, 2964 node); 2965 } 2966 } 2967 2968 if (p_new == NULL) { 2969 // Free any allocated pages 2970 if (next_alloc_addr > p_buf) { 2971 // Some memory was committed so release it. 2972 size_t bytes_to_release = bytes - bytes_remaining; 2973 // NMT has yet to record any individual blocks, so it 2974 // need to create a dummy 'reserve' record to match 2975 // the release. 
2976 MemTracker::record_virtual_memory_reserve((address)p_buf, 2977 bytes_to_release, mtNone, CALLER_PC); 2978 os::release_memory(p_buf, bytes_to_release); 2979 } 2980 #ifdef ASSERT 2981 if (should_inject_error) { 2982 if (TracePageSizes && Verbose) { 2983 tty->print_cr("Reserving pages individually failed."); 2984 } 2985 } 2986 #endif 2987 return NULL; 2988 } 2989 2990 bytes_remaining -= bytes_to_rq; 2991 next_alloc_addr += bytes_to_rq; 2992 count++; 2993 } 2994 // Although the memory is allocated individually, it is returned as one. 2995 // NMT records it as one block. 2996 address pc = CALLER_PC; 2997 if ((flags & MEM_COMMIT) != 0) { 2998 MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc); 2999 } else { 3000 MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc); 3001 } 3002 3003 // made it this far, success 3004 return p_buf; 3005 } 3006 3007 3008 3009 void os::large_page_init() { 3010 if (!UseLargePages) return; 3011 3012 // print a warning if any large page related flag is specified on command line 3013 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) || 3014 !FLAG_IS_DEFAULT(LargePageSizeInBytes); 3015 bool success = false; 3016 3017 # define WARN(msg) if (warn_on_failure) { warning(msg); } 3018 if (resolve_functions_for_large_page_init()) { 3019 if (request_lock_memory_privilege()) { 3020 size_t s = os::Kernel32Dll::GetLargePageMinimum(); 3021 if (s) { 3022 #if defined(IA32) || defined(AMD64) 3023 if (s > 4*M || LargePageSizeInBytes > 4*M) { 3024 WARN("JVM cannot use large pages bigger than 4mb."); 3025 } else { 3026 #endif 3027 if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) { 3028 _large_page_size = LargePageSizeInBytes; 3029 } else { 3030 _large_page_size = s; 3031 } 3032 success = true; 3033 #if defined(IA32) || defined(AMD64) 3034 } 3035 #endif 3036 } else { 3037 WARN("Large page is not supported by the processor."); 3038 } 3039 } else { 3040 WARN("JVM cannot use large page 
memory because it does not have enough privilege to lock pages in memory."); 3041 } 3042 } else { 3043 WARN("Large page is not supported by the operating system."); 3044 } 3045 #undef WARN 3046 3047 const size_t default_page_size = (size_t) vm_page_size(); 3048 if (success && _large_page_size > default_page_size) { 3049 _page_sizes[0] = _large_page_size; 3050 _page_sizes[1] = default_page_size; 3051 _page_sizes[2] = 0; 3052 } 3053 3054 cleanup_after_large_page_init(); 3055 UseLargePages = success; 3056 } 3057 3058 // On win32, one cannot release just a part of reserved memory, it's an 3059 // all or nothing deal. When we split a reservation, we must break the 3060 // reservation into two reservations. 3061 void os::pd_split_reserved_memory(char *base, size_t size, size_t split, 3062 bool realloc) { 3063 if (size > 0) { 3064 release_memory(base, size); 3065 if (realloc) { 3066 reserve_memory(split, base); 3067 } 3068 if (size != split) { 3069 reserve_memory(size - split, base + split); 3070 } 3071 } 3072 } 3073 3074 // Multiple threads can race in this code but it's not possible to unmap small sections of 3075 // virtual space to get requested alignment, like posix-like os's. 3076 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe. 
3077 char* os::reserve_memory_aligned(size_t size, size_t alignment) { 3078 assert((alignment & (os::vm_allocation_granularity() - 1)) == 0, 3079 "Alignment must be a multiple of allocation granularity (page size)"); 3080 assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned"); 3081 3082 size_t extra_size = size + alignment; 3083 assert(extra_size >= size, "overflow, size is too large to allow alignment"); 3084 3085 char* aligned_base = NULL; 3086 3087 do { 3088 char* extra_base = os::reserve_memory(extra_size, NULL, alignment); 3089 if (extra_base == NULL) { 3090 return NULL; 3091 } 3092 // Do manual alignment 3093 aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment); 3094 3095 os::release_memory(extra_base, extra_size); 3096 3097 aligned_base = os::reserve_memory(size, aligned_base); 3098 3099 } while (aligned_base == NULL); 3100 3101 return aligned_base; 3102 } 3103 3104 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) { 3105 assert((size_t)addr % os::vm_allocation_granularity() == 0, 3106 "reserve alignment"); 3107 assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size"); 3108 char* res; 3109 // note that if UseLargePages is on, all the areas that require interleaving 3110 // will go thru reserve_memory_special rather than thru here. 
3111 bool use_individual = (UseNUMAInterleaving && !UseLargePages); 3112 if (!use_individual) { 3113 res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE); 3114 } else { 3115 elapsedTimer reserveTimer; 3116 if (Verbose && PrintMiscellaneous) reserveTimer.start(); 3117 // in numa interleaving, we have to allocate pages individually 3118 // (well really chunks of NUMAInterleaveGranularity size) 3119 res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE); 3120 if (res == NULL) { 3121 warning("NUMA page allocation failed"); 3122 } 3123 if (Verbose && PrintMiscellaneous) { 3124 reserveTimer.stop(); 3125 tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes, 3126 reserveTimer.milliseconds(), reserveTimer.ticks()); 3127 } 3128 } 3129 assert(res == NULL || addr == NULL || addr == res, 3130 "Unexpected address from reserve."); 3131 3132 return res; 3133 } 3134 3135 // Reserve memory at an arbitrary address, only if that area is 3136 // available (and not reserved for something else). 3137 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) { 3138 // Windows os::reserve_memory() fails of the requested address range is 3139 // not avilable. 3140 return reserve_memory(bytes, requested_addr); 3141 } 3142 3143 size_t os::large_page_size() { 3144 return _large_page_size; 3145 } 3146 3147 bool os::can_commit_large_page_memory() { 3148 // Windows only uses large page memory when the entire region is reserved 3149 // and committed in a single VirtualAlloc() call. This may change in the 3150 // future, but with Windows 2003 it's not possible to commit on demand. 
3151 return false; 3152 } 3153 3154 bool os::can_execute_large_page_memory() { 3155 return true; 3156 } 3157 3158 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) { 3159 assert(UseLargePages, "only for large pages"); 3160 3161 if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) { 3162 return NULL; // Fallback to small pages. 3163 } 3164 3165 const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; 3166 const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 3167 3168 // with large pages, there are two cases where we need to use Individual Allocation 3169 // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003) 3170 // 2) NUMA Interleaving is enabled, in which case we use a different node for each page 3171 if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) { 3172 if (TracePageSizes && Verbose) { 3173 tty->print_cr("Reserving large pages individually."); 3174 } 3175 char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError); 3176 if (p_buf == NULL) { 3177 // give an appropriate warning message 3178 if (UseNUMAInterleaving) { 3179 warning("NUMA large page allocation failed, UseLargePages flag ignored"); 3180 } 3181 if (UseLargePagesIndividualAllocation) { 3182 warning("Individually allocated large pages failed, " 3183 "use -XX:-UseLargePagesIndividualAllocation to turn off"); 3184 } 3185 return NULL; 3186 } 3187 3188 return p_buf; 3189 3190 } else { 3191 if (TracePageSizes && Verbose) { 3192 tty->print_cr("Reserving large pages in a single large chunk."); 3193 } 3194 // normal policy just allocate it all at once 3195 DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 3196 char * res = (char *)VirtualAlloc(addr, bytes, flag, prot); 3197 if (res != NULL) { 3198 address pc = CALLER_PC; 3199 MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc); 3200 } 
3201 3202 return res; 3203 } 3204 } 3205 3206 bool os::release_memory_special(char* base, size_t bytes) { 3207 assert(base != NULL, "Sanity check"); 3208 return release_memory(base, bytes); 3209 } 3210 3211 void os::print_statistics() { 3212 } 3213 3214 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) { 3215 int err = os::get_last_error(); 3216 char buf[256]; 3217 size_t buf_len = os::lasterror(buf, sizeof(buf)); 3218 warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT 3219 ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes, 3220 exec, buf_len != 0 ? buf : "<no_error_string>", err); 3221 } 3222 3223 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) { 3224 if (bytes == 0) { 3225 // Don't bother the OS with noops. 3226 return true; 3227 } 3228 assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries"); 3229 assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks"); 3230 // Don't attempt to print anything if the OS call fails. We're 3231 // probably low on resources, so the print itself may cause crashes. 
3232 3233 // unless we have NUMAInterleaving enabled, the range of a commit 3234 // is always within a reserve covered by a single VirtualAlloc 3235 // in that case we can just do a single commit for the requested size 3236 if (!UseNUMAInterleaving) { 3237 if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) { 3238 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 3239 return false; 3240 } 3241 if (exec) { 3242 DWORD oldprot; 3243 // Windows doc says to use VirtualProtect to get execute permissions 3244 if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) { 3245 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 3246 return false; 3247 } 3248 } 3249 return true; 3250 } else { 3251 3252 // when NUMAInterleaving is enabled, the commit might cover a range that 3253 // came from multiple VirtualAlloc reserves (using allocate_pages_individually). 3254 // VirtualQuery can help us determine that. The RegionSize that VirtualQuery 3255 // returns represents the number of bytes that can be committed in one step. 
3256 size_t bytes_remaining = bytes; 3257 char * next_alloc_addr = addr; 3258 while (bytes_remaining > 0) { 3259 MEMORY_BASIC_INFORMATION alloc_info; 3260 VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info)); 3261 size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3262 if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, 3263 PAGE_READWRITE) == NULL) { 3264 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 3265 exec);) 3266 return false; 3267 } 3268 if (exec) { 3269 DWORD oldprot; 3270 if (!VirtualProtect(next_alloc_addr, bytes_to_rq, 3271 PAGE_EXECUTE_READWRITE, &oldprot)) { 3272 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 3273 exec);) 3274 return false; 3275 } 3276 } 3277 bytes_remaining -= bytes_to_rq; 3278 next_alloc_addr += bytes_to_rq; 3279 } 3280 } 3281 // if we made it this far, return true 3282 return true; 3283 } 3284 3285 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, 3286 bool exec) { 3287 // alignment_hint is ignored on this OS 3288 return pd_commit_memory(addr, size, exec); 3289 } 3290 3291 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec, 3292 const char* mesg) { 3293 assert(mesg != NULL, "mesg must be specified"); 3294 if (!pd_commit_memory(addr, size, exec)) { 3295 warn_fail_commit_memory(addr, size, exec); 3296 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg); 3297 } 3298 } 3299 3300 void os::pd_commit_memory_or_exit(char* addr, size_t size, 3301 size_t alignment_hint, bool exec, 3302 const char* mesg) { 3303 // alignment_hint is ignored on this OS 3304 pd_commit_memory_or_exit(addr, size, exec, mesg); 3305 } 3306 3307 bool os::pd_uncommit_memory(char* addr, size_t bytes) { 3308 if (bytes == 0) { 3309 // Don't bother the OS with noops. 
3310 return true; 3311 } 3312 assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries"); 3313 assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks"); 3314 return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0); 3315 } 3316 3317 bool os::pd_release_memory(char* addr, size_t bytes) { 3318 return VirtualFree(addr, 0, MEM_RELEASE) != 0; 3319 } 3320 3321 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 3322 return os::commit_memory(addr, size, !ExecMem); 3323 } 3324 3325 bool os::remove_stack_guard_pages(char* addr, size_t size) { 3326 return os::uncommit_memory(addr, size); 3327 } 3328 3329 // Set protections specified 3330 bool os::protect_memory(char* addr, size_t bytes, ProtType prot, 3331 bool is_committed) { 3332 unsigned int p = 0; 3333 switch (prot) { 3334 case MEM_PROT_NONE: p = PAGE_NOACCESS; break; 3335 case MEM_PROT_READ: p = PAGE_READONLY; break; 3336 case MEM_PROT_RW: p = PAGE_READWRITE; break; 3337 case MEM_PROT_RWX: p = PAGE_EXECUTE_READWRITE; break; 3338 default: 3339 ShouldNotReachHere(); 3340 } 3341 3342 DWORD old_status; 3343 3344 // Strange enough, but on Win32 one can change protection only for committed 3345 // memory, not a big deal anyway, as bytes less or equal than 64K 3346 if (!is_committed) { 3347 commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX, 3348 "cannot commit protection page"); 3349 } 3350 // One cannot use os::guard_memory() here, as on Win32 guard page 3351 // have different (one-shot) semantics, from MSDN on PAGE_GUARD: 3352 // 3353 // Pages in the region become guard pages. Any attempt to access a guard page 3354 // causes the system to raise a STATUS_GUARD_PAGE exception and turn off 3355 // the guard page status. Guard pages thus act as a one-time access alarm. 
3356 return VirtualProtect(addr, bytes, p, &old_status) != 0; 3357 } 3358 3359 bool os::guard_memory(char* addr, size_t bytes) { 3360 DWORD old_status; 3361 return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0; 3362 } 3363 3364 bool os::unguard_memory(char* addr, size_t bytes) { 3365 DWORD old_status; 3366 return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0; 3367 } 3368 3369 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { } 3370 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { } 3371 void os::numa_make_global(char *addr, size_t bytes) { } 3372 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { } 3373 bool os::numa_topology_changed() { return false; } 3374 size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); } 3375 int os::numa_get_group_id() { return 0; } 3376 size_t os::numa_get_leaf_groups(int *ids, size_t size) { 3377 if (numa_node_list_holder.get_count() == 0 && size > 0) { 3378 // Provide an answer for UMA systems 3379 ids[0] = 0; 3380 return 1; 3381 } else { 3382 // check for size bigger than actual groups_num 3383 size = MIN2(size, numa_get_groups_num()); 3384 for (int i = 0; i < (int)size; i++) { 3385 ids[i] = numa_node_list_holder.get_node_list_entry(i); 3386 } 3387 return size; 3388 } 3389 } 3390 3391 bool os::get_page_info(char *start, page_info* info) { 3392 return false; 3393 } 3394 3395 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) { 3396 return end; 3397 } 3398 3399 char* os::non_memory_address_word() { 3400 // Must never look like an address returned by reserve_memory, 3401 // even in its subfields (as defined by the CPU immediate fields, 3402 // if the CPU splits constants across multiple instructions). 
3403 return (char*)-1; 3404 } 3405 3406 #define MAX_ERROR_COUNT 100 3407 #define SYS_THREAD_ERROR 0xffffffffUL 3408 3409 void os::pd_start_thread(Thread* thread) { 3410 DWORD ret = ResumeThread(thread->osthread()->thread_handle()); 3411 // Returns previous suspend state: 3412 // 0: Thread was not suspended 3413 // 1: Thread is running now 3414 // >1: Thread is still suspended. 3415 assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back 3416 } 3417 3418 class HighResolutionInterval : public CHeapObj<mtThread> { 3419 // The default timer resolution seems to be 10 milliseconds. 3420 // (Where is this written down?) 3421 // If someone wants to sleep for only a fraction of the default, 3422 // then we set the timer resolution down to 1 millisecond for 3423 // the duration of their interval. 3424 // We carefully set the resolution back, since otherwise we 3425 // seem to incur an overhead (3%?) that we don't need. 3426 // CONSIDER: if ms is small, say 3, then we should run with a high resolution time. 3427 // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod(). 3428 // Alternatively, we could compute the relative error (503/500 = .6%) and only use 3429 // timeBeginPeriod() if the relative error exceeded some threshold. 3430 // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and 3431 // to decreased efficiency related to increased timer "tick" rates. We want to minimize 3432 // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high 3433 // resolution timers running. 
3434 private: 3435 jlong resolution; 3436 public: 3437 HighResolutionInterval(jlong ms) { 3438 resolution = ms % 10L; 3439 if (resolution != 0) { 3440 MMRESULT result = timeBeginPeriod(1L); 3441 } 3442 } 3443 ~HighResolutionInterval() { 3444 if (resolution != 0) { 3445 MMRESULT result = timeEndPeriod(1L); 3446 } 3447 resolution = 0L; 3448 } 3449 }; 3450 3451 int os::sleep(Thread* thread, jlong ms, bool interruptable) { 3452 jlong limit = (jlong) MAXDWORD; 3453 3454 while (ms > limit) { 3455 int res; 3456 if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) 3457 return res; 3458 ms -= limit; 3459 } 3460 3461 assert(thread == Thread::current(), "thread consistency check"); 3462 OSThread* osthread = thread->osthread(); 3463 OSThreadWaitState osts(osthread, false /* not Object.wait() */); 3464 int result; 3465 if (interruptable) { 3466 assert(thread->is_Java_thread(), "must be java thread"); 3467 JavaThread *jt = (JavaThread *) thread; 3468 ThreadBlockInVM tbivm(jt); 3469 3470 jt->set_suspend_equivalent(); 3471 // cleared by handle_special_suspend_equivalent_condition() or 3472 // java_suspend_self() via check_and_wait_while_suspended() 3473 3474 HANDLE events[1]; 3475 events[0] = osthread->interrupt_event(); 3476 HighResolutionInterval *phri=NULL; 3477 if (!ForceTimeHighResolution) 3478 phri = new HighResolutionInterval(ms); 3479 if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) { 3480 result = OS_TIMEOUT; 3481 } else { 3482 ResetEvent(osthread->interrupt_event()); 3483 osthread->set_interrupted(false); 3484 result = OS_INTRPT; 3485 } 3486 delete phri; //if it is NULL, harmless 3487 3488 // were we externally suspended while we were waiting? 3489 jt->check_and_wait_while_suspended(); 3490 } else { 3491 assert(!thread->is_Java_thread(), "must not be java thread"); 3492 Sleep((long) ms); 3493 result = OS_TIMEOUT; 3494 } 3495 return result; 3496 } 3497 3498 // 3499 // Short sleep, direct OS call. 
3500 // 3501 // ms = 0, means allow others (if any) to run. 3502 // 3503 void os::naked_short_sleep(jlong ms) { 3504 assert(ms < 1000, "Un-interruptable sleep, short time use only"); 3505 Sleep(ms); 3506 } 3507 3508 // Sleep forever; naked call to OS-specific sleep; use with CAUTION 3509 void os::infinite_sleep() { 3510 while (true) { // sleep forever ... 3511 Sleep(100000); // ... 100 seconds at a time 3512 } 3513 } 3514 3515 typedef BOOL (WINAPI * STTSignature)(void); 3516 3517 os::YieldResult os::NakedYield() { 3518 // Use either SwitchToThread() or Sleep(0) 3519 // Consider passing back the return value from SwitchToThread(). 3520 if (os::Kernel32Dll::SwitchToThreadAvailable()) { 3521 return SwitchToThread() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY; 3522 } else { 3523 Sleep(0); 3524 } 3525 return os::YIELD_UNKNOWN; 3526 } 3527 3528 void os::yield() { os::NakedYield(); } 3529 3530 void os::yield_all() { 3531 // Yields to all threads, including threads with lower priorities 3532 Sleep(1); 3533 } 3534 3535 // Win32 only gives you access to seven real priorities at a time, 3536 // so we compress Java's ten down to seven. It would be better 3537 // if we dynamically adjusted relative priorities. 
3538 3539 int os::java_to_os_priority[CriticalPriority + 1] = { 3540 THREAD_PRIORITY_IDLE, // 0 Entry should never be used 3541 THREAD_PRIORITY_LOWEST, // 1 MinPriority 3542 THREAD_PRIORITY_LOWEST, // 2 3543 THREAD_PRIORITY_BELOW_NORMAL, // 3 3544 THREAD_PRIORITY_BELOW_NORMAL, // 4 3545 THREAD_PRIORITY_NORMAL, // 5 NormPriority 3546 THREAD_PRIORITY_NORMAL, // 6 3547 THREAD_PRIORITY_ABOVE_NORMAL, // 7 3548 THREAD_PRIORITY_ABOVE_NORMAL, // 8 3549 THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority 3550 THREAD_PRIORITY_HIGHEST, // 10 MaxPriority 3551 THREAD_PRIORITY_HIGHEST // 11 CriticalPriority 3552 }; 3553 3554 int prio_policy1[CriticalPriority + 1] = { 3555 THREAD_PRIORITY_IDLE, // 0 Entry should never be used 3556 THREAD_PRIORITY_LOWEST, // 1 MinPriority 3557 THREAD_PRIORITY_LOWEST, // 2 3558 THREAD_PRIORITY_BELOW_NORMAL, // 3 3559 THREAD_PRIORITY_BELOW_NORMAL, // 4 3560 THREAD_PRIORITY_NORMAL, // 5 NormPriority 3561 THREAD_PRIORITY_ABOVE_NORMAL, // 6 3562 THREAD_PRIORITY_ABOVE_NORMAL, // 7 3563 THREAD_PRIORITY_HIGHEST, // 8 3564 THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority 3565 THREAD_PRIORITY_TIME_CRITICAL, // 10 MaxPriority 3566 THREAD_PRIORITY_TIME_CRITICAL // 11 CriticalPriority 3567 }; 3568 3569 static int prio_init() { 3570 // If ThreadPriorityPolicy is 1, switch tables 3571 if (ThreadPriorityPolicy == 1) { 3572 int i; 3573 for (i = 0; i < CriticalPriority + 1; i++) { 3574 os::java_to_os_priority[i] = prio_policy1[i]; 3575 } 3576 } 3577 if (UseCriticalJavaThreadPriority) { 3578 os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority]; 3579 } 3580 return 0; 3581 } 3582 3583 OSReturn os::set_native_priority(Thread* thread, int priority) { 3584 if (!UseThreadPriorities) return OS_OK; 3585 bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0; 3586 return ret ? 
OS_OK : OS_ERR; 3587 } 3588 3589 OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) { 3590 if (!UseThreadPriorities) { 3591 *priority_ptr = java_to_os_priority[NormPriority]; 3592 return OS_OK; 3593 } 3594 int os_prio = GetThreadPriority(thread->osthread()->thread_handle()); 3595 if (os_prio == THREAD_PRIORITY_ERROR_RETURN) { 3596 assert(false, "GetThreadPriority failed"); 3597 return OS_ERR; 3598 } 3599 *priority_ptr = os_prio; 3600 return OS_OK; 3601 } 3602 3603 3604 // Hint to the underlying OS that a task switch would not be good. 3605 // Void return because it's a hint and can fail. 3606 void os::hint_no_preempt() {} 3607 3608 void os::interrupt(Thread* thread) { 3609 assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(), 3610 "possibility of dangling Thread pointer"); 3611 3612 OSThread* osthread = thread->osthread(); 3613 osthread->set_interrupted(true); 3614 // More than one thread can get here with the same value of osthread, 3615 // resulting in multiple notifications. We do, however, want the store 3616 // to interrupted() to be visible to other threads before we post 3617 // the interrupt event. 3618 OrderAccess::release(); 3619 SetEvent(osthread->interrupt_event()); 3620 // For JSR166: unpark after setting status 3621 if (thread->is_Java_thread()) 3622 ((JavaThread*)thread)->parker()->unpark(); 3623 3624 ParkEvent * ev = thread->_ParkEvent; 3625 if (ev != NULL) ev->unpark(); 3626 3627 } 3628 3629 3630 bool os::is_interrupted(Thread* thread, bool clear_interrupted) { 3631 assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(), 3632 "possibility of dangling Thread pointer"); 3633 3634 OSThread* osthread = thread->osthread(); 3635 // There is no synchronization between the setting of the interrupt 3636 // and it being cleared here. 
It is critical - see 6535709 - that 3637 // we only clear the interrupt state, and reset the interrupt event, 3638 // if we are going to report that we were indeed interrupted - else 3639 // an interrupt can be "lost", leading to spurious wakeups or lost wakeups 3640 // depending on the timing. By checking thread interrupt event to see 3641 // if the thread gets real interrupt thus prevent spurious wakeup. 3642 bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0); 3643 if (interrupted && clear_interrupted) { 3644 osthread->set_interrupted(false); 3645 ResetEvent(osthread->interrupt_event()); 3646 } // Otherwise leave the interrupted state alone 3647 3648 return interrupted; 3649 } 3650 3651 // Get's a pc (hint) for a running thread. Currently used only for profiling. 3652 ExtendedPC os::get_thread_pc(Thread* thread) { 3653 CONTEXT context; 3654 context.ContextFlags = CONTEXT_CONTROL; 3655 HANDLE handle = thread->osthread()->thread_handle(); 3656 #ifdef _M_IA64 3657 assert(0, "Fix get_thread_pc"); 3658 return ExtendedPC(NULL); 3659 #else 3660 if (GetThreadContext(handle, &context)) { 3661 #ifdef _M_AMD64 3662 return ExtendedPC((address) context.Rip); 3663 #else 3664 return ExtendedPC((address) context.Eip); 3665 #endif 3666 } else { 3667 return ExtendedPC(NULL); 3668 } 3669 #endif 3670 } 3671 3672 // GetCurrentThreadId() returns DWORD 3673 intx os::current_thread_id() { return GetCurrentThreadId(); } 3674 3675 static int _initial_pid = 0; 3676 3677 int os::current_process_id() 3678 { 3679 return (_initial_pid ? 
_initial_pid : _getpid()); 3680 } 3681 3682 int os::win32::_vm_page_size = 0; 3683 int os::win32::_vm_allocation_granularity = 0; 3684 int os::win32::_processor_type = 0; 3685 // Processor level is not available on non-NT systems, use vm_version instead 3686 int os::win32::_processor_level = 0; 3687 julong os::win32::_physical_memory = 0; 3688 size_t os::win32::_default_stack_size = 0; 3689 3690 intx os::win32::_os_thread_limit = 0; 3691 volatile intx os::win32::_os_thread_count = 0; 3692 3693 bool os::win32::_is_nt = false; 3694 bool os::win32::_is_windows_2003 = false; 3695 bool os::win32::_is_windows_server = false; 3696 3697 bool os::win32::_has_performance_count = 0; 3698 3699 void os::win32::initialize_system_info() { 3700 SYSTEM_INFO si; 3701 GetSystemInfo(&si); 3702 _vm_page_size = si.dwPageSize; 3703 _vm_allocation_granularity = si.dwAllocationGranularity; 3704 _processor_type = si.dwProcessorType; 3705 _processor_level = si.wProcessorLevel; 3706 set_processor_count(si.dwNumberOfProcessors); 3707 3708 MEMORYSTATUSEX ms; 3709 ms.dwLength = sizeof(ms); 3710 3711 // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual, 3712 // dwMemoryLoad (% of memory in use) 3713 GlobalMemoryStatusEx(&ms); 3714 _physical_memory = ms.ullTotalPhys; 3715 3716 OSVERSIONINFOEX oi; 3717 oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); 3718 GetVersionEx((OSVERSIONINFO*)&oi); 3719 switch (oi.dwPlatformId) { 3720 case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break; 3721 case VER_PLATFORM_WIN32_NT: 3722 _is_nt = true; 3723 { 3724 int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion; 3725 if (os_vers == 5002) { 3726 _is_windows_2003 = true; 3727 } 3728 if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER || 3729 oi.wProductType == VER_NT_SERVER) { 3730 _is_windows_server = true; 3731 } 3732 } 3733 break; 3734 default: fatal("Unknown platform"); 3735 } 3736 3737 _default_stack_size = os::current_stack_size(); 3738 assert(_default_stack_size > 
(size_t) _vm_page_size, "invalid stack size"); 3739 assert((_default_stack_size & (_vm_page_size - 1)) == 0, 3740 "stack size not a multiple of page size"); 3741 3742 initialize_performance_counter(); 3743 3744 // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is 3745 // known to deadlock the system, if the VM issues to thread operations with 3746 // a too high frequency, e.g., such as changing the priorities. 3747 // The 6000 seems to work well - no deadlocks has been notices on the test 3748 // programs that we have seen experience this problem. 3749 if (!os::win32::is_nt()) { 3750 StarvationMonitorInterval = 6000; 3751 } 3752 } 3753 3754 3755 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, int ebuflen) { 3756 char path[MAX_PATH]; 3757 DWORD size; 3758 DWORD pathLen = (DWORD)sizeof(path); 3759 HINSTANCE result = NULL; 3760 3761 // only allow library name without path component 3762 assert(strchr(name, '\\') == NULL, "path not allowed"); 3763 assert(strchr(name, ':') == NULL, "path not allowed"); 3764 if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) { 3765 jio_snprintf(ebuf, ebuflen, 3766 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name); 3767 return NULL; 3768 } 3769 3770 // search system directory 3771 if ((size = GetSystemDirectory(path, pathLen)) > 0) { 3772 strcat(path, "\\"); 3773 strcat(path, name); 3774 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3775 return result; 3776 } 3777 } 3778 3779 // try Windows directory 3780 if ((size = GetWindowsDirectory(path, pathLen)) > 0) { 3781 strcat(path, "\\"); 3782 strcat(path, name); 3783 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3784 return result; 3785 } 3786 } 3787 3788 jio_snprintf(ebuf, ebuflen, 3789 "os::win32::load_windows_dll() cannot load %s from system directories.", name); 3790 return NULL; 3791 } 3792 3793 void os::win32::setmode_streams() { 3794 
_setmode(_fileno(stdin), _O_BINARY); 3795 _setmode(_fileno(stdout), _O_BINARY); 3796 _setmode(_fileno(stderr), _O_BINARY); 3797 } 3798 3799 3800 bool os::is_debugger_attached() { 3801 return IsDebuggerPresent() ? true : false; 3802 } 3803 3804 3805 void os::wait_for_keypress_at_exit(void) { 3806 if (PauseAtExit) { 3807 fprintf(stderr, "Press any key to continue...\n"); 3808 fgetc(stdin); 3809 } 3810 } 3811 3812 3813 int os::message_box(const char* title, const char* message) { 3814 int result = MessageBox(NULL, message, title, 3815 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY); 3816 return result == IDYES; 3817 } 3818 3819 int os::allocate_thread_local_storage() { 3820 return TlsAlloc(); 3821 } 3822 3823 3824 void os::free_thread_local_storage(int index) { 3825 TlsFree(index); 3826 } 3827 3828 3829 void os::thread_local_storage_at_put(int index, void* value) { 3830 TlsSetValue(index, value); 3831 assert(thread_local_storage_at(index) == value, "Just checking"); 3832 } 3833 3834 3835 void* os::thread_local_storage_at(int index) { 3836 return TlsGetValue(index); 3837 } 3838 3839 3840 #ifndef PRODUCT 3841 #ifndef _WIN64 3842 // Helpers to check whether NX protection is enabled 3843 int nx_exception_filter(_EXCEPTION_POINTERS *pex) { 3844 if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 3845 pex->ExceptionRecord->NumberParameters > 0 && 3846 pex->ExceptionRecord->ExceptionInformation[0] == 3847 EXCEPTION_INFO_EXEC_VIOLATION) { 3848 return EXCEPTION_EXECUTE_HANDLER; 3849 } 3850 return EXCEPTION_CONTINUE_SEARCH; 3851 } 3852 3853 void nx_check_protection() { 3854 // If NX is enabled we'll get an exception calling into code on the stack 3855 char code[] = { (char)0xC3 }; // ret 3856 void *code_ptr = (void *)code; 3857 __try { 3858 __asm call code_ptr 3859 } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) { 3860 tty->print_raw_cr("NX protection detected."); 3861 } 3862 } 3863 #endif // _WIN64 3864 #endif 
// PRODUCT

// this is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = _getpid();

  init_random(1234567);

  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
                os::win32::is_windows_2003());

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  // Duplicate the pseudo thread handle into a real one so other threads can
  // refer to the main thread later.
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();
}

// To install functions for atexit processing
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

static jint initSock();

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // Allocate a single page and mark it as readable for safepoint polling
  address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
  guarantee(polling_page != NULL, "Reserve Failed for polling page");

  address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
  guarantee(return_page != NULL, "Commit Failed for polling page");

  os::set_polling_page(polling_page);

#ifndef PRODUCT
  if (Verbose && PrintMiscellaneous)
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    // Reserve+commit a writable page used to serialize memory on MP systems.
    address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
    guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page");

    return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
    guarantee(return_page != NULL, "Commit Failed for memory serialize page");

    os::set_memory_serialize_page(mem_serialize_page);

#ifndef PRODUCT
    if (Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  // Setup Windows Exceptions

  // for debugging float code generation bugs
  if (ForceFloatExceptions) {
#ifndef _WIN64
    static long fp_control_word = 0;
    __asm { fstcw fp_control_word }
    // see Intel PPro Manual, Vol. 2, p 7-16
    const long precision = 0x20;
    const long underflow = 0x10;
    const long overflow  = 0x08;
    const long zero_div  = 0x04;
    const long denorm    = 0x02;
    const long invalid   = 0x01;
    // Only the invalid-operation exception is unmasked here.
    fp_control_word |= invalid;
    __asm { fldcw fp_control_word }
#endif
  }

  // If stack_commit_size is 0, windows will reserve the default size,
  // but only commit a small portion of it.
  size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
  size_t default_reserve_size = os::win32::default_stack_size();
  size_t actual_reserve_size = stack_commit_size;
  if (stack_commit_size < default_reserve_size) {
    // If stack_commit_size == 0, we want this too
    actual_reserve_size = default_reserve_size;
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size. Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  size_t min_stack_allowed =
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
                     2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
  if (actual_reserve_size < min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, "
                  "Specify at least %dk",
                  min_stack_allowed / K);
    return JNI_ERR;
  }

  JavaThread::set_stack_size_at_create(stack_commit_size);

  // Calculate theoretical max. size of Threads to guard against artificial
  // out-of-memory situations, where all available address-space has been
  // reserved by thread stacks.
  assert(actual_reserve_size != 0, "Must have a stack");

  // Calculate the thread limit when we should start doing Virtual Memory
  // banging. Currently when the threads will have used all but 200Mb of space.
  //
  // TODO: consider performing a similar calculation for commit size instead
  // as reserve size, since on a 64-bit platform we'll run into that more
  // often than running out of virtual memory space. We can use the
  // lower value of the two calculations as the os_thread_limit.
  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);

  // at exit methods are called in the reverse order of their registration.
  // there is no limit to the number of functions registered. atexit does
  // not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

#ifndef _WIN64
  // Print something if NX is enabled (win32 on AMD64)
  NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
#endif

  // initialize thread priority policy
  prio_init();

  if (UseNUMA && !ForceNUMA) {
    UseNUMA = false; // We don't fully support this yet
  }

  if (UseNUMAInterleaving) {
    // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
    bool success = numa_interleaving_init();
    if (!success) UseNUMAInterleaving = false;
  }

  if (initSock() != JNI_OK) {
    return JNI_ERR;
  }

  return JNI_OK;
}

void os::init_3(void) {
  return;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  DWORD old_status;
  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status))
    fatal("Could not disable polling page");
};

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  DWORD old_status;
  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status))
    fatal("Could not enable polling page");
};


// stat() wrapper that first converts 'path' to native form and optionally
// normalizes st_mtime to be timezone-independent (see 6539723 below).
int os::stat(const char *path, struct stat *sbuf) {
  char pathbuf[MAX_PATH];
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  int ret = ::stat(pathbuf, sbuf);
  if (sbuf != NULL && UseUTCFileTimestamp) {
    // Fix for 6539723.  st_mtime returned from stat() is dependent on
    // the system timezone and so can return different values for the
    // same file if/when daylight savings time changes.  This adjustment
    // makes sure the same timestamp is returned regardless of the TZ.
    //
    // See:
    // http://msdn.microsoft.com/library/
    //   default.asp?url=/library/en-us/sysinfo/base/
    //   time_zone_information_str.asp
    // and
    // http://msdn.microsoft.com/library/default.asp?url=
    //   /library/en-us/sysinfo/base/settimezoneinformation.asp
    //
    // NOTE: there is a insidious bug here:  If the timezone is changed
    // after the call to stat() but before 'GetTimeZoneInformation()', then
    // the adjustment we do here will be wrong and we'll return the wrong
    // value (which will likely end up creating an invalid class data
    // archive).  Absent a better API for this, or some time zone locking
    // mechanism, we'll have to live with this risk.
    TIME_ZONE_INFORMATION tz;
    DWORD tzid = GetTimeZoneInformation(&tz);
    int daylightBias =
        (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias;
    sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
  }
  return ret;
}


// Combine a FILETIME's two 32-bit halves into a signed 64-bit value
// (100-nanosecond units).
#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))


// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.
4100 4101 // current_thread_cpu_time() is not optimized for Windows yet 4102 jlong os::current_thread_cpu_time() { 4103 // return user + sys since the cost is the same 4104 return os::thread_cpu_time(Thread::current(), true /* user+sys */); 4105 } 4106 4107 jlong os::thread_cpu_time(Thread* thread) { 4108 // consistent with what current_thread_cpu_time() returns. 4109 return os::thread_cpu_time(thread, true /* user+sys */); 4110 } 4111 4112 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 4113 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 4114 } 4115 4116 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) { 4117 // This code is copy from clasic VM -> hpi::sysThreadCPUTime 4118 // If this function changes, os::is_thread_cpu_time_supported() should too 4119 if (os::win32::is_nt()) { 4120 FILETIME CreationTime; 4121 FILETIME ExitTime; 4122 FILETIME KernelTime; 4123 FILETIME UserTime; 4124 4125 if (GetThreadTimes(thread->osthread()->thread_handle(), 4126 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0) 4127 return -1; 4128 else 4129 if (user_sys_cpu_time) { 4130 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100; 4131 } else { 4132 return FT2INT64(UserTime) * 100; 4133 } 4134 } else { 4135 return (jlong) timeGetTime() * 1000000; 4136 } 4137 } 4138 4139 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4140 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4141 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4142 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4143 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4144 } 4145 4146 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4147 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4148 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4149 info_ptr->may_skip_forward = false; // GetThreadTimes 
returns absolute time 4150 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4151 } 4152 4153 bool os::is_thread_cpu_time_supported() { 4154 // see os::thread_cpu_time 4155 if (os::win32::is_nt()) { 4156 FILETIME CreationTime; 4157 FILETIME ExitTime; 4158 FILETIME KernelTime; 4159 FILETIME UserTime; 4160 4161 if (GetThreadTimes(GetCurrentThread(), 4162 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0) 4163 return false; 4164 else 4165 return true; 4166 } else { 4167 return false; 4168 } 4169 } 4170 4171 // Windows does't provide a loadavg primitive so this is stubbed out for now. 4172 // It does have primitives (PDH API) to get CPU usage and run queue length. 4173 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length" 4174 // If we wanted to implement loadavg on Windows, we have a few options: 4175 // 4176 // a) Query CPU usage and run queue length and "fake" an answer by 4177 // returning the CPU usage if it's under 100%, and the run queue 4178 // length otherwise. It turns out that querying is pretty slow 4179 // on Windows, on the order of 200 microseconds on a fast machine. 4180 // Note that on the Windows the CPU usage value is the % usage 4181 // since the last time the API was called (and the first call 4182 // returns 100%), so we'd have to deal with that as well. 4183 // 4184 // b) Sample the "fake" answer using a sampling thread and store 4185 // the answer in a global variable. The call to loadavg would 4186 // just return the value of the global, avoiding the slow query. 4187 // 4188 // c) Sample a better answer using exponential decay to smooth the 4189 // value. This is basically the algorithm used by UNIX kernels. 4190 // 4191 // Note that sampling thread starvation could affect both (b) and (c). 
// Not implemented on Windows; always reports failure. See the discussion
// of possible PDH-based implementations in the comment block above.
int os::loadavg(double loadavg[], int nelem) {
  return -1;
}


// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
bool os::dont_yield() {
  return DontYieldALot;
}

// This method is a slightly reworked copy of JDK's sysOpen
// from src/windows/hpi/src/sys_api_md.c

// Open 'path' (converted to native form) in binary, non-inheritable mode.
// Returns a file descriptor, or -1 with errno set on failure.
int os::open(const char *path, int oflag, int mode) {
  char pathbuf[MAX_PATH];

  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
}

// Wrap an existing file descriptor in a stdio FILE stream.
FILE* os::open(int fd, const char* mode) {
  return ::_fdopen(fd, mode);
}

// Is a (classpath) directory empty?
// NOTE(review): FindFirstFile is called with 'path' itself, not "path\\*";
// for a plain directory path this appears to match the directory entry
// rather than its contents, making the result "does path exist" inverted --
// callers seem to pass wildcard-suffixed paths; verify against call sites.
bool os::dir_is_empty(const char* path) {
  WIN32_FIND_DATA fd;
  HANDLE f = FindFirstFile(path, &fd);
  if (f == INVALID_HANDLE_VALUE) {
    return true;
  }
  FindClose(f);
  return false;
}

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
  if (!rewrite_existing) {
    // _O_EXCL makes the open fail if the file already exists.
    oflags |= _O_EXCL;
  }
  return ::open(path, oflags, _S_IREAD | _S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}


jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::_lseeki64(fd, offset, whence);
}

// This method is a slightly reworked copy of JDK's sysNativePath
// from src/windows/hpi/src/path_md.c

/* Convert a pathname to native format.  On win32, this involves forcing all
   separators to be '\\' rather than '/' (both are legal inputs, but Win95
   sometimes rejects '/') and removing redundant separators.  The input path is
   assumed to have been converted into the character encoding used by the local
   system.  Because this might be a double-byte encoding, care is taken to
   treat double-byte lead characters correctly.

   This procedure modifies the given path in place, as the result is never
   longer than the original.  There is no error return; this operation always
   succeeds. */
char * os::native_path(char *path) {
  char *src = path, *dst = path, *end = path;
  char *colon = NULL;  /* If a drive specifier is found, this will
                          point to the colon following the drive
                          letter */

  /* Assumption: '/', '\\', ':', and drive letters are never lead bytes */
  assert(((!::IsDBCSLeadByte('/'))
          && (!::IsDBCSLeadByte('\\'))
          && (!::IsDBCSLeadByte(':'))),
         "Illegal lead byte");

  /* Check for leading separators */
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    /* Remove leading separators if followed by drive specifier.  This
       hack is necessary to support file URLs containing drive
       specifiers (e.g., "file://c:/path").  As a side effect,
       "/c:/path" can be used as an alternative to "c:/path". */
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      /* UNC pathname: Retain first separator; leave src pointed at
         second separator so that further separators will be collapsed
         into the second separator.  The result will be a pathname
         beginning with "\\\\" followed (most likely) by a host name. */
      src = dst = path + 1;
      path[0] = '\\';  /* Force first separator to '\\' */
    }
  }

  end = dst;

  /* Remove redundant separators from remainder of path, forcing all
     separators to be '\\' rather than '/'. Also, single byte space
     characters are removed from the end of the path because those
     are not legal ending characters on this operating system.
  */
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      while (isfilesep(*src)) src++;
      if (*src == '\0') {
        /* Check for trailing separator */
        end = dst;
        if (colon == dst - 2) break;  /* "z:\\" */
        if (dst == path + 1) break;   /* "\\" */
        if (dst == path + 2 && isfilesep(path[0])) {
          /* "\\\\" is not collapsed to "\\" because "\\\\" marks the
             beginning of a UNC pathname.  Even though it is not, by
             itself, a valid UNC pathname, we leave it as is in order
             to be consistent with the path canonicalizer as well
             as the win32 APIs, which treat this case as an invalid
             UNC pathname rather than as an alias for the root
             directory of the current drive. */
          break;
        }
        end = --dst;  /* Path does not denote a root directory, so
                         remove trailing separator */
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) {  /* Copy a double-byte character */
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else {  /* Copy a single-byte character */
        char c = *src++;
        *dst++ = c;
        /* Space is not a legal ending character */
        if (c != ' ') end = dst;
      }
    }
  }

  *end = '\0';

  /* For "z:", add "." to work around a bug in the C runtime library */
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}

// This code is a copy of JDK's sysSetLength
// from src/windows/hpi/src/sys_api_md.c

// Truncate (or extend) the file behind 'fd' to 'length' bytes.
// Returns 0 on success, -1 on failure.
int os::ftruncate(int fd, jlong length) {
  HANDLE h = (HANDLE)::_get_osfhandle(fd);
  long high = (long)(length >> 32);
  DWORD ret;

  if (h == (HANDLE)(-1)) {
    return -1;
  }

  // SetFilePointer may legitimately return 0xFFFFFFFF for large offsets,
  // so GetLastError must be consulted to distinguish an error.
  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
    return -1;
  }

  if (::SetEndOfFile(h) == FALSE) {
    return -1;
  }

  return 0;
}


// This code is a copy of JDK's sysSync
// from src/windows/hpi/src/sys_api_md.c
// except for the legacy workaround for a bug in Win 98

int os::fsync(int fd) {
  HANDLE handle = (HANDLE)::_get_osfhandle(fd);

  // ERROR_ACCESS_DENIED is tolerated: FlushFileBuffers fails that way for
  // handles opened without write access, which is not a real sync failure.
  if ((!::FlushFileBuffers(handle)) &&
      (GetLastError() != ERROR_ACCESS_DENIED) ) {
    /* from winerror.h */
    return -1;
  }
  return 0;
}

static int nonSeekAvailable(int, long *);
static int stdinAvailable(int, long *);

#define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
#define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)

// This code is a copy of JDK's sysAvailable
// from src/windows/hpi/src/sys_api_md.c

// Store into *bytes the number of bytes that can be read from 'fd' without
// blocking. Returns TRUE on success, FALSE on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      // Character devices and pipes are not seekable; use the
      // device-specific helpers.
      int ret;
      long lpbytes;
      if (fd == 0) {
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    // Seekable file: available = (end of file) - (current position),
    // computed by seeking to the end and back.
    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
      return FALSE;
    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
      return FALSE;
    }
    *bytes = end - cur;
    return TRUE;
  } else {
    return FALSE;
  }
}

// This code is a copy of JDK's nonSeekAvailable
// from src/windows/hpi/src/sys_api_md.c

static int nonSeekAvailable(int fd, long *pbytes) {
  /* This is used for available on non-seekable devices
   * (like both named and anonymous pipes, such as pipes
   * connected to an exec'd process).
   * Standard Input is a special case.
   *
   */
  HANDLE han;

  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
    return FALSE;
  }

  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
    /* PeekNamedPipe fails when at EOF.  In that case we
     * simply make *pbytes = 0 which is consistent with the
     * behavior we get on Solaris when an fd is at EOF.
     * The only alternative is to raise an Exception,
     * which isn't really warranted.
     */
    if (::GetLastError() != ERROR_BROKEN_PIPE) {
      return FALSE;
    }
    *pbytes = 0;
  }
  return TRUE;
}

#define MAX_INPUT_EVENTS 2000

// This code is a copy of JDK's stdinAvailable
// from src/windows/hpi/src/sys_api_md.c

// Count bytes available on the console's stdin by peeking at pending
// key-down events. Falls back to nonSeekAvailable() when stdin is
// redirected (not a console).
static int stdinAvailable(int fd, long *pbytes) {
  HANDLE han;
  DWORD numEventsRead = 0;  /* Number of events read from buffer */
  DWORD numEvents = 0;      /* Number of events in buffer */
  DWORD i = 0;              /* Loop index */
  DWORD curLength = 0;      /* Position marker */
  DWORD actualLength = 0;   /* Number of bytes readable */
  BOOL error = FALSE;       /* Error holder */
  INPUT_RECORD *lpBuffer;   /* Pointer to records of input events */

  if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
    return FALSE;
  }

  /* Construct an array of input records in the console buffer */
  error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
  if (error == 0) {
    // Not a console (e.g. redirected stdin); treat as a pipe.
    return nonSeekAvailable(fd, pbytes);
  }

  /* lpBuffer must fit into 64K or else PeekConsoleInput fails */
  if (numEvents > MAX_INPUT_EVENTS) {
    numEvents = MAX_INPUT_EVENTS;
  }

  lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
  if (lpBuffer == NULL) {
    return FALSE;
  }

  error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
  if (error == 0) {
    os::free(lpBuffer, mtInternal);
    return FALSE;
  }

  /* Examine input records for the number of bytes available */
  for (i=0; i<numEvents; i++) {
    if (lpBuffer[i].EventType == KEY_EVENT) {

      KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
                                      &(lpBuffer[i].Event);
      if (keyRecord->bKeyDown == TRUE) {
        CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
        curLength++;
        // Only bytes up to and including the last carriage return are
        // actually readable (console input is line buffered).
        if (*keyPressed == '\r') {
          actualLength = curLength;
        }
      }
    }
  }

  if (lpBuffer != NULL) {
    os::free(lpBuffer, mtInternal);
  }

  *pbytes = (long) actualLength;
  return TRUE;
}

// Map a block of memory.
// If allow_exec is true the file contents are read into anonymous
// executable memory (file mappings cannot be made executable on this
// platform); otherwise a copy-on-write (or read-only) file mapping is used.
// Returns the mapped base address, or NULL on failure.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
  HANDLE hFile;
  char* base;

  hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
                     OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
  if (hFile == NULL) {
    if (PrintMiscellaneous && Verbose) {
      DWORD err = GetLastError();
      tty->print_cr("CreateFile() failed: GetLastError->%ld.", err);
    }
    return NULL;
  }

  if (allow_exec) {
    // CreateFileMapping/MapViewOfFileEx can't map executable memory
    // unless it comes from a PE image (which the shared archive is not.)
    // Even VirtualProtect refuses to give execute access to mapped memory
    // that was not previously executable.
    //
    // Instead, stick the executable region in anonymous memory.  Yuck.
    // Penalty is that ~4 pages will not be shareable - in the future
    // we might consider DLLizing the shared archive with a proper PE
    // header so that mapping executable + sharing is possible.

    base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
                                PAGE_READWRITE);
    if (base == NULL) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err);
      }
      CloseHandle(hFile);
      return NULL;
    }

    DWORD bytes_read;
    OVERLAPPED overlapped;
    overlapped.Offset = (DWORD)file_offset;
    overlapped.OffsetHigh = 0;
    overlapped.hEvent = NULL;
    // ReadFile guarantees that if the return value is true, the requested
    // number of bytes were read before returning.
    bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
    if (!res) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("ReadFile() failed: GetLastError->%ld.", err);
      }
      release_memory(base, bytes);
      CloseHandle(hFile);
      return NULL;
    }
  } else {
    HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
                                    NULL /*file_name*/);
    if (hMap == NULL) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err);
      }
      CloseHandle(hFile);
      return NULL;
    }

    DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
    base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
                                  (DWORD)bytes, addr);
    if (base == NULL) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err);
      }
      CloseHandle(hMap);
      CloseHandle(hFile);
      return NULL;
    }

    // The mapping handle can be closed once the view exists; the view keeps
    // the mapping alive. A failed close is reported but not treated as fatal.
    if (CloseHandle(hMap) == 0) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err);
      }
      CloseHandle(hFile);
      return base;
    }
  }

  if (allow_exec) {
    DWORD old_protect;
    DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
    bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;

    if (!res) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err);
      }
      // Don't consider this a hard error, on IA32 even if the
      // VirtualProtect fails, we should still be able to execute
      CloseHandle(hFile);
      return base;
    }
  }

  if (CloseHandle(hFile) == 0) {
    if (PrintMiscellaneous && Verbose) {
      DWORD err = GetLastError();
      tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err);
    }
    return base;
  }

  return base;
}


// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // This OS does not allow existing memory maps to be remapped so we
  // have to unmap the memory before we remap it.
  if (!os::unmap_memory(addr, bytes)) {
    return NULL;
  }

  // There is a very small theoretical window between the unmap_memory()
  // call above and the map_memory() call below where a thread in native
  // code may be able to access an address that is no longer mapped.

  return os::map_memory(fd, file_name, file_offset, addr, bytes,
                        read_only, allow_exec);
}


// Unmap a block of memory.
// Returns true=success, otherwise false.
4680 4681 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4682 BOOL result = UnmapViewOfFile(addr); 4683 if (result == 0) { 4684 if (PrintMiscellaneous && Verbose) { 4685 DWORD err = GetLastError(); 4686 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err); 4687 } 4688 return false; 4689 } 4690 return true; 4691 } 4692 4693 void os::pause() { 4694 char filename[MAX_PATH]; 4695 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4696 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4697 } else { 4698 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4699 } 4700 4701 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4702 if (fd != -1) { 4703 struct stat buf; 4704 ::close(fd); 4705 while (::stat(filename, &buf) == 0) { 4706 Sleep(100); 4707 } 4708 } else { 4709 jio_fprintf(stderr, 4710 "Could not open pause file '%s', continuing immediately.\n", filename); 4711 } 4712 } 4713 4714 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() { 4715 assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread"); 4716 } 4717 4718 /* 4719 * See the caveats for this class in os_windows.hpp 4720 * Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4721 * into this method and returns false. If no OS EXCEPTION was raised, returns 4722 * true. 4723 * The callback is supposed to provide the method that should be protected. 
4724 */ 4725 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) { 4726 assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread"); 4727 assert(!WatcherThread::watcher_thread()->has_crash_protection(), 4728 "crash_protection already set?"); 4729 4730 bool success = true; 4731 __try { 4732 WatcherThread::watcher_thread()->set_crash_protection(this); 4733 cb.call(); 4734 } __except(EXCEPTION_EXECUTE_HANDLER) { 4735 // only for protection, nothing to do 4736 success = false; 4737 } 4738 WatcherThread::watcher_thread()->set_crash_protection(NULL); 4739 return success; 4740 } 4741 4742 // An Event wraps a win32 "CreateEvent" kernel handle. 4743 // 4744 // We have a number of choices regarding "CreateEvent" win32 handle leakage: 4745 // 4746 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle 4747 // field, and call CloseHandle() on the win32 event handle. Unpark() would 4748 // need to be modified to tolerate finding a NULL (invalid) win32 event handle. 4749 // In addition, an unpark() operation might fetch the handle field, but the 4750 // event could recycle between the fetch and the SetEvent() operation. 4751 // SetEvent() would either fail because the handle was invalid, or inadvertently work, 4752 // as the win32 handle value had been recycled. In an ideal world calling SetEvent() 4753 // on an stale but recycled handle would be harmless, but in practice this might 4754 // confuse other non-Sun code, so it's not a viable approach. 4755 // 4756 // 2: Once a win32 event handle is associated with an Event, it remains associated 4757 // with the Event. The event handle is never closed. This could be construed 4758 // as handle leakage, but only up to the maximum # of threads that have been extant 4759 // at any one time. This shouldn't be an issue, as windows platforms typically 4760 // permit a process to have hundreds of thousands of open handles. 
//
// 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
//     and release unused handles.
//
// 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
//     It's not clear, however, that we wouldn't be trading one type of leak for another.
//
// 5.  Use an RCU-like mechanism (Read-Copy Update).
//     Or perhaps something similar to Maged Michael's "Hazard pointers".
//
// We use (2).
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
// 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
//     to recover from (or at least detect) the dreaded Windows 841176 bug.
// 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
//     into a single win32 CreateEvent() handle.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block
//
// _Event serves as a restricted-range semaphore :
//    -1 : thread is blocked
//     0 : neutral  - thread is running or ready
//     1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be
// with explicit "PARKED" and "SIGNALED" bits.

// Timed park: block until unpark() or until approximately Millis ms
// have elapsed. Returns OS_OK if awoken by unpark, OS_TIMEOUT otherwise.
int os::PlatformEvent::park(jlong Millis) {
  guarantee(_ParkHandle != NULL , "Invariant");
  guarantee(Millis > 0          , "Invariant");
  int v;

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.

  // Atomically decrement _Event (1 -> 0: consume a pending permit and
  // return; 0 -> -1: mark ourselves as blocked before waiting).
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return OS_OK;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis; // set prd = MAX (Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
  }
  v = _Event;
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultanous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}

// Untimed park: block until unpark().
void os::PlatformEvent::park() {
  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  int v;
  // Same decrement protocol as the timed variant above.
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case can
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}

void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.  Forcing 1 is slightly more efficient for back-to-back
  //         unpark() calls.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // Only signal the kernel event if the previous value was -1,
  // i.e. some thread is actually blocked in WaitForSingleObject.
  if (Atomic::xchg(1, &_Event) >= 0) return;

  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

/*
 * The Windows implementation of Park is very straightforward: Basic
 * operations on Win32 Events turn out to have the right semantics to
 * use them directly. We opportunistically resuse the event inherited
 * from Monitor.
 */

void Parker::park(bool isAbsolute, jlong time) {
  guarantee(_ParkEvent != NULL, "invariant");
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  }
  else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  }
  else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) // already elapsed
      return;
  }
  else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0)   // Wait for the minimal time unit if zero
      time = 1;
  }

  JavaThread* thread = (JavaThread*)(Thread::current());
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    ResetEvent(_ParkEvent);
    return;
  }
  else {
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jt->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (jt->handle_special_suspend_equivalent_condition()) {
      jt->java_suspend_self();
    }
  }
}

void Parker::unpark() {
  guarantee(_ParkEvent != NULL, "invariant");
  SetEvent(_ParkEvent);
}

// Run the
// specified command in a separate process.  Return its exit value,
// or -1 on failure (e.g. can't create a new process).
int os::fork_and_exec(char* cmd) {
  STARTUPINFO si;
  PROCESS_INFORMATION pi;

  memset(&si, 0, sizeof(si));
  si.cb = sizeof(si);
  memset(&pi, 0, sizeof(pi));
  BOOL rslt = CreateProcess(NULL,   // executable name - use command line
                            cmd,    // command line
                            NULL,   // process security attribute
                            NULL,   // thread security attribute
                            TRUE,   // inherits system handles
                            0,      // no creation flags
                            NULL,   // use parent's environment block
                            NULL,   // use parent's starting directory
                            &si,    // (in) startup information
                            &pi);   // (out) process information

  if (rslt) {
    // Wait until child process exits.
    WaitForSingleObject(pi.hProcess, INFINITE);

    DWORD exit_code;
    GetExitCodeProcess(pi.hProcess, &exit_code);

    // Close process and thread handles.
    CloseHandle(pi.hProcess);
    CloseHandle(pi.hThread);

    return (int)exit_code;
  } else {
    return -1;
  }
}

//--------------------------------------------------------------------------------------------------
// Non-product code

static int mallocDebugIntervalCounter = 0;
static int mallocDebugCounter = 0;

// Walk and validate the C heap (debug aid, gated by MallocVerifyStart /
// MallocVerifyInterval unless 'force' is set). Always returns true unless
// it calls fatal() on corruption.
bool os::check_heap(bool force) {
  // Throttle: skip until MallocVerifyStart calls have been seen, then
  // only verify every MallocVerifyInterval-th call.
  if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
  if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
    // Note: HeapValidate executes two hardware breakpoints when it finds something
    // wrong; at these points, eax contains the address of the offending block (I think).
    // To get to the explicit error message(s) below, just continue twice.
    HANDLE heap = GetProcessHeap();

    // If we fail to lock the heap, then gflags.exe has been used
    // or some other special heap flag has been set that prevents
    // locking.
We don't try to walk a heap we can't lock. 5013 if (HeapLock(heap) != 0) { 5014 PROCESS_HEAP_ENTRY phe; 5015 phe.lpData = NULL; 5016 while (HeapWalk(heap, &phe) != 0) { 5017 if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) && 5018 !HeapValidate(heap, 0, phe.lpData)) { 5019 tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter); 5020 tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData); 5021 fatal("corrupted C heap"); 5022 } 5023 } 5024 DWORD err = GetLastError(); 5025 if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) { 5026 fatal(err_msg("heap walk aborted with error %d", err)); 5027 } 5028 HeapUnlock(heap); 5029 } 5030 mallocDebugIntervalCounter = 0; 5031 } 5032 return true; 5033 } 5034 5035 5036 bool os::find(address addr, outputStream* st) { 5037 // Nothing yet 5038 return false; 5039 } 5040 5041 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) { 5042 DWORD exception_code = e->ExceptionRecord->ExceptionCode; 5043 5044 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 5045 JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow(); 5046 PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord; 5047 address addr = (address) exceptionRecord->ExceptionInformation[1]; 5048 5049 if (os::is_memory_serialize_page(thread, addr)) 5050 return EXCEPTION_CONTINUE_EXECUTION; 5051 } 5052 5053 return EXCEPTION_CONTINUE_SEARCH; 5054 } 5055 5056 // We don't build a headless jre for Windows 5057 bool os::is_headless_jre() { return false; } 5058 5059 static jint initSock() { 5060 WSADATA wsadata; 5061 5062 if (!os::WinSock2Dll::WinSock2Available()) { 5063 jio_fprintf(stderr, "Could not load Winsock (error: %d)\n", 5064 ::GetLastError()); 5065 return JNI_ERR; 5066 } 5067 5068 if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) { 5069 jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n", 5070 ::GetLastError()); 5071 return JNI_ERR; 5072 } 5073 
return JNI_OK; 5074 } 5075 5076 struct hostent* os::get_host_by_name(char* name) { 5077 return (struct hostent*)os::WinSock2Dll::gethostbyname(name); 5078 } 5079 5080 int os::socket_close(int fd) { 5081 return ::closesocket(fd); 5082 } 5083 5084 int os::socket_available(int fd, jint *pbytes) { 5085 int ret = ::ioctlsocket(fd, FIONREAD, (u_long*)pbytes); 5086 return (ret < 0) ? 0 : 1; 5087 } 5088 5089 int os::socket(int domain, int type, int protocol) { 5090 return ::socket(domain, type, protocol); 5091 } 5092 5093 int os::listen(int fd, int count) { 5094 return ::listen(fd, count); 5095 } 5096 5097 int os::connect(int fd, struct sockaddr* him, socklen_t len) { 5098 return ::connect(fd, him, len); 5099 } 5100 5101 int os::accept(int fd, struct sockaddr* him, socklen_t* len) { 5102 return ::accept(fd, him, len); 5103 } 5104 5105 int os::sendto(int fd, char* buf, size_t len, uint flags, 5106 struct sockaddr* to, socklen_t tolen) { 5107 5108 return ::sendto(fd, buf, (int)len, flags, to, tolen); 5109 } 5110 5111 int os::recvfrom(int fd, char *buf, size_t nBytes, uint flags, 5112 sockaddr* from, socklen_t* fromlen) { 5113 5114 return ::recvfrom(fd, buf, (int)nBytes, flags, from, fromlen); 5115 } 5116 5117 int os::recv(int fd, char* buf, size_t nBytes, uint flags) { 5118 return ::recv(fd, buf, (int)nBytes, flags); 5119 } 5120 5121 int os::send(int fd, char* buf, size_t nBytes, uint flags) { 5122 return ::send(fd, buf, (int)nBytes, flags); 5123 } 5124 5125 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) { 5126 return ::send(fd, buf, (int)nBytes, flags); 5127 } 5128 5129 int os::timeout(int fd, long timeout) { 5130 fd_set tbl; 5131 struct timeval t; 5132 5133 t.tv_sec = timeout / 1000; 5134 t.tv_usec = (timeout % 1000) * 1000; 5135 5136 tbl.fd_count = 1; 5137 tbl.fd_array[0] = fd; 5138 5139 return ::select(1, &tbl, 0, 0, &t); 5140 } 5141 5142 int os::get_host_name(char* name, int namelen) { 5143 return ::gethostname(name, namelen); 5144 } 5145 5146 int 
os::socket_shutdown(int fd, int howto) {
  return ::shutdown(fd, howto);
}

int os::bind(int fd, struct sockaddr* him, socklen_t len) {
  return ::bind(fd, him, len);
}

int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
  return ::getsockname(fd, him, len);
}

int os::get_sock_opt(int fd, int level, int optname,
                     char* optval, socklen_t* optlen) {
  return ::getsockopt(fd, level, optname, optval, optlen);
}

int os::set_sock_opt(int fd, int level, int optname,
                     const char* optval, socklen_t optlen) {
  return ::setsockopt(fd, level, optname, optval, optlen);
}

// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(IA32)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif

// returns true if thread could be suspended,
// false otherwise
static bool do_suspend(HANDLE* h) {
  if (h != NULL) {
    // SuspendThread returns (DWORD)-1 on failure.
    if (SuspendThread(*h) != ~0) {
      return true;
    }
  }
  return false;
}

// resume the thread
// calling resume on an active thread is a no-op
static void do_resume(HANDLE* h) {
  if (h != NULL) {
    ResumeThread(*h);
  }
}

// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value.
void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

//
// Thread sampling implementation
//
// Suspend the target thread, capture its register context, hand the
// context to do_task(), then resume the thread and close the handle.
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}


// Kernel32 API
// Function-pointer types for Kernel32 entry points that may be absent
// on older Windows releases; they are resolved lazily via GetProcAddress.
typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG);
typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG);
typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG);

GetLargePageMinimum_Fn      os::Kernel32Dll::_GetLargePageMinimum = NULL;
VirtualAllocExNuma_Fn       os::Kernel32Dll::_VirtualAllocExNuma = NULL;
GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL;


BOOL os::Kernel32Dll::initialized = FALSE;

SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
  assert(initialized && _GetLargePageMinimum != NULL,
         "GetLargePageMinimumAvailable() not yet called");
  return _GetLargePageMinimum();
}

BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetLargePageMinimum != NULL;
}

BOOL os::Kernel32Dll::NumaCallsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _VirtualAllocExNuma != NULL;
}

LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr, SIZE_T bytes, DWORD flags, DWORD prot, DWORD node) {
  assert(initialized && _VirtualAllocExNuma != NULL,
         "NUMACallsAvailable() not yet called");

  return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
}

BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
  assert(initialized && _GetNumaHighestNodeNumber != NULL,
         "NUMACallsAvailable() not yet called");

  return _GetNumaHighestNodeNumber(ptr_highest_node_number);
}

BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node, PULONGLONG proc_mask) {
  assert(initialized && _GetNumaNodeProcessorMask != NULL,
         "NUMACallsAvailable() not yet called");

  return _GetNumaNodeProcessorMask(node, proc_mask);
}

USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
  ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) {
  if (!initialized) {
    initialize();
  }

  // Returns 0 when RtlCaptureStackBackTrace is unavailable.
  if (_RtlCaptureStackBackTrace != NULL) {
    return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
      BackTrace, BackTraceHash);
  } else {
    return 0;
  }
}

// Resolve the optional entry points needed regardless of the
// JDK6_OR_EARLIER setting.
void os::Kernel32Dll::initializeCommon() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");
    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
    _VirtualAllocExNuma =
      (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
    _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
    _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
    _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace");
    initialized = TRUE;
  }
}



#ifndef JDK6_OR_EARLIER
// Post-JDK6 builds can link these APIs directly: every wrapper below
// forwards straight to the statically linked system function, and the
// "...Available()" queries are trivially true.

void os::Kernel32Dll::initialize() {
  initializeCommon();
}


// Kernel32 API
inline BOOL os::Kernel32Dll::SwitchToThread() {
  return ::SwitchToThread();
}

inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  return true;
}

// Help tools
inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
  return true;
}

inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
  return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32First(hSnapshot, lpme);
}

inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32Next(hSnapshot, lpme);
}


inline BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
  return true;
}

inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  ::GetNativeSystemInfo(lpSystemInfo);
}

// PSAPI API
inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
  return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
  return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
  return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

inline BOOL os::PSApiDll::PSApiAvailable() {
  return true;
}


// WinSock2 API
inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
  return ::WSAStartup(wVersionRequested, lpWSAData);
}

inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  return ::gethostbyname(name);
}

inline BOOL os::WinSock2Dll::WinSock2Available() {
  return true;
}

// Advapi API
inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
   BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
   PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
  return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
                                 BufferLength, PreviousState, ReturnLength);
}

inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
   PHANDLE TokenHandle) {
  return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}

inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
  return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}

inline BOOL os::Advapi32Dll::AdvapiAvailable() {
  return true;
}

void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//                              == false if only the base name of the library is passed in
//                                     such as "L"
// Returns a C-heap string (caller frees) or NULL on failure.
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      // Path too short to contain prefix + suffix around a base name.
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // Room for sym_name + '_' + lib base name + NUL.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    // Insert the library name before any __stdcall "@NN" suffix.
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}

#else
// Kernel32 API
// On JDK6-or-earlier toolchains these entry points must be resolved
// dynamically; function-pointer types for the lazy lookups below.
typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD,DWORD);
typedef BOOL (WINAPI* Module32First_Fn)(HANDLE,LPMODULEENTRY32);
typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE,LPMODULEENTRY32);
typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);

SwitchToThread_Fn           os::Kernel32Dll::_SwitchToThread = NULL;
CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
Module32First_Fn            os::Kernel32Dll::_Module32First = NULL;
Module32Next_Fn             os::Kernel32Dll::_Module32Next = NULL;
GetNativeSystemInfo_Fn      os::Kernel32Dll::_GetNativeSystemInfo = NULL;

// Resolve all optional Kernel32 entry points once.
void os::Kernel32Dll::initialize() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");

    _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
    _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
      ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
5504 _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First"); 5505 _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next"); 5506 _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo"); 5507 initializeCommon(); // resolve the functions that always need resolving 5508 5509 initialized = TRUE; 5510 } 5511 } 5512 5513 BOOL os::Kernel32Dll::SwitchToThread() { 5514 assert(initialized && _SwitchToThread != NULL, 5515 "SwitchToThreadAvailable() not yet called"); 5516 return _SwitchToThread(); 5517 } 5518 5519 5520 BOOL os::Kernel32Dll::SwitchToThreadAvailable() { 5521 if (!initialized) { 5522 initialize(); 5523 } 5524 return _SwitchToThread != NULL; 5525 } 5526 5527 // Help tools 5528 BOOL os::Kernel32Dll::HelpToolsAvailable() { 5529 if (!initialized) { 5530 initialize(); 5531 } 5532 return _CreateToolhelp32Snapshot != NULL && 5533 _Module32First != NULL && 5534 _Module32Next != NULL; 5535 } 5536 5537 HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) { 5538 assert(initialized && _CreateToolhelp32Snapshot != NULL, 5539 "HelpToolsAvailable() not yet called"); 5540 5541 return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId); 5542 } 5543 5544 BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) { 5545 assert(initialized && _Module32First != NULL, 5546 "HelpToolsAvailable() not yet called"); 5547 5548 return _Module32First(hSnapshot, lpme); 5549 } 5550 5551 inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) { 5552 assert(initialized && _Module32Next != NULL, 5553 "HelpToolsAvailable() not yet called"); 5554 5555 return _Module32Next(hSnapshot, lpme); 5556 } 5557 5558 5559 BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() { 5560 if (!initialized) { 5561 initialize(); 5562 } 5563 return _GetNativeSystemInfo != NULL; 5564 } 5565 5566 void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) { 5567 
assert(initialized && _GetNativeSystemInfo != NULL, 5568 "GetNativeSystemInfoAvailable() not yet called"); 5569 5570 _GetNativeSystemInfo(lpSystemInfo); 5571 } 5572 5573 // PSAPI API 5574 5575 5576 typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD); 5577 typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);; 5578 typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD); 5579 5580 EnumProcessModules_Fn os::PSApiDll::_EnumProcessModules = NULL; 5581 GetModuleFileNameEx_Fn os::PSApiDll::_GetModuleFileNameEx = NULL; 5582 GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL; 5583 BOOL os::PSApiDll::initialized = FALSE; 5584 5585 void os::PSApiDll::initialize() { 5586 if (!initialized) { 5587 HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0); 5588 if (handle != NULL) { 5589 _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle, 5590 "EnumProcessModules"); 5591 _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle, 5592 "GetModuleFileNameExA"); 5593 _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle, 5594 "GetModuleInformation"); 5595 } 5596 initialized = TRUE; 5597 } 5598 } 5599 5600 5601 5602 BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) { 5603 assert(initialized && _EnumProcessModules != NULL, 5604 "PSApiAvailable() not yet called"); 5605 return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded); 5606 } 5607 5608 DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) { 5609 assert(initialized && _GetModuleFileNameEx != NULL, 5610 "PSApiAvailable() not yet called"); 5611 return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize); 5612 } 5613 5614 BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) { 5615 assert(initialized && 
_GetModuleInformation != NULL, 5616 "PSApiAvailable() not yet called"); 5617 return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb); 5618 } 5619 5620 BOOL os::PSApiDll::PSApiAvailable() { 5621 if (!initialized) { 5622 initialize(); 5623 } 5624 return _EnumProcessModules != NULL && 5625 _GetModuleFileNameEx != NULL && 5626 _GetModuleInformation != NULL; 5627 } 5628 5629 5630 // WinSock2 API 5631 typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA); 5632 typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...); 5633 5634 WSAStartup_Fn os::WinSock2Dll::_WSAStartup = NULL; 5635 gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL; 5636 BOOL os::WinSock2Dll::initialized = FALSE; 5637 5638 void os::WinSock2Dll::initialize() { 5639 if (!initialized) { 5640 HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0); 5641 if (handle != NULL) { 5642 _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup"); 5643 _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname"); 5644 } 5645 initialized = TRUE; 5646 } 5647 } 5648 5649 5650 BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) { 5651 assert(initialized && _WSAStartup != NULL, 5652 "WinSock2Available() not yet called"); 5653 return _WSAStartup(wVersionRequested, lpWSAData); 5654 } 5655 5656 struct hostent* os::WinSock2Dll::gethostbyname(const char *name) { 5657 assert(initialized && _gethostbyname != NULL, 5658 "WinSock2Available() not yet called"); 5659 return _gethostbyname(name); 5660 } 5661 5662 BOOL os::WinSock2Dll::WinSock2Available() { 5663 if (!initialized) { 5664 initialize(); 5665 } 5666 return _WSAStartup != NULL && 5667 _gethostbyname != NULL; 5668 } 5669 5670 typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD); 5671 typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE); 5672 typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, LPCTSTR, PLUID); 5673 
5674 AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL; 5675 OpenProcessToken_Fn os::Advapi32Dll::_OpenProcessToken = NULL; 5676 LookupPrivilegeValue_Fn os::Advapi32Dll::_LookupPrivilegeValue = NULL; 5677 BOOL os::Advapi32Dll::initialized = FALSE; 5678 5679 void os::Advapi32Dll::initialize() { 5680 if (!initialized) { 5681 HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0); 5682 if (handle != NULL) { 5683 _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle, 5684 "AdjustTokenPrivileges"); 5685 _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle, 5686 "OpenProcessToken"); 5687 _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle, 5688 "LookupPrivilegeValueA"); 5689 } 5690 initialized = TRUE; 5691 } 5692 } 5693 5694 BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle, 5695 BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength, 5696 PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) { 5697 assert(initialized && _AdjustTokenPrivileges != NULL, 5698 "AdvapiAvailable() not yet called"); 5699 return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState, 5700 BufferLength, PreviousState, ReturnLength); 5701 } 5702 5703 BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess, 5704 PHANDLE TokenHandle) { 5705 assert(initialized && _OpenProcessToken != NULL, 5706 "AdvapiAvailable() not yet called"); 5707 return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle); 5708 } 5709 5710 BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) { 5711 assert(initialized && _LookupPrivilegeValue != NULL, 5712 "AdvapiAvailable() not yet called"); 5713 return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid); 5714 } 5715 5716 BOOL os::Advapi32Dll::AdvapiAvailable() { 5717 if (!initialized) { 5718 initialize(); 5719 } 5720 return _AdjustTokenPrivileges != NULL && 5721 
_OpenProcessToken != NULL && 5722 _LookupPrivilegeValue != NULL; 5723 } 5724 5725 #endif 5726 5727 #ifndef PRODUCT 5728 5729 // test the code path in reserve_memory_special() that tries to allocate memory in a single 5730 // contiguous memory block at a particular address. 5731 // The test first tries to find a good approximate address to allocate at by using the same 5732 // method to allocate some memory at any address. The test then tries to allocate memory in 5733 // the vicinity (not directly after it to avoid possible by-chance use of that location) 5734 // This is of course only some dodgy assumption, there is no guarantee that the vicinity of 5735 // the previously allocated memory is available for allocation. The only actual failure 5736 // that is reported is when the test tries to allocate at a particular location but gets a 5737 // different valid one. A NULL return value at this point is not considered an error but may 5738 // be legitimate. 5739 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages. 5740 void TestReserveMemorySpecial_test() { 5741 if (!UseLargePages) { 5742 if (VerboseInternalVMTests) { 5743 gclog_or_tty->print("Skipping test because large pages are disabled"); 5744 } 5745 return; 5746 } 5747 // save current value of globals 5748 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; 5749 bool old_use_numa_interleaving = UseNUMAInterleaving; 5750 5751 // set globals to make sure we hit the correct code path 5752 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; 5753 5754 // do an allocation at an address selected by the OS to get a good one. 5755 const size_t large_allocation_size = os::large_page_size() * 4; 5756 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); 5757 if (result == NULL) { 5758 if (VerboseInternalVMTests) { 5759 gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". 
Skipping remainder of test.", 5760 large_allocation_size); 5761 } 5762 } else { 5763 os::release_memory_special(result, large_allocation_size); 5764 5765 // allocate another page within the recently allocated memory area which seems to be a good location. At least 5766 // we managed to get it once. 5767 const size_t expected_allocation_size = os::large_page_size(); 5768 char* expected_location = result + os::large_page_size(); 5769 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); 5770 if (actual_location == NULL) { 5771 if (VerboseInternalVMTests) { 5772 gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.", 5773 expected_location, large_allocation_size); 5774 } 5775 } else { 5776 // release memory 5777 os::release_memory_special(actual_location, expected_allocation_size); 5778 // only now check, after releasing any memory to avoid any leaks. 5779 assert(actual_location == expected_location, 5780 err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead", 5781 expected_location, expected_allocation_size, actual_location)); 5782 } 5783 } 5784 5785 // restore globals 5786 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; 5787 UseNUMAInterleaving = old_use_numa_interleaving; 5788 } 5789 #endif // PRODUCT 5790