1 /* 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // Must be at least Windows 2000 or XP to use IsDebuggerPresent 26 #define _WIN32_WINNT 0x500 27 28 // no precompiled headers 29 #include "classfile/classLoader.hpp" 30 #include "classfile/systemDictionary.hpp" 31 #include "classfile/vmSymbols.hpp" 32 #include "code/icBuffer.hpp" 33 #include "code/vtableStubs.hpp" 34 #include "compiler/compileBroker.hpp" 35 #include "compiler/disassembler.hpp" 36 #include "interpreter/interpreter.hpp" 37 #include "jvm_windows.h" 38 #include "memory/allocation.inline.hpp" 39 #include "memory/filemap.hpp" 40 #include "mutex_windows.inline.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "os_share_windows.hpp" 43 #include "os_windows.inline.hpp" 44 #include "prims/jniFastGetField.hpp" 45 #include "prims/jvm.h" 46 #include "prims/jvm_misc.hpp" 47 #include "runtime/arguments.hpp" 48 #include "runtime/atomic.inline.hpp" 49 #include "runtime/extendedPC.hpp" 50 #include "runtime/globals.hpp" 51 #include "runtime/interfaceSupport.hpp" 52 #include "runtime/java.hpp" 53 #include "runtime/javaCalls.hpp" 54 #include "runtime/mutexLocker.hpp" 55 #include "runtime/objectMonitor.hpp" 56 #include "runtime/orderAccess.inline.hpp" 57 #include "runtime/osThread.hpp" 58 #include "runtime/perfMemory.hpp" 59 #include "runtime/sharedRuntime.hpp" 60 #include "runtime/statSampler.hpp" 61 #include "runtime/stubRoutines.hpp" 62 #include "runtime/thread.inline.hpp" 63 #include "runtime/threadCritical.hpp" 64 #include "runtime/timer.hpp" 65 #include "runtime/vm_version.hpp" 66 #include "services/attachListener.hpp" 67 #include "services/memTracker.hpp" 68 #include "services/runtimeService.hpp" 69 #include "utilities/decoder.hpp" 70 #include "utilities/defaultStream.hpp" 71 #include "utilities/events.hpp" 72 #include "utilities/growableArray.hpp" 73 #include "utilities/vmError.hpp" 74 75 #ifdef _DEBUG 76 #include <crtdbg.h> 77 #endif 78 79 80 #include <windows.h> 81 #include <sys/types.h> 82 #include <sys/stat.h> 83 #include <sys/timeb.h> 84 
#include <objidl.h>
#include <shlobj.h>

#include <malloc.h>
#include <signal.h>
#include <direct.h>
#include <errno.h>
#include <fcntl.h>
#include <io.h>
#include <process.h>              // For _beginthreadex(), _endthreadex()
#include <imagehlp.h>             // For os::dll_address_to_function_name
/* for enumerating dll libraries */
#include <vdmdbg.h>

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)

// For DLL loading/load error detection
// Values of PE COFF
#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
#define IMAGE_FILE_SIGNATURE_LENGTH 4

// Handle/id of the initial (primordial) process thread, captured at startup
// and reused when the main JavaThread is created.
static HANDLE main_process;
static HANDLE main_thread;
static int main_thread_id;

// Cached process FILETIMEs (100ns units since 1601-01-01).
static FILETIME process_creation_time;
static FILETIME process_exit_time;
static FILETIME process_user_time;
static FILETIME process_kernel_time;

#ifdef _M_IA64
#define __CPU__ ia64
#elif _M_AMD64
#define __CPU__ amd64
#else
#define __CPU__ i486
#endif

// save DLL module handle, used by GetModuleFileName

HINSTANCE vm_lib_handle;

// DLL entry point: records the module handle and toggles the Windows
// multimedia timer resolution (1ms) around the life of the VM when
// -XX:+ForceTimeHighResolution is set.
BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
  switch (reason) {
  case DLL_PROCESS_ATTACH:
    vm_lib_handle = hinst;
    if (ForceTimeHighResolution)
      timeBeginPeriod(1L);      // request 1ms scheduler/timer granularity
    break;
  case DLL_PROCESS_DETACH:
    if (ForceTimeHighResolution)
      timeEndPeriod(1L);        // must pair with timeBeginPeriod above

    // Workaround for issue when a custom launcher doesn't call
    // DestroyJavaVM and NMT is trying to track memory when free is
    // called from a static destructor
    MemTracker::shutdown();

    break;
  default:
    break;
  }
  return true;
}

// Convert a FILETIME (64-bit count of 100ns intervals, split across two
// 32-bit words) into seconds as a double.
static inline double fileTimeAsDouble(FILETIME* time) {
  const double high  = (double) ((unsigned int) ~0);  // 2^32 - 1
  const double split = 10000000.0;                    // 100ns ticks per second
  double result = (time->dwLowDateTime / split) +
                   time->dwHighDateTime * (high/split);
  return result;
}
Implementation of os 159 160 bool os::getenv(const char* name, char* buffer, int len) { 161 int result = GetEnvironmentVariable(name, buffer, len); 162 return result > 0 && result < len; 163 } 164 165 bool os::unsetenv(const char* name) { 166 assert(name != NULL, "Null pointer"); 167 return (SetEnvironmentVariable(name, NULL) == TRUE); 168 } 169 170 // No setuid programs under Windows. 171 bool os::have_special_privileges() { 172 return false; 173 } 174 175 176 // This method is a periodic task to check for misbehaving JNI applications 177 // under CheckJNI, we can add any periodic checks here. 178 // For Windows at the moment does nothing 179 void os::run_periodic_checks() { 180 return; 181 } 182 183 // previous UnhandledExceptionFilter, if there is one 184 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL; 185 186 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo); 187 void os::init_system_properties_values() { 188 /* sysclasspath, java_home, dll_dir */ 189 { 190 char *home_path; 191 char *dll_path; 192 char *pslash; 193 char *bin = "\\bin"; 194 char home_dir[MAX_PATH]; 195 196 if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) { 197 os::jvm_path(home_dir, sizeof(home_dir)); 198 // Found the full path to jvm.dll. 199 // Now cut the path to <java_home>/jre if we can. 
200 *(strrchr(home_dir, '\\')) = '\0'; /* get rid of \jvm.dll */ 201 pslash = strrchr(home_dir, '\\'); 202 if (pslash != NULL) { 203 *pslash = '\0'; /* get rid of \{client|server} */ 204 pslash = strrchr(home_dir, '\\'); 205 if (pslash != NULL) 206 *pslash = '\0'; /* get rid of \bin */ 207 } 208 } 209 210 home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal); 211 if (home_path == NULL) 212 return; 213 strcpy(home_path, home_dir); 214 Arguments::set_java_home(home_path); 215 216 dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal); 217 if (dll_path == NULL) 218 return; 219 strcpy(dll_path, home_dir); 220 strcat(dll_path, bin); 221 Arguments::set_dll_dir(dll_path); 222 223 if (!set_boot_path('\\', ';')) 224 return; 225 } 226 227 /* library_path */ 228 #define EXT_DIR "\\lib\\ext" 229 #define BIN_DIR "\\bin" 230 #define PACKAGE_DIR "\\Sun\\Java" 231 { 232 /* Win32 library search order (See the documentation for LoadLibrary): 233 * 234 * 1. The directory from which application is loaded. 235 * 2. The system wide Java Extensions directory (Java only) 236 * 3. System directory (GetSystemDirectory) 237 * 4. Windows directory (GetWindowsDirectory) 238 * 5. The PATH environment variable 239 * 6. The current directory 240 */ 241 242 char *library_path; 243 char tmp[MAX_PATH]; 244 char *path_str = ::getenv("PATH"); 245 246 library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) + 247 sizeof(BIN_DIR) + (path_str ? 
strlen(path_str) : 0) + 10, mtInternal); 248 249 library_path[0] = '\0'; 250 251 GetModuleFileName(NULL, tmp, sizeof(tmp)); 252 *(strrchr(tmp, '\\')) = '\0'; 253 strcat(library_path, tmp); 254 255 GetWindowsDirectory(tmp, sizeof(tmp)); 256 strcat(library_path, ";"); 257 strcat(library_path, tmp); 258 strcat(library_path, PACKAGE_DIR BIN_DIR); 259 260 GetSystemDirectory(tmp, sizeof(tmp)); 261 strcat(library_path, ";"); 262 strcat(library_path, tmp); 263 264 GetWindowsDirectory(tmp, sizeof(tmp)); 265 strcat(library_path, ";"); 266 strcat(library_path, tmp); 267 268 if (path_str) { 269 strcat(library_path, ";"); 270 strcat(library_path, path_str); 271 } 272 273 strcat(library_path, ";."); 274 275 Arguments::set_library_path(library_path); 276 FREE_C_HEAP_ARRAY(char, library_path, mtInternal); 277 } 278 279 /* Default extensions directory */ 280 { 281 char path[MAX_PATH]; 282 char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1]; 283 GetWindowsDirectory(path, MAX_PATH); 284 sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR, 285 path, PACKAGE_DIR, EXT_DIR); 286 Arguments::set_ext_dirs(buf); 287 } 288 #undef EXT_DIR 289 #undef BIN_DIR 290 #undef PACKAGE_DIR 291 292 /* Default endorsed standards directory. 
*/ 293 { 294 #define ENDORSED_DIR "\\lib\\endorsed" 295 size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR); 296 char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal); 297 sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR); 298 Arguments::set_endorsed_dirs(buf); 299 #undef ENDORSED_DIR 300 } 301 302 #ifndef _WIN64 303 // set our UnhandledExceptionFilter and save any previous one 304 prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception); 305 #endif 306 307 // Done 308 return; 309 } 310 311 void os::breakpoint() { 312 DebugBreak(); 313 } 314 315 // Invoked from the BREAKPOINT Macro 316 extern "C" void breakpoint() { 317 os::breakpoint(); 318 } 319 320 /* 321 * RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP. 322 * So far, this method is only used by Native Memory Tracking, which is 323 * only supported on Windows XP or later. 324 */ 325 int os::get_native_stack(address* stack, int frames, int toSkip) { 326 #ifdef _NMT_NOINLINE_ 327 toSkip ++; 328 #endif 329 int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames, 330 (PVOID*)stack, NULL); 331 for (int index = captured; index < frames; index ++) { 332 stack[index] = NULL; 333 } 334 return captured; 335 } 336 337 338 // os::current_stack_base() 339 // 340 // Returns the base of the stack, which is the stack's 341 // starting address. This function must be called 342 // while running on the stack of the thread being queried. 343 344 address os::current_stack_base() { 345 MEMORY_BASIC_INFORMATION minfo; 346 address stack_bottom; 347 size_t stack_size; 348 349 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 350 stack_bottom = (address)minfo.AllocationBase; 351 stack_size = minfo.RegionSize; 352 353 // Add up the sizes of all the regions with the same 354 // AllocationBase. 
355 while (1) 356 { 357 VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo)); 358 if (stack_bottom == (address)minfo.AllocationBase) 359 stack_size += minfo.RegionSize; 360 else 361 break; 362 } 363 364 #ifdef _M_IA64 365 // IA64 has memory and register stacks 366 // 367 // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit 368 // at thread creation (1MB backing store growing upwards, 1MB memory stack 369 // growing downwards, 2MB summed up) 370 // 371 // ... 372 // ------- top of stack (high address) ----- 373 // | 374 // | 1MB 375 // | Backing Store (Register Stack) 376 // | 377 // | / \ 378 // | | 379 // | | 380 // | | 381 // ------------------------ stack base ----- 382 // | 1MB 383 // | Memory Stack 384 // | 385 // | | 386 // | | 387 // | | 388 // | \ / 389 // | 390 // ----- bottom of stack (low address) ----- 391 // ... 392 393 stack_size = stack_size / 2; 394 #endif 395 return stack_bottom + stack_size; 396 } 397 398 size_t os::current_stack_size() { 399 size_t sz; 400 MEMORY_BASIC_INFORMATION minfo; 401 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 402 sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase; 403 return sz; 404 } 405 406 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 407 const struct tm* time_struct_ptr = localtime(clock); 408 if (time_struct_ptr != NULL) { 409 *res = *time_struct_ptr; 410 return res; 411 } 412 return NULL; 413 } 414 415 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo); 416 417 // Thread start routine for all new Java threads 418 static unsigned __stdcall java_start(Thread* thread) { 419 // Try to randomize the cache line index of hot stack frames. 420 // This helps when threads of the same stack traces evict each other's 421 // cache lines. The threads can be either from the same JVM instance, or 422 // from different JVM instances. The benefit is especially true for 423 // processors with hyperthreading technology. 
424 static int counter = 0; 425 int pid = os::current_process_id(); 426 _alloca(((pid ^ counter++) & 7) * 128); 427 428 OSThread* osthr = thread->osthread(); 429 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 430 431 if (UseNUMA) { 432 int lgrp_id = os::numa_get_group_id(); 433 if (lgrp_id != -1) { 434 thread->set_lgrp_id(lgrp_id); 435 } 436 } 437 438 439 // Install a win32 structured exception handler around every thread created 440 // by VM, so VM can genrate error dump when an exception occurred in non- 441 // Java thread (e.g. VM thread). 442 __try { 443 thread->run(); 444 } __except(topLevelExceptionFilter( 445 (_EXCEPTION_POINTERS*)_exception_info())) { 446 // Nothing to do. 447 } 448 449 // One less thread is executing 450 // When the VMThread gets here, the main thread may have already exited 451 // which frees the CodeHeap containing the Atomic::add code 452 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) { 453 Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count); 454 } 455 456 return 0; 457 } 458 459 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) { 460 // Allocate the OSThread object 461 OSThread* osthread = new OSThread(NULL, NULL); 462 if (osthread == NULL) return NULL; 463 464 // Initialize support for Java interrupts 465 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 466 if (interrupt_event == NULL) { 467 delete osthread; 468 return NULL; 469 } 470 osthread->set_interrupt_event(interrupt_event); 471 472 // Store info on the Win32 thread into the OSThread 473 osthread->set_thread_handle(thread_handle); 474 osthread->set_thread_id(thread_id); 475 476 if (UseNUMA) { 477 int lgrp_id = os::numa_get_group_id(); 478 if (lgrp_id != -1) { 479 thread->set_lgrp_id(lgrp_id); 480 } 481 } 482 483 // Initial thread state is INITIALIZED, not SUSPENDED 484 osthread->set_state(INITIALIZED); 485 486 return osthread; 487 } 488 489 490 bool 
os::create_attached_thread(JavaThread* thread) { 491 #ifdef ASSERT 492 thread->verify_not_published(); 493 #endif 494 HANDLE thread_h; 495 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(), 496 &thread_h, THREAD_ALL_ACCESS, false, 0)) { 497 fatal("DuplicateHandle failed\n"); 498 } 499 OSThread* osthread = create_os_thread(thread, thread_h, 500 (int)current_thread_id()); 501 if (osthread == NULL) { 502 return false; 503 } 504 505 // Initial thread state is RUNNABLE 506 osthread->set_state(RUNNABLE); 507 508 thread->set_osthread(osthread); 509 return true; 510 } 511 512 bool os::create_main_thread(JavaThread* thread) { 513 #ifdef ASSERT 514 thread->verify_not_published(); 515 #endif 516 if (_starting_thread == NULL) { 517 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 518 if (_starting_thread == NULL) { 519 return false; 520 } 521 } 522 523 // The primordial thread is runnable from the start) 524 _starting_thread->set_state(RUNNABLE); 525 526 thread->set_osthread(_starting_thread); 527 return true; 528 } 529 530 // Allocate and initialize a new OSThread 531 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) { 532 unsigned thread_id; 533 534 // Allocate the OSThread object 535 OSThread* osthread = new OSThread(NULL, NULL); 536 if (osthread == NULL) { 537 return false; 538 } 539 540 // Initialize support for Java interrupts 541 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 542 if (interrupt_event == NULL) { 543 delete osthread; 544 return NULL; 545 } 546 osthread->set_interrupt_event(interrupt_event); 547 osthread->set_interrupted(false); 548 549 thread->set_osthread(osthread); 550 551 if (stack_size == 0) { 552 switch (thr_type) { 553 case os::java_thread: 554 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 555 if (JavaThread::stack_size_at_create() > 0) 556 stack_size = JavaThread::stack_size_at_create(); 557 break; 558 case 
os::compiler_thread: 559 if (CompilerThreadStackSize > 0) { 560 stack_size = (size_t)(CompilerThreadStackSize * K); 561 break; 562 } // else fall through: 563 // use VMThreadStackSize if CompilerThreadStackSize is not defined 564 case os::vm_thread: 565 case os::pgc_thread: 566 case os::cgc_thread: 567 case os::watcher_thread: 568 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 569 break; 570 } 571 } 572 573 // Create the Win32 thread 574 // 575 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() 576 // does not specify stack size. Instead, it specifies the size of 577 // initially committed space. The stack size is determined by 578 // PE header in the executable. If the committed "stack_size" is larger 579 // than default value in the PE header, the stack is rounded up to the 580 // nearest multiple of 1MB. For example if the launcher has default 581 // stack size of 320k, specifying any size less than 320k does not 582 // affect the actual stack size at all, it only affects the initial 583 // commitment. On the other hand, specifying 'stack_size' larger than 584 // default value may cause significant increase in memory usage, because 585 // not only the stack space will be rounded up to MB, but also the 586 // entire space is committed upfront. 587 // 588 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' 589 // for CreateThread() that can treat 'stack_size' as stack size. However we 590 // are not supposed to call CreateThread() directly according to MSDN 591 // document because JVM uses C runtime library. The good news is that the 592 // flag appears to work with _beginthredex() as well. 
593 594 #ifndef STACK_SIZE_PARAM_IS_A_RESERVATION 595 #define STACK_SIZE_PARAM_IS_A_RESERVATION (0x10000) 596 #endif 597 598 HANDLE thread_handle = 599 (HANDLE)_beginthreadex(NULL, 600 (unsigned)stack_size, 601 (unsigned (__stdcall *)(void*)) java_start, 602 thread, 603 CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 604 &thread_id); 605 if (thread_handle == NULL) { 606 // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again 607 // without the flag. 608 thread_handle = 609 (HANDLE)_beginthreadex(NULL, 610 (unsigned)stack_size, 611 (unsigned (__stdcall *)(void*)) java_start, 612 thread, 613 CREATE_SUSPENDED, 614 &thread_id); 615 } 616 if (thread_handle == NULL) { 617 // Need to clean up stuff we've allocated so far 618 CloseHandle(osthread->interrupt_event()); 619 thread->set_osthread(NULL); 620 delete osthread; 621 return NULL; 622 } 623 624 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count); 625 626 // Store info on the Win32 thread into the OSThread 627 osthread->set_thread_handle(thread_handle); 628 osthread->set_thread_id(thread_id); 629 630 // Initial thread state is INITIALIZED, not SUSPENDED 631 osthread->set_state(INITIALIZED); 632 633 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 634 return true; 635 } 636 637 638 // Free Win32 resources related to the OSThread 639 void os::free_thread(OSThread* osthread) { 640 assert(osthread != NULL, "osthread not set"); 641 CloseHandle(osthread->thread_handle()); 642 CloseHandle(osthread->interrupt_event()); 643 delete osthread; 644 } 645 646 static jlong first_filetime; 647 static jlong initial_performance_count; 648 static jlong performance_frequency; 649 650 651 jlong as_long(LARGE_INTEGER x) { 652 jlong result = 0; // initialization to avoid warning 653 set_high(&result, x.HighPart); 654 set_low(&result, x.LowPart); 655 return result; 656 } 657 658 659 jlong os::elapsed_counter() { 660 LARGE_INTEGER count; 661 if 
(win32::_has_performance_count) { 662 QueryPerformanceCounter(&count); 663 return as_long(count) - initial_performance_count; 664 } else { 665 FILETIME wt; 666 GetSystemTimeAsFileTime(&wt); 667 return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime); 668 } 669 } 670 671 672 jlong os::elapsed_frequency() { 673 if (win32::_has_performance_count) { 674 return performance_frequency; 675 } else { 676 // the FILETIME time is the number of 100-nanosecond intervals since January 1,1601. 677 return 10000000; 678 } 679 } 680 681 682 julong os::available_memory() { 683 return win32::available_memory(); 684 } 685 686 julong os::win32::available_memory() { 687 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 688 // value if total memory is larger than 4GB 689 MEMORYSTATUSEX ms; 690 ms.dwLength = sizeof(ms); 691 GlobalMemoryStatusEx(&ms); 692 693 return (julong)ms.ullAvailPhys; 694 } 695 696 julong os::physical_memory() { 697 return win32::physical_memory(); 698 } 699 700 bool os::has_allocatable_memory_limit(julong* limit) { 701 MEMORYSTATUSEX ms; 702 ms.dwLength = sizeof(ms); 703 GlobalMemoryStatusEx(&ms); 704 #ifdef _LP64 705 *limit = (julong)ms.ullAvailVirtual; 706 return true; 707 #else 708 // Limit to 1400m because of the 2gb address space wall 709 *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual); 710 return true; 711 #endif 712 } 713 714 // VC6 lacks DWORD_PTR 715 #if _MSC_VER < 1300 716 typedef UINT_PTR DWORD_PTR; 717 #endif 718 719 int os::active_processor_count() { 720 DWORD_PTR lpProcessAffinityMask = 0; 721 DWORD_PTR lpSystemAffinityMask = 0; 722 int proc_count = processor_count(); 723 if (proc_count <= sizeof(UINT_PTR) * BitsPerByte && 724 GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) { 725 // Nof active processors is number of bits in process affinity mask 726 int bitcount = 0; 727 while (lpProcessAffinityMask != 0) { 728 lpProcessAffinityMask = 
lpProcessAffinityMask & (lpProcessAffinityMask-1); 729 bitcount++; 730 } 731 return bitcount; 732 } else { 733 return proc_count; 734 } 735 } 736 737 void os::set_native_thread_name(const char *name) { 738 // Not yet implemented. 739 return; 740 } 741 742 bool os::distribute_processes(uint length, uint* distribution) { 743 // Not yet implemented. 744 return false; 745 } 746 747 bool os::bind_to_processor(uint processor_id) { 748 // Not yet implemented. 749 return false; 750 } 751 752 void os::win32::initialize_performance_counter() { 753 LARGE_INTEGER count; 754 if (QueryPerformanceFrequency(&count)) { 755 win32::_has_performance_count = 1; 756 performance_frequency = as_long(count); 757 QueryPerformanceCounter(&count); 758 initial_performance_count = as_long(count); 759 } else { 760 win32::_has_performance_count = 0; 761 FILETIME wt; 762 GetSystemTimeAsFileTime(&wt); 763 first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 764 } 765 } 766 767 768 double os::elapsedTime() { 769 return (double) elapsed_counter() / (double) elapsed_frequency(); 770 } 771 772 773 // Windows format: 774 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601. 
775 // Java format: 776 // Java standards require the number of milliseconds since 1/1/1970 777 778 // Constant offset - calculated using offset() 779 static jlong _offset = 116444736000000000; 780 // Fake time counter for reproducible results when debugging 781 static jlong fake_time = 0; 782 783 #ifdef ASSERT 784 // Just to be safe, recalculate the offset in debug mode 785 static jlong _calculated_offset = 0; 786 static int _has_calculated_offset = 0; 787 788 jlong offset() { 789 if (_has_calculated_offset) return _calculated_offset; 790 SYSTEMTIME java_origin; 791 java_origin.wYear = 1970; 792 java_origin.wMonth = 1; 793 java_origin.wDayOfWeek = 0; // ignored 794 java_origin.wDay = 1; 795 java_origin.wHour = 0; 796 java_origin.wMinute = 0; 797 java_origin.wSecond = 0; 798 java_origin.wMilliseconds = 0; 799 FILETIME jot; 800 if (!SystemTimeToFileTime(&java_origin, &jot)) { 801 fatal(err_msg("Error = %d\nWindows error", GetLastError())); 802 } 803 _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime); 804 _has_calculated_offset = 1; 805 assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal"); 806 return _calculated_offset; 807 } 808 #else 809 jlong offset() { 810 return _offset; 811 } 812 #endif 813 814 jlong windows_to_java_time(FILETIME wt) { 815 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 816 return (a - offset()) / 10000; 817 } 818 819 FILETIME java_to_windows_time(jlong l) { 820 jlong a = (l * 10000) + offset(); 821 FILETIME result; 822 result.dwHighDateTime = high(a); 823 result.dwLowDateTime = low(a); 824 return result; 825 } 826 827 bool os::supports_vtime() { return true; } 828 bool os::enable_vtime() { return false; } 829 bool os::vtime_enabled() { return false; } 830 831 double os::elapsedVTime() { 832 FILETIME created; 833 FILETIME exited; 834 FILETIME kernel; 835 FILETIME user; 836 if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) { 837 // the resolution 
of windows_to_java_time() should be sufficient (ms) 838 return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS; 839 } else { 840 return elapsedTime(); 841 } 842 } 843 844 jlong os::javaTimeMillis() { 845 if (UseFakeTimers) { 846 return fake_time++; 847 } else { 848 FILETIME wt; 849 GetSystemTimeAsFileTime(&wt); 850 return windows_to_java_time(wt); 851 } 852 } 853 854 jlong os::javaTimeNanos() { 855 if (!win32::_has_performance_count) { 856 return javaTimeMillis() * NANOSECS_PER_MILLISEC; // the best we can do. 857 } else { 858 LARGE_INTEGER current_count; 859 QueryPerformanceCounter(¤t_count); 860 double current = as_long(current_count); 861 double freq = performance_frequency; 862 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC); 863 return time; 864 } 865 } 866 867 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 868 if (!win32::_has_performance_count) { 869 // javaTimeMillis() doesn't have much percision, 870 // but it is not going to wrap -- so all 64 bits 871 info_ptr->max_value = ALL_64_BITS; 872 873 // this is a wall clock timer, so may skip 874 info_ptr->may_skip_backward = true; 875 info_ptr->may_skip_forward = true; 876 } else { 877 jlong freq = performance_frequency; 878 if (freq < NANOSECS_PER_SEC) { 879 // the performance counter is 64 bits and we will 880 // be multiplying it -- so no wrap in 64 bits 881 info_ptr->max_value = ALL_64_BITS; 882 } else if (freq > NANOSECS_PER_SEC) { 883 // use the max value the counter can reach to 884 // determine the max value which could be returned 885 julong max_counter = (julong)ALL_64_BITS; 886 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC)); 887 } else { 888 // the performance counter is 64 bits and we will 889 // be using it directly -- so no wrap in 64 bits 890 info_ptr->max_value = ALL_64_BITS; 891 } 892 893 // using a counter, so no skipping 894 info_ptr->may_skip_backward = false; 895 info_ptr->may_skip_forward = false; 896 } 897 
info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time 898 } 899 900 char* os::local_time_string(char *buf, size_t buflen) { 901 SYSTEMTIME st; 902 GetLocalTime(&st); 903 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 904 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); 905 return buf; 906 } 907 908 bool os::getTimesSecs(double* process_real_time, 909 double* process_user_time, 910 double* process_system_time) { 911 HANDLE h_process = GetCurrentProcess(); 912 FILETIME create_time, exit_time, kernel_time, user_time; 913 BOOL result = GetProcessTimes(h_process, 914 &create_time, 915 &exit_time, 916 &kernel_time, 917 &user_time); 918 if (result != 0) { 919 FILETIME wt; 920 GetSystemTimeAsFileTime(&wt); 921 jlong rtc_millis = windows_to_java_time(wt); 922 jlong user_millis = windows_to_java_time(user_time); 923 jlong system_millis = windows_to_java_time(kernel_time); 924 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS); 925 *process_user_time = ((double) user_millis) / ((double) MILLIUNITS); 926 *process_system_time = ((double) system_millis) / ((double) MILLIUNITS); 927 return true; 928 } else { 929 return false; 930 } 931 } 932 933 void os::shutdown() { 934 935 // allow PerfMemory to attempt cleanup of any persistent resources 936 perfMemory_exit(); 937 938 // flush buffered output, finish log files 939 ostream_abort(); 940 941 // Check for abort hook 942 abort_hook_t abort_hook = Arguments::abort_hook(); 943 if (abort_hook != NULL) { 944 abort_hook(); 945 } 946 } 947 948 949 static BOOL (WINAPI *_MiniDumpWriteDump) ( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION, 950 PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION); 951 952 void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) { 953 HINSTANCE dbghelp; 954 EXCEPTION_POINTERS ep; 955 MINIDUMP_EXCEPTION_INFORMATION mei; 956 MINIDUMP_EXCEPTION_INFORMATION* pmei; 957 958 HANDLE 
  // --- tail of the minidump-writing routine (entry point / signature above) ---
  hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  HANDLE dumpFile;
  MINIDUMP_TYPE dumpType;
  // NOTE(review): 'cwd' is static and the buffer returned by
  // get_current_directory(NULL, 0) is never freed here — presumably acceptable
  // because this runs on the fatal-error path; confirm against callers.
  static const char* cwd;

  // Default is to always create dump for debug builds, on product builds only dump on server versions of Windows.
#ifndef ASSERT
  // If running on a client version of Windows and user has not explicitly enabled dumping
  if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false);
    return;
    // If running on a server version of Windows and user has explictly disabled dumping
  } else if (os::win32::is_windows_server() && !FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#else
  if (!FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#endif

  // dbghelp.dll is loaded lazily — it may be absent or too old on some systems.
  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);

  if (dbghelp == NULL) {
    VMError::report_coredump_status("Failed to load dbghelp.dll", false);
    return;
  }

  _MiniDumpWriteDump = CAST_TO_FN_PTR(
    BOOL(WINAPI *)( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
    PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION),
    GetProcAddress(dbghelp, "MiniDumpWriteDump"));

  if (_MiniDumpWriteDump == NULL) {
    VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false);
    return;
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData);

// Older versions of dbghelp.h doesn't contain all the dumptypes we want, dbghelp.h with
// API_VERSION_NUMBER 11 or higher contains the ones we want though
#if API_VERSION_NUMBER >= 11
  dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo |
    MiniDumpWithUnloadedModules);
#endif

  // Dump file goes into the current working directory: <cwd>\hs_err_pid<pid>.mdmp
  cwd = get_current_directory(NULL, 0);
  jio_snprintf(buffer, bufferSize, "%s\\hs_err_pid%u.mdmp", cwd, current_process_id());
  dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);

  if (dumpFile == INVALID_HANDLE_VALUE) {
    VMError::report_coredump_status("Failed to create file for dumping", false);
    return;
  }
  // Only pass exception information to MiniDumpWriteDump when the caller
  // supplied both records; otherwise dump without an exception stream.
  if (exceptionRecord != NULL && contextRecord != NULL) {
    ep.ContextRecord = (PCONTEXT) contextRecord;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }


  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
    DWORD error = GetLastError();
    LPTSTR msgbuf = NULL;

    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) {

      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
      LocalFree(msgbuf);
    } else {
      // Call to FormatMessage failed, just include the result from GetLastError
      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
    }
    VMError::report_coredump_status(buffer, false);
  } else {
    // Success: 'buffer' holds the dump-file path for the error report.
    VMError::report_coredump_status(buffer, true);
  }

  CloseHandle(dumpFile);
}



// Shut the VM down and exit the process; no core/minidump is produced here
// on Windows (the minidump is handled by the error-reporting path above).
void os::abort(bool dump_core)
{
  os::shutdown();
  // no core dump on Windows
  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  _exit(-1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
// * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

/* Caller must have already run dirname through JVM_NativePath, which removes
   duplicate slashes and converts all instances of '/' into '\\'.
*/ 1075 1076 DIR * 1077 os::opendir(const char *dirname) 1078 { 1079 assert(dirname != NULL, "just checking"); // hotspot change 1080 DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal); 1081 DWORD fattr; // hotspot change 1082 char alt_dirname[4] = { 0, 0, 0, 0 }; 1083 1084 if (dirp == 0) { 1085 errno = ENOMEM; 1086 return 0; 1087 } 1088 1089 /* 1090 * Win32 accepts "\" in its POSIX stat(), but refuses to treat it 1091 * as a directory in FindFirstFile(). We detect this case here and 1092 * prepend the current drive name. 1093 */ 1094 if (dirname[1] == '\0' && dirname[0] == '\\') { 1095 alt_dirname[0] = _getdrive() + 'A' - 1; 1096 alt_dirname[1] = ':'; 1097 alt_dirname[2] = '\\'; 1098 alt_dirname[3] = '\0'; 1099 dirname = alt_dirname; 1100 } 1101 1102 dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal); 1103 if (dirp->path == 0) { 1104 free(dirp, mtInternal); 1105 errno = ENOMEM; 1106 return 0; 1107 } 1108 strcpy(dirp->path, dirname); 1109 1110 fattr = GetFileAttributes(dirp->path); 1111 if (fattr == 0xffffffff) { 1112 free(dirp->path, mtInternal); 1113 free(dirp, mtInternal); 1114 errno = ENOENT; 1115 return 0; 1116 } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) { 1117 free(dirp->path, mtInternal); 1118 free(dirp, mtInternal); 1119 errno = ENOTDIR; 1120 return 0; 1121 } 1122 1123 /* Append "*.*", or possibly "\\*.*", to path */ 1124 if (dirp->path[1] == ':' 1125 && (dirp->path[2] == '\0' 1126 || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) { 1127 /* No '\\' needed for cases like "Z:" or "Z:\" */ 1128 strcat(dirp->path, "*.*"); 1129 } else { 1130 strcat(dirp->path, "\\*.*"); 1131 } 1132 1133 dirp->handle = FindFirstFile(dirp->path, &dirp->find_data); 1134 if (dirp->handle == INVALID_HANDLE_VALUE) { 1135 if (GetLastError() != ERROR_FILE_NOT_FOUND) { 1136 free(dirp->path, mtInternal); 1137 free(dirp, mtInternal); 1138 errno = EACCES; 1139 return 0; 1140 } 1141 } 1142 return dirp; 1143 } 1144 1145 /* parameter dbuf unused on Windows */ 1146 1147 
// Win32 emulation of POSIX readdir().  Returns the next entry of the stream,
// or NULL when the stream is exhausted or on error (errno = EBADF).
struct dirent *
os::readdir(DIR *dirp, dirent *dbuf)
{
  assert(dirp != NULL, "just checking");        // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    return 0;
  }

  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  // Pre-fetch the next entry; once the walk is done, close the find handle
  // so the following readdir() call returns NULL.
  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return 0;
    }
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}

// Win32 emulation of POSIX closedir(): closes the find handle (if still open)
// and releases the memory allocated by opendir().  Returns 0 on success,
// -1 with errno = EBADF if FindClose() fails.
int
os::closedir(DIR *dirp)
{
  assert(dirp != NULL, "just checking");        // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path, mtInternal);
  free(dirp, mtInternal);
  return 0;
}

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
// Returns a pointer to a static buffer; empty string on failure.
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf)>0)
    return path_buf;
  else{
    path_buf[0]='\0';
    return path_buf;
  }
}

// Returns true if 'filename' names an existing file or directory.
static bool file_exists(const char* filename) {
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
}

// Build a platform-specific library file name ("<path>\<fname>.dll") into
// 'buffer'.  'pname' may be empty, a single directory (possibly drive- or
// backslash-terminated), or a path-separator-delimited search path; in the
// search-path case the first element that contains the library wins.
// Returns false on overflow or when no candidate exists on the search path.
bool os::dll_build_name(char *buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  const size_t pnamelen = pname ? strlen(pname) : 0;
  // Last character of pname decides whether a separator must be inserted.
  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;

  // Return error on buffer overflow.
  // 10 covers the '\\', the ".dll" suffix and the terminating NUL.
  if (pnamelen + strlen(fname) + 10 > buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    jio_snprintf(buffer, buflen, "%s.dll", fname);
    retval = true;
  } else if (c == ':' || c == '\\') {
    // pname already ends with a separator — no extra '\\' needed.
    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // pname is a search path: probe each element for an existing file.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      char* path = pelements[i];
      // Really shouldn't be NULL, but check can't hurt
      size_t plen = (path == NULL) ? 0 : strlen(path);
      if (plen == 0) {
        continue; // skip the empty path values
      }
      const char lastchar = path[plen - 1];
      if (lastchar == ':' || lastchar == '\\') {
        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
      } else {
        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
      }
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
    retval = true;
  }
  return retval;
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
const char* os::get_current_directory(char *buf, size_t buflen) {
  // _getcwd() takes an int length; clamp so a huge buflen doesn't overflow it.
  int n = static_cast<int>(buflen);
  if (buflen > INT_MAX) n = INT_MAX;
  return _getcwd(buf, n);
}

//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if address in
// within the NTDLL address space.
//
static bool _addr_in_ntdll( address addr )
{
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if (hmod == NULL) return false;
  if (!os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod,
                                           &minfo, sizeof(MODULEINFO)) )
    return false;

  // In range [base, base + SizeOfImage) of the loaded NTDLL image?
  if ((addr >= minfo.lpBaseOfDll) &&
      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
    return true;
  else
    return false;
}
#endif


// Enumerate all modules for a given process ID
//
// Notice that Windows 95/98/Me and Windows NT/2000/XP have
// different API for doing this. We use PSAPI.DLL on NT based
// Windows and ToolHelp on 95/98/Me.

// Callback function that is called by enumerate_modules() on
// every DLL module.
// Input parameters:
//    int       pid,
//    char*     module_file_name,
//    address   module_base_addr,
//    unsigned  module_size,
//    void*     param
// A non-zero return value stops the enumeration.
typedef int (*EnumModulesCallbackFunc)(int, char *, address, unsigned, void *);

// enumerate_modules for Windows NT, using PSAPI
// NOTE: 'filename' below is a static buffer, so this helper is not
// reentrant/thread-safe.
static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void * param)
{
  HANDLE   hProcess;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[MAX_PATH];
  int         result = 0;

  if (!os::PSApiDll::PSApiAvailable()) {
    return 0;
  }

  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
                                        sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  // Cap at MAX_NUM_MODULES: EnumProcessModules may report more than fit
  // in the fixed-size 'modules' array.
  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get
    // Full pathname:
    if (!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
                                           filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
                                            &modinfo, sizeof(modinfo))) {
      // Module info unavailable: report a zero-length module at NULL.
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = func(pid, filename, (address)modinfo.lpBaseOfDll,
                  modinfo.SizeOfImage, param);
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
}


// enumerate_modules for Windows 95/98/ME, using TOOLHELP
// NOTE: 'modentry' below is static, so this helper is not reentrant.
static int _enumerate_modules_windows( int pid, EnumModulesCallbackFunc func, void *param)
{
  HANDLE                hSnapShot;
  static MODULEENTRY32  modentry;
  int                   result = 0;

  if (!os::Kernel32Dll::HelpToolsAvailable()) {
    return 0;
  }

  // Get a handle to a Toolhelp snapshot of the system
  hSnapShot = os::Kernel32Dll::CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid);
  if (hSnapShot == INVALID_HANDLE_VALUE) {
    return FALSE;
  }

  // iterate through all modules
  modentry.dwSize = sizeof(MODULEENTRY32);
  bool not_done = os::Kernel32Dll::Module32First( hSnapShot, &modentry ) != 0;

  while (not_done) {
    // invoke the callback
    result=func(pid, modentry.szExePath, (address)modentry.modBaseAddr,
                modentry.modBaseSize, param);
    if (result) break;

    // dwSize must be re-initialized before every Module32Next() call.
    modentry.dwSize = sizeof(MODULEENTRY32);
    not_done = os::Kernel32Dll::Module32Next( hSnapShot, &modentry ) != 0;
  }

  CloseHandle(hSnapShot);
  return result;
}

// Dispatch module enumeration to the PSAPI (NT) or ToolHelp (95/98/Me)
// implementation; returns the first non-zero callback result, else 0.
int enumerate_modules( int pid, EnumModulesCallbackFunc func, void * param )
{
  // Get current process ID if caller doesn't provide it.
  if (!pid) pid = os::current_process_id();

  if (os::win32::is_nt()) return _enumerate_modules_winnt  (pid, func, param);
  else                    return _enumerate_modules_windows(pid, func, param);
}

// Query/result record used when searching the module list for the module
// that contains a given address.
struct _modinfo {
  address addr;        // in:  address to locate
  char*   full_path;   // point to a char buffer
  int     buflen;      // size of the buffer
  address base_addr;   // out: base address of the containing module
};

// enumerate_modules() callback: stop (return 1) at the module whose address
// range contains pmod->addr, recording its base address and, if a buffer was
// provided, its file path.
static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr,
                                  unsigned size, void * param) {
  struct _modinfo *pmod = (struct _modinfo *)param;
  if (!pmod) return -1;

  if (base_addr <= pmod->addr &&
      base_addr+size > pmod->addr) {
    // if a buffer is provided, copy path name to the buffer
    if (pmod->full_path) {
      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
    }
    pmod->base_addr = base_addr;
    return 1;
  }
  return 0;
}

// Find the library containing 'addr'; on success copy its path into 'buf'
// and (optionally) store the offset of addr from the library base.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

// NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
//       return the full path to the DLL file, sometimes it returns path
//       to the corresponding PDB file (debug info); sometimes it only
//       returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr      = addr;
  mi.full_path = buf;
  mi.buflen    = buflen;
  int pid = os::current_process_id();
  if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  // Not found: report an empty path and offset -1.
  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

// Resolve 'addr' to a symbol name via the decoder; on failure clear the
// buffer and set the (optional) offset to -1.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset)) {
    return true;
  }
  if (offset != NULL) *offset = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
// (this function's own address necessarily lies inside jvm.dll, so the
// module whose range contains it is jvm.dll)
static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr,
                           unsigned size, void * param) {
  if (!param) return -1;

  if (base_addr <= (address)_locate_jvm_dll &&
      base_addr+size > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = base_addr + size;
    return 1;
  }
  return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
// (the location is resolved lazily on first call and then cached)
bool os::address_is_in_vm(address addr) {
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    int pid = os::current_process_id();
    if (!enumerate_modules(pid, _locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(int pid, char* fname, address base,
                         unsigned size, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  address end_addr
    = base + size;
  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base, end_addr, fname);
  return 0;  // 0 => continue enumeration over all modules
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
// On failure returns NULL with an explanatory message in 'ebuf'.
void * os::dll_load(const char *name, char *ebuf, int ebuflen)
{
  void * result = LoadLibrary(name);
  if (result != NULL)
  {
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen-1);
    ebuf[ebuflen-1]='\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen-1]='\0';
  int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0);
  if (file_descriptor<0)
  {
    return NULL;
  }

  // Read the PE header by hand to discover the target architecture:
  // offset 0x3c holds the PE signature offset; the COFF header (with the
  // machine field) follows the 4-byte signature.
  uint32_t signature_offset;
  uint16_t lib_arch=0;
  bool failed_to_get_lib_arch=
  (
    //Go to position 3c in the dll
    (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0)
    ||
    // Read loacation of signature
    (sizeof(signature_offset)!=
      (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset))))
    ||
    //Go to COFF File Header in dll
    //that is located after"signature" (4 bytes long)
    (os::seek_to_file_offset(file_descriptor,
      signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0)
    ||
    //Read field that contains code of architecture
    // that dll was build for
    (sizeof(lib_arch)!=
      (os::read(file_descriptor,
        (void*)&lib_arch,sizeof(lib_arch))))
  );

  ::close(file_descriptor);
  if (failed_to_get_lib_arch)
  {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  // Table mapping COFF machine codes to human-readable architecture names.
  typedef struct
  {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[]={
    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
  };
#if   (defined _M_IA64)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64;
#elif (defined _M_AMD64)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
         is defined :_M_IA64,_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str=NULL,*lib_arch_str=NULL;
  for (unsigned int i=0;i<ARRAY_SIZE(arch_array);i++)
  {
    if (lib_arch==arch_array[i].arch_code)
      lib_arch_str=arch_array[i].arch_name;
    if (running_arch==arch_array[i].arch_code)
      running_arch_str=arch_array[i].arch_name;
  }

  assert(running_arch_str,
         "Didn't find runing architecture code in arch_array");

  // If the architure is right
  // but some other error took place - report os::lasterror(...)
  // msg (already placed in ebuf above)
  if (lib_arch == running_arch)
  {
    return NULL;
  }

  // Architecture mismatch: overwrite ebuf with a specific explanation.
  if (lib_arch_str!=NULL)
  {
    ::_snprintf(ebuf, ebuflen-1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str,running_arch_str);
  }
  else
  {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen-1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch,running_arch_str);
  }

  return NULL;
}


// Print the base/end/path of every loaded module to 'st'.
void os::print_dll_info(outputStream *st) {
  int pid = os::current_process_id();
  st->print_cr("Dynamic libraries:");
  enumerate_modules(pid, _print_module, (void *)st);
}

void os::print_os_info_brief(outputStream* st) {
  // No shorter form on Windows: delegate to the full version report.
  os::print_os_info(st);
}

void os::print_os_info(outputStream* st) {
  st->print("OS:");

  os::win32::print_windows_version(st);
}

// Decode GetVersionEx()/product-type data into a human-readable Windows
// version line ("OS: Windows ... Build ... <service pack>").
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  SYSTEM_INFO si;

  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);

  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("N/A");
    return;
  }

  // Encode major/minor as a single comparable integer, e.g. 6.1 -> 6001.
  int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;

  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  if (os_vers >= 5002) {
    // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
    // find out whether we are running on 64 bit processor or not.
    // GetNativeSystemInfo() reports the real (non-WOW64) architecture;
    // fall back to GetSystemInfo() where it is unavailable.
    if (os::Kernel32Dll::GetNativeSystemInfoAvailable()) {
      os::Kernel32Dll::GetNativeSystemInfo(&si);
    } else {
      GetSystemInfo(&si);
    }
  }

  if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
    switch (os_vers) {
    case 3051: st->print(" Windows NT 3.51"); break;
    case 4000: st->print(" Windows NT 4.0"); break;
    case 5000: st->print(" Windows 2000"); break;
    case 5001: st->print(" Windows XP"); break;
    case 5002:
      // 5.2 is either XP x64 (workstation on AMD64) or Server 2003.
      if (osvi.wProductType == VER_NT_WORKSTATION &&
          si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
        st->print(" Windows XP x64 Edition");
      } else {
        st->print(" Windows Server 2003 family");
      }
      break;

    case 6000:
      if (osvi.wProductType == VER_NT_WORKSTATION) {
        st->print(" Windows Vista");
      } else {
        st->print(" Windows Server 2008");
      }
      break;

    case 6001:
      if (osvi.wProductType == VER_NT_WORKSTATION) {
        st->print(" Windows 7");
      } else {
        st->print(" Windows Server 2008 R2");
      }
      break;

    case 6002:
      if (osvi.wProductType == VER_NT_WORKSTATION) {
        st->print(" Windows 8");
      } else {
        st->print(" Windows Server 2012");
      }
      break;

    case 6003:
      if (osvi.wProductType == VER_NT_WORKSTATION) {
        st->print(" Windows 8.1");
      } else {
        st->print(" Windows Server 2012 R2");
      }
      break;

    default: // future os
      // Unrecognized windows, print out its major and minor versions
      st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
    }
  } else {
    // Non-NT (Windows 9x/Me) family.
    switch (os_vers) {
    case 4000: st->print(" Windows 95"); break;
    case 4010: st->print(" Windows 98"); break;
    case 4090: st->print(" Windows Me"); break;
    default: // future windows, print out its major and minor versions
      st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
    }
  }

  if (os_vers >= 6000 &&
si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) { 1741 st->print(" , 64 bit"); 1742 } 1743 1744 st->print(" Build %d", osvi.dwBuildNumber); 1745 st->print(" %s", osvi.szCSDVersion); // service pack 1746 st->cr(); 1747 } 1748 1749 void os::pd_print_cpu_info(outputStream* st) { 1750 // Nothing to do for now. 1751 } 1752 1753 void os::print_memory_info(outputStream* st) { 1754 st->print("Memory:"); 1755 st->print(" %dk page", os::vm_page_size()>>10); 1756 1757 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 1758 // value if total memory is larger than 4GB 1759 MEMORYSTATUSEX ms; 1760 ms.dwLength = sizeof(ms); 1761 GlobalMemoryStatusEx(&ms); 1762 1763 st->print(", physical %uk", os::physical_memory() >> 10); 1764 st->print("(%uk free)", os::available_memory() >> 10); 1765 1766 st->print(", swap %uk", ms.ullTotalPageFile >> 10); 1767 st->print("(%uk free)", ms.ullAvailPageFile >> 10); 1768 st->cr(); 1769 } 1770 1771 void os::print_siginfo(outputStream *st, void *siginfo) { 1772 EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo; 1773 st->print("siginfo:"); 1774 st->print(" ExceptionCode=0x%x", er->ExceptionCode); 1775 1776 if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 1777 er->NumberParameters >= 2) { 1778 switch (er->ExceptionInformation[0]) { 1779 case 0: st->print(", reading address"); break; 1780 case 1: st->print(", writing address"); break; 1781 default: st->print(", ExceptionInformation=" INTPTR_FORMAT, 1782 er->ExceptionInformation[0]); 1783 } 1784 st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]); 1785 } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR && 1786 er->NumberParameters >= 2 && UseSharedSpaces) { 1787 FileMapInfo* mapinfo = FileMapInfo::current_info(); 1788 if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) { 1789 st->print("\n\nError accessing class data sharing archive." 
                " Mapped file inaccessible during execution, "
                " possible disk/network problem.");
    }
  } else {
    // Other exception codes: dump the raw ExceptionInformation array.
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}

// Cached result of os::jvm_path(); filled in lazily on first call.
static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like jvm.dll is installed there (append a fake suffix
    // hotspot/jvm.dll).
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {

      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      if (0 != _access(buf, 0)) {
        // No jre\bin: modules image layout, use bin\ directly.
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  if (buf[0] == '\0') {
    // Normal case: ask Windows for the path of the loaded jvm module.
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  strncpy(saved_jvm_path, buf, MAX_PATH);
}


void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  // 32-bit __stdcall decoration: leading underscore.
  st->print("_");
#endif
}


void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  // 32-bit __stdcall decoration: "@<bytes of arguments>".
  st->print("@%d", args_size * sizeof(int));
#endif
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c
// Copies a text description of the last OS or C-runtime error into 'buf';
// returns the number of characters written (0 if no error is pending).

size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
          FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
          NULL,
          errval,
          0,
          buf,
          (DWORD)len,
          NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}

// Last OS error: prefers GetLastError(), falls back to errno.
int os::get_last_error() {
  DWORD error = GetLastError();
  if (error == 0)
    error = errno;
  return (int)error;
}

// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;

// Handler installed for user signals: queues the signal for the
// signal-dispatcher thread and re-installs itself.
static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}

void* os::user_handler() {
  return (void*) UserHandler;
}

// Install 'handler' for 'signal_number', returning the previous handler.
// SIGBREAK is special-cased (see the kernel-bug note above): its handler is
// kept in 'sigbreakHandler' and invoked from our console control handler
// rather than being registered with the C runtime.
void* os::signal(int signal_number, void* handler) {
  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
    void (*oldHandler)(int) = sigbreakHandler;
    sigbreakHandler = (void (*)(int)) handler;
    return (void*) oldHandler;
  } else {
    return (void*)::signal(signal_number, (void (*)(int))handler);
  }
}

void os::signal_raise(int signal_number) {
  raise(signal_number);
}

// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events. We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
  switch (event) {
  case CTRL_C_EVENT:
    if (is_error_reported()) {
      // Ctrl-C is pressed during error reporting, likely because the error
      // handler fails to abort. Let VM die immediately.
      os::die();
    }

    os::signal_raise(SIGINT);
    return TRUE;
    break;
  case CTRL_BREAK_EVENT:
    // Dispatch to the handler registered via os::signal(SIGBREAK, ...)
    // (typically the thread-dump handler).
    if (sigbreakHandler != NULL) {
      (*sigbreakHandler)(SIGBREAK);
    }
    return TRUE;
    break;
  case CTRL_LOGOFF_EVENT: {
    // Don't terminate JVM if it is running in a non-interactive session,
    // such as a service process.
    USEROBJECTFLAGS flags;
    HANDLE handle = GetProcessWindowStation();
    if (handle != NULL &&
        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
                                 sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let next handler to deal
      // with it.
      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
        return FALSE;
      }
    }
  }
  // Interactive logoff: intentional fall-through — treat like close/shutdown.
  case CTRL_CLOSE_EVENT:
  case CTRL_SHUTDOWN_EVENT:
    os::signal_raise(SIGTERM);
    return TRUE;
    break;
  default:
    break;
  }
  return FALSE;
}

/*
 * The following code is moved from os.cpp for making this
 * code platform specific, which it is by its very nature.
 */

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Semaphore the signal-dispatcher thread waits on; released once per
// queued signal.
static HANDLE sig_sem = NULL;

void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.
  // For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified.  This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case.  See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}

// Post a user signal: bump its pending count and release the semaphore the
// signal-dispatcher thread waits on.
void os::signal_notify(int signal_number) {
  BOOL ret;
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[signal_number]);
    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
    assert(ret != 0, "ReleaseSemaphore() failed");
  }
}

// Scan the pending-signal counters and atomically claim one occurrence,
// returning its signal number.  When 'wait_for_signal' is false, returns -1
// if nothing is pending; otherwise blocks on sig_sem until a signal arrives.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // cmpxchg decrements the counter only if no other thread raced us.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
2071 threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); 2072 if (threadIsSuspended) { 2073 // 2074 // The semaphore has been incremented, but while we were waiting 2075 // another thread suspended us. We don't want to continue running 2076 // while suspended because that would surprise the thread that 2077 // suspended us. 2078 // 2079 ret = ::ReleaseSemaphore(sig_sem, 1, NULL); 2080 assert(ret != 0, "ReleaseSemaphore() failed"); 2081 2082 thread->java_suspend_self(); 2083 } 2084 } while (threadIsSuspended); 2085 } 2086 } 2087 2088 int os::signal_lookup() { 2089 return check_pending_signals(false); 2090 } 2091 2092 int os::signal_wait() { 2093 return check_pending_signals(true); 2094 } 2095 2096 // Implicit OS exception handling 2097 2098 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) { 2099 JavaThread* thread = JavaThread::current(); 2100 // Save pc in thread 2101 #ifdef _M_IA64 2102 // Do not blow up if no thread info available. 2103 if (thread) { 2104 // Saving PRECISE pc (with slot information) in thread. 2105 uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress; 2106 // Convert precise PC into "Unix" format 2107 precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2); 2108 thread->set_saved_exception_pc((address)precise_pc); 2109 } 2110 // Set pc to handler 2111 exceptionInfo->ContextRecord->StIIP = (DWORD64)handler; 2112 // Clear out psr.ri (= Restart Instruction) in order to continue 2113 // at the beginning of the target bundle. 2114 exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF; 2115 assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!"); 2116 #elif _M_AMD64 2117 // Do not blow up if no thread info available. 
2118 if (thread) { 2119 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip); 2120 } 2121 // Set pc to handler 2122 exceptionInfo->ContextRecord->Rip = (DWORD64)handler; 2123 #else 2124 // Do not blow up if no thread info available. 2125 if (thread) { 2126 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip); 2127 } 2128 // Set pc to handler 2129 exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler; 2130 #endif 2131 2132 // Continue the execution 2133 return EXCEPTION_CONTINUE_EXECUTION; 2134 } 2135 2136 2137 // Used for PostMortemDump 2138 extern "C" void safepoints(); 2139 extern "C" void find(int x); 2140 extern "C" void events(); 2141 2142 // According to Windows API documentation, an illegal instruction sequence should generate 2143 // the 0xC000001C exception code. However, real world experience shows that occasionnaly 2144 // the execution of an illegal instruction can generate the exception code 0xC000001E. This 2145 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems). 2146 2147 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E 2148 2149 // From "Execution Protection in the Windows Operating System" draft 0.35 2150 // Once a system header becomes available, the "real" define should be 2151 // included or copied here. 2152 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08 2153 2154 // Handle NAT Bit consumption on IA64. 2155 #ifdef _M_IA64 2156 #define EXCEPTION_REG_NAT_CONSUMPTION STATUS_REG_NAT_CONSUMPTION 2157 #endif 2158 2159 // Windows Vista/2008 heap corruption check 2160 #define EXCEPTION_HEAP_CORRUPTION 0xC0000374 2161 2162 #define def_excpt(val) #val, val 2163 2164 struct siglabel { 2165 char *name; 2166 int number; 2167 }; 2168 2169 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual 2170 // C++ compiler contain this error code. 
Because this is a compiler-generated 2171 // error, the code is not listed in the Win32 API header files. 2172 // The code is actually a cryptic mnemonic device, with the initial "E" 2173 // standing for "exception" and the final 3 bytes (0x6D7363) representing the 2174 // ASCII values of "msc". 2175 2176 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363 2177 2178 2179 struct siglabel exceptlabels[] = { 2180 def_excpt(EXCEPTION_ACCESS_VIOLATION), 2181 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), 2182 def_excpt(EXCEPTION_BREAKPOINT), 2183 def_excpt(EXCEPTION_SINGLE_STEP), 2184 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), 2185 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND), 2186 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO), 2187 def_excpt(EXCEPTION_FLT_INEXACT_RESULT), 2188 def_excpt(EXCEPTION_FLT_INVALID_OPERATION), 2189 def_excpt(EXCEPTION_FLT_OVERFLOW), 2190 def_excpt(EXCEPTION_FLT_STACK_CHECK), 2191 def_excpt(EXCEPTION_FLT_UNDERFLOW), 2192 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO), 2193 def_excpt(EXCEPTION_INT_OVERFLOW), 2194 def_excpt(EXCEPTION_PRIV_INSTRUCTION), 2195 def_excpt(EXCEPTION_IN_PAGE_ERROR), 2196 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION), 2197 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2), 2198 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION), 2199 def_excpt(EXCEPTION_STACK_OVERFLOW), 2200 def_excpt(EXCEPTION_INVALID_DISPOSITION), 2201 def_excpt(EXCEPTION_GUARD_PAGE), 2202 def_excpt(EXCEPTION_INVALID_HANDLE), 2203 def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION), 2204 def_excpt(EXCEPTION_HEAP_CORRUPTION), 2205 #ifdef _M_IA64 2206 def_excpt(EXCEPTION_REG_NAT_CONSUMPTION), 2207 #endif 2208 NULL, 0 2209 }; 2210 2211 const char* os::exception_name(int exception_code, char *buf, size_t size) { 2212 for (int i = 0; exceptlabels[i].name != NULL; i++) { 2213 if (exceptlabels[i].number == exception_code) { 2214 jio_snprintf(buf, size, "%s", exceptlabels[i].name); 2215 return buf; 2216 } 2217 } 2218 2219 return NULL; 2220 } 2221 2222 
//----------------------------------------------------------------------------- 2223 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2224 // handle exception caused by idiv; should only happen for -MinInt/-1 2225 // (division by zero is handled explicitly) 2226 #ifdef _M_IA64 2227 assert(0, "Fix Handle_IDiv_Exception"); 2228 #elif _M_AMD64 2229 PCONTEXT ctx = exceptionInfo->ContextRecord; 2230 address pc = (address)ctx->Rip; 2231 assert(pc[0] == 0xF7, "not an idiv opcode"); 2232 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2233 assert(ctx->Rax == min_jint, "unexpected idiv exception"); 2234 // set correct result values and continue after idiv instruction 2235 ctx->Rip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2236 ctx->Rax = (DWORD)min_jint; // result 2237 ctx->Rdx = (DWORD)0; // remainder 2238 // Continue the execution 2239 #else 2240 PCONTEXT ctx = exceptionInfo->ContextRecord; 2241 address pc = (address)ctx->Eip; 2242 assert(pc[0] == 0xF7, "not an idiv opcode"); 2243 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2244 assert(ctx->Eax == min_jint, "unexpected idiv exception"); 2245 // set correct result values and continue after idiv instruction 2246 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2247 ctx->Eax = (DWORD)min_jint; // result 2248 ctx->Edx = (DWORD)0; // remainder 2249 // Continue the execution 2250 #endif 2251 return EXCEPTION_CONTINUE_EXECUTION; 2252 } 2253 2254 //----------------------------------------------------------------------------- 2255 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2256 PCONTEXT ctx = exceptionInfo->ContextRecord; 2257 #ifndef _WIN64 2258 // handle exception caused by native method modifying control word 2259 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2260 2261 switch (exception_code) { 2262 case EXCEPTION_FLT_DENORMAL_OPERAND: 2263 case EXCEPTION_FLT_DIVIDE_BY_ZERO: 2264 case 
EXCEPTION_FLT_INEXACT_RESULT: 2265 case EXCEPTION_FLT_INVALID_OPERATION: 2266 case EXCEPTION_FLT_OVERFLOW: 2267 case EXCEPTION_FLT_STACK_CHECK: 2268 case EXCEPTION_FLT_UNDERFLOW: 2269 jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std()); 2270 if (fp_control_word != ctx->FloatSave.ControlWord) { 2271 // Restore FPCW and mask out FLT exceptions 2272 ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0; 2273 // Mask out pending FLT exceptions 2274 ctx->FloatSave.StatusWord &= 0xffffff00; 2275 return EXCEPTION_CONTINUE_EXECUTION; 2276 } 2277 } 2278 2279 if (prev_uef_handler != NULL) { 2280 // We didn't handle this exception so pass it to the previous 2281 // UnhandledExceptionFilter. 2282 return (prev_uef_handler)(exceptionInfo); 2283 } 2284 #else // !_WIN64 2285 /* 2286 On Windows, the mxcsr control bits are non-volatile across calls 2287 See also CR 6192333 2288 */ 2289 jint MxCsr = INITIAL_MXCSR; 2290 // we can't use StubRoutines::addr_mxcsr_std() 2291 // because in Win64 mxcsr is not saved there 2292 if (MxCsr != ctx->MxCsr) { 2293 ctx->MxCsr = MxCsr; 2294 return EXCEPTION_CONTINUE_EXECUTION; 2295 } 2296 #endif // !_WIN64 2297 2298 return EXCEPTION_CONTINUE_SEARCH; 2299 } 2300 2301 static inline void report_error(Thread* t, DWORD exception_code, 2302 address addr, void* siginfo, void* context) { 2303 VMError err(t, exception_code, addr, siginfo, context); 2304 err.report_and_die(); 2305 2306 // If UseOsErrorReporting, this will return here and save the error file 2307 // somewhere where we can find it in the minidump. 
}

//-----------------------------------------------------------------------------
// Top-level structured exception filter for VM threads.  Dispatches hardware
// traps that the VM uses deliberately (safepoint polls, implicit null checks,
// stack-bang overflow checks, SafeFetch, idiv overflow, FLT control-word
// repair) to their continuation stubs; everything unexpected is funneled to
// report_error() and then EXCEPTION_CONTINUE_SEARCH.
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#ifdef _M_IA64
  // On Itanium, we need the "precise pc", which has the slot number coded
  // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
  address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
  // Convert the pc to "Unix format", which has the slot number coded
  // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
  // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
  // information is saved in the Unix format.
  address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
#elif _M_AMD64
  address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
  // May be NULL for non-VM threads; every use below is NULL-checked or
  // guarded by t->is_Java_thread().
  Thread* t = ThreadLocalStorage::get_thread_slow();          // slow & steady

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      bool instr_spans_page_boundary =
        (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                         (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start =
            (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          if (PrintMiscellaneous && Verbose) {
            char buf[256];
            jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                         "at " INTPTR_FORMAT
                         ", unguarding " INTPTR_FORMAT ": %s", addr,
                         page_start, (res ? "success" : strerror(errno)));
            tty->print_raw_cr(buf);
          }

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here.  Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM.  Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop.  This condition is probably even more unlikely
          // than the first.
          //
          // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are unnecessary complication:
          // this handler is a best-effort safety net, not a complete solution.
          // It is disabled by default and should only be used as a workaround
          // in case we missed any no-execute-unsafe VM code.

          last_addr = addr;

          return EXCEPTION_CONTINUE_EXECUTION;
        }
      }

      // Last unguard failed or not unguarding
      tty->print_raw_cr("Execution protection violation");
      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    }
  }
#endif // _WIN64

  // Check to see if we caught the safepoint code in the
  // process of write protecting the memory serialization page.
  // It write enables the page immediately after protecting it
  // so just return.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    JavaThread* thread = (JavaThread*) t;
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];
    if (os::is_memory_serialize_page(thread, addr)) {
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
      VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
  }

  if (t != NULL && t->is_Java_thread()) {
    JavaThread* thread = (JavaThread*) t;
    bool in_java = thread->thread_state() == _thread_in_Java;

    // Handle potential stack overflows up front.
    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
      if (os::uses_stack_guard_pages()) {
#ifdef _M_IA64
        // Use guard page for register stack.
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        // Check for a register stack overflow on Itanium
        if (thread->addr_inside_register_stack_red_zone(addr)) {
          // Fatal red zone violation happens if the Java program
          // catches a StackOverflow error and does so much processing
          // that it runs beyond the unprotected yellow guard zone. As
          // a result, we are out of here.
          fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
        } else if(thread->addr_inside_register_stack(addr)) {
          // Disable the yellow zone which sets the state that
          // we've got a stack overflow problem.
          if (thread->stack_yellow_zone_enabled()) {
            thread->disable_stack_yellow_zone();
          }
          // Give us some room to process the exception.
          thread->disable_register_stack_guard();
          // Tracing with +Verbose.
          if (Verbose) {
            tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
            tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
            tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
            tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
                          thread->register_stack_base(),
                          thread->register_stack_base() + thread->stack_size());
          }

          // Reguard the permanent register stack red zone just to be sure.
          // We saw Windows silently disabling this without telling us.
          thread->enable_register_stack_red_zone();

          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
#endif
        if (thread->stack_yellow_zone_enabled()) {
          // Yellow zone violation.  The o/s has unprotected the first yellow
          // zone page for us.  Note: must call disable_stack_yellow_zone to
          // update the enabled status, even if the zone contains only one page.
          thread->disable_stack_yellow_zone();
          // If not in java code, return and hope for the best.
          return in_java ? Handle_Exception(exceptionInfo,
                                            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
                         : EXCEPTION_CONTINUE_EXECUTION;
        } else {
          // Fatal red zone violation.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
          report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                       exceptionInfo->ContextRecord);
          return EXCEPTION_CONTINUE_SEARCH;
        }
      } else if (in_java) {
        // JVM-managed guard pages cannot be used on win95/98.  The o/s provides
        // a one-time-only guard page, which it has released to us.  The next
        // stack overflow on this thread will result in an ACCESS_VIOLATION.
        return Handle_Exception(exceptionInfo,
                                SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
      } else {
        // Can only return and hope for the best.  Further stack growth will
        // result in an ACCESS_VIOLATION.
        return EXCEPTION_CONTINUE_EXECUTION;
      }
    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
      // Either stack overflow or null pointer exception.
      if (in_java) {
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        address stack_end = thread->stack_base() - thread->stack_size();
        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
          // Stack overflow.
          assert(!os::uses_stack_guard_pages(),
                 "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        //
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        //
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          if (os::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
        {
#ifdef _WIN64
          //
          // If it's a legal stack address map the entire region in
          //
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base()) {
            // Round the fault address down to a page boundary and commit
            // everything from there up to the stack base.
            addr = (address)((uintptr_t)addr &
                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
            os::commit_memory((char *)addr, thread->stack_base() - addr,
                              !ExecMem);
            return EXCEPTION_CONTINUE_EXECUTION;
          }
          else
#endif
          {
            // Null pointer exception.
#ifdef _M_IA64
            // Process implicit null checks in compiled code. Note: Implicit null checks
            // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
            if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
              CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
              // Handle implicit null check in UEP method entry
              if (cb && (cb->is_frame_complete_at(pc) ||
                         (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
                if (Verbose) {
                  intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
                  tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
                  tty->print_cr("      to addr " INTPTR_FORMAT, addr);
                  tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
                                *(bundle_start + 1), *bundle_start);
                }
                return Handle_Exception(exceptionInfo,
                                        SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
              }
            }

            // Implicit null checks were processed above.  Hence, we should not reach
            // here in the usual case => die!
            if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;

#else // !IA64

            // Windows 98 reports faulting addresses incorrectly
            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
                !os::win32::is_nt()) {
              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
            }
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;
#endif
          }
        }
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
        address addr = JNI_FastGetField::find_slowcase_pc(pc);
        if (addr != (address)-1) {
          return Handle_Exception(exceptionInfo, addr);
        }
      }
#endif

      // Stack overflow or null pointer exception in native code.
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#if defined _M_IA64
    else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
              exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
      M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);

      // Compiled method patched to be non entrant? Following conditions must apply:
      // 1. must be first instruction in bundle
      // 2. must be a break instruction with appropriate code
      if ((((uint64_t) pc & 0x0F) == 0) &&
          (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
        return Handle_Exception(exceptionInfo,
                                (address)SharedRuntime::get_handle_wrong_method_stub());
      }
    } // /EXCEPTION_ILLEGAL_INSTRUCTION
#endif


    if (in_java) {
      switch (exception_code) {
      case EXCEPTION_INT_DIVIDE_BY_ZERO:
        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

      case EXCEPTION_INT_OVERFLOW:
        return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }
    if (((thread->thread_state() == _thread_in_Java) ||
         (thread->thread_state() == _thread_in_native)) &&
        exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION)
    {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
  }

  if (exception_code != EXCEPTION_BREAKPOINT) {
    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                 exceptionInfo->ContextRecord);
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

#ifndef _WIN64
// Special care for fast JNI accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
// the heap gets shrunk before the field access.
// Need to install our own structured exception handler since native code may
// install its own.
// Exception filter for the __try/__except around a fast JNI field accessor:
// on an access violation at a known accessor pc, redirect execution to the
// accessor's slow-case entry; otherwise keep searching for a handler.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Defines a wrapper that calls the generated fast-path accessor inside a
// __try/__except using the filter above.  The "return 0" after the __except
// block is only reached if the filter did not redirect execution.
#define DEFINE_FAST_GETFIELD(Return,Fieldname,Result) \
Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) { \
  __try { \
    return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, obj, fieldID); \
  } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { \
  } \
  return 0; \
}

DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
DEFINE_FAST_GETFIELD(jchar,    char,   Char)
DEFINE_FAST_GETFIELD(jshort,   short,  Short)
DEFINE_FAST_GETFIELD(jint,     int,    Int)
DEFINE_FAST_GETFIELD(jlong,    long,   Long)
DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
DEFINE_FAST_GETFIELD(jdouble,  double, Double)

// Return the SEH-wrapped fast accessor for the given primitive type.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
  case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
  case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
  case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
  case T_INT:     return (address)jni_fast_GetIntField_wrapper;
  case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
  case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
  case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
  default:        ShouldNotReachHere();
  }
  return (address)-1;
}
#endif

// Run funcPtr under the VM's top-level exception filter.
void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
  // Install a win32 structured exception handler around the test
  // function call so the VM can generate an error dump if needed.
  __try {
    (*funcPtr)();
  } __except(topLevelExceptionFilter(
             (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }
}

// Virtual Memory

int os::vm_page_size() { return os::win32::vm_page_size(); }
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}

// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
//   + select Control Panel -> Administrative Tools -> Local Security Policy
//   + select Local Policies -> User Rights Assignment
//   + double click "Lock pages in memory", add users and/or groups
//   + reboot
// Note the above steps are needed for administrator as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment it only uses large page if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future, if so the code below needs to be revisited.

#ifndef MEM_LARGE_PAGES
#define MEM_LARGE_PAGES 0x20000000
#endif

// Process/token handles held only while acquiring SeLockMemoryPrivilege;
// released in cleanup_after_large_page_init().
static HANDLE _hProcess;
static HANDLE _hToken;

// Container for NUMA node list info
class NUMANodeListHolder {
private:
  int *_numa_used_node_list;  // allocated below
  int _numa_used_node_count;

  void free_node_list() {
    if (_numa_used_node_list != NULL) {
      FREE_C_HEAP_ARRAY(int, _numa_used_node_list, mtInternal);
    }
  }

public:
  NUMANodeListHolder() {
    _numa_used_node_count = 0;
    _numa_used_node_list = NULL;
    // do rest of initialization in build routine (after function pointers are set up)
  }

  ~NUMANodeListHolder() {
    free_node_list();
  }

  // Populate the list with the NUMA nodes whose processors intersect this
  // process's affinity mask.  Returns true only when more than one node is
  // in use (interleaving is pointless otherwise); false also on any API
  // failure.  Safe to call more than once (frees any previous list first).
  bool build() {
    DWORD_PTR proc_aff_mask;
    DWORD_PTR sys_aff_mask;
    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
    ULONG highest_node_number;
    if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false;
    free_node_list();
    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
    for (unsigned int i = 0; i <= highest_node_number; i++) {
      ULONGLONG proc_mask_numa_node;
      if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
      if ((proc_aff_mask & proc_mask_numa_node)!=0) {
        _numa_used_node_list[_numa_used_node_count++] = i;
      }
    }
    return (_numa_used_node_count > 1);
  }

  int get_count() { return _numa_used_node_count; }
  int get_node_list_entry(int n) {
    // for indexes out of range, returns -1
    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
  }

} numa_node_list_holder;



static size_t _large_page_size = 0;

// True when both the kernel32 large-page entry point and the advapi32
// privilege APIs needed below are available.
static bool resolve_functions_for_large_page_init() {
  return os::Kernel32Dll::GetLargePageMinimumAvailable() &&
         os::Advapi32Dll::AdvapiAvailable();
}

// Try to enable SeLockMemoryPrivilege ("Lock pages in memory") on this
// process's token; required before large-page allocation can succeed.
// Leaves _hProcess/_hToken open for cleanup_after_large_page_init().
static bool request_lock_memory_privilege() {
  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
                          os::current_process_id());

  LUID luid;
  if (_hProcess != NULL &&
      os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
      os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

    TOKEN_PRIVILEGES tp;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Luid = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See MSDN document.
    if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
        (GetLastError() == ERROR_SUCCESS)) {
      return true;
    }
  }

  return false;
}

// Close the handles opened by request_lock_memory_privilege().
static void cleanup_after_large_page_init() {
  if (_hProcess) CloseHandle(_hProcess);
  _hProcess = NULL;
  if (_hToken) CloseHandle(_hToken);
  _hToken = NULL;
}

// Decide whether NUMA interleaving can be used: rounds up
// NUMAInterleaveGranularity, probes the NUMA APIs and builds the used-node
// list.  Returns true on success; on failure warns only if the user
// explicitly asked for UseNUMAInterleaving.
static bool numa_interleaving_init() {
  bool success = false;
  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

  // print a warning if UseNUMAInterleaving flag is specified on command line
  bool warn_on_failure = use_numa_interleaving_specified;
# define WARN(msg) if (warn_on_failure) { warning(msg); }

  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);

  if (os::Kernel32Dll::NumaCallsAvailable()) {
    if (numa_node_list_holder.build()) {
      if (PrintMiscellaneous && Verbose) {
        tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
        for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
          tty->print("%d ", numa_node_list_holder.get_node_list_entry(i));
        }
        tty->print("\n");
      }
      success = true;
    } else {
      WARN("Process does not cover multiple NUMA nodes.");
    }
  } else {
    WARN("NUMA Interleaving is not supported by the operating system.");
  }
  if (!success) {
    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
  }
  return success;
#undef WARN
}

// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
//  * UseNUMAInterleaving requires a separate node for each piece
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, DWORD prot,
                                         bool should_inject_error=false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ?
NUMAInterleaveGranularity : page_size; 2890 2891 // first reserve enough address space in advance since we want to be 2892 // able to break a single contiguous virtual address range into multiple 2893 // large page commits but WS2003 does not allow reserving large page space 2894 // so we just use 4K pages for reserve, this gives us a legal contiguous 2895 // address space. then we will deallocate that reservation, and re alloc 2896 // using large pages 2897 const size_t size_of_reserve = bytes + chunk_size; 2898 if (bytes > size_of_reserve) { 2899 // Overflowed. 2900 return NULL; 2901 } 2902 p_buf = (char *) VirtualAlloc(addr, 2903 size_of_reserve, // size of Reserve 2904 MEM_RESERVE, 2905 PAGE_READWRITE); 2906 // If reservation failed, return NULL 2907 if (p_buf == NULL) return NULL; 2908 MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC); 2909 os::release_memory(p_buf, bytes + chunk_size); 2910 2911 // we still need to round up to a page boundary (in case we are using large pages) 2912 // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size) 2913 // instead we handle this in the bytes_to_rq computation below 2914 p_buf = (char *) align_size_up((size_t)p_buf, page_size); 2915 2916 // now go through and allocate one chunk at a time until all bytes are 2917 // allocated 2918 size_t bytes_remaining = bytes; 2919 // An overflow of align_size_up() would have been caught above 2920 // in the calculation of size_of_reserve. 
2921 char * next_alloc_addr = p_buf; 2922 HANDLE hProc = GetCurrentProcess(); 2923 2924 #ifdef ASSERT 2925 // Variable for the failure injection 2926 long ran_num = os::random(); 2927 size_t fail_after = ran_num % bytes; 2928 #endif 2929 2930 int count=0; 2931 while (bytes_remaining) { 2932 // select bytes_to_rq to get to the next chunk_size boundary 2933 2934 size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size)); 2935 // Note allocate and commit 2936 char * p_new; 2937 2938 #ifdef ASSERT 2939 bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after); 2940 #else 2941 const bool inject_error_now = false; 2942 #endif 2943 2944 if (inject_error_now) { 2945 p_new = NULL; 2946 } else { 2947 if (!UseNUMAInterleaving) { 2948 p_new = (char *) VirtualAlloc(next_alloc_addr, 2949 bytes_to_rq, 2950 flags, 2951 prot); 2952 } else { 2953 // get the next node to use from the used_node_list 2954 assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected"); 2955 DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count()); 2956 p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc, 2957 next_alloc_addr, 2958 bytes_to_rq, 2959 flags, 2960 prot, 2961 node); 2962 } 2963 } 2964 2965 if (p_new == NULL) { 2966 // Free any allocated pages 2967 if (next_alloc_addr > p_buf) { 2968 // Some memory was committed so release it. 2969 size_t bytes_to_release = bytes - bytes_remaining; 2970 // NMT has yet to record any individual blocks, so it 2971 // need to create a dummy 'reserve' record to match 2972 // the release. 
2973 MemTracker::record_virtual_memory_reserve((address)p_buf, 2974 bytes_to_release, CALLER_PC); 2975 os::release_memory(p_buf, bytes_to_release); 2976 } 2977 #ifdef ASSERT 2978 if (should_inject_error) { 2979 if (TracePageSizes && Verbose) { 2980 tty->print_cr("Reserving pages individually failed."); 2981 } 2982 } 2983 #endif 2984 return NULL; 2985 } 2986 2987 bytes_remaining -= bytes_to_rq; 2988 next_alloc_addr += bytes_to_rq; 2989 count++; 2990 } 2991 // Although the memory is allocated individually, it is returned as one. 2992 // NMT records it as one block. 2993 if ((flags & MEM_COMMIT) != 0) { 2994 MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC); 2995 } else { 2996 MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC); 2997 } 2998 2999 // made it this far, success 3000 return p_buf; 3001 } 3002 3003 3004 3005 void os::large_page_init() { 3006 if (!UseLargePages) return; 3007 3008 // print a warning if any large page related flag is specified on command line 3009 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) || 3010 !FLAG_IS_DEFAULT(LargePageSizeInBytes); 3011 bool success = false; 3012 3013 # define WARN(msg) if (warn_on_failure) { warning(msg); } 3014 if (resolve_functions_for_large_page_init()) { 3015 if (request_lock_memory_privilege()) { 3016 size_t s = os::Kernel32Dll::GetLargePageMinimum(); 3017 if (s) { 3018 #if defined(IA32) || defined(AMD64) 3019 if (s > 4*M || LargePageSizeInBytes > 4*M) { 3020 WARN("JVM cannot use large pages bigger than 4mb."); 3021 } else { 3022 #endif 3023 if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) { 3024 _large_page_size = LargePageSizeInBytes; 3025 } else { 3026 _large_page_size = s; 3027 } 3028 success = true; 3029 #if defined(IA32) || defined(AMD64) 3030 } 3031 #endif 3032 } else { 3033 WARN("Large page is not supported by the processor."); 3034 } 3035 } else { 3036 WARN("JVM cannot use large page memory because it does not have enough 
privilege to lock pages in memory."); 3037 } 3038 } else { 3039 WARN("Large page is not supported by the operating system."); 3040 } 3041 #undef WARN 3042 3043 const size_t default_page_size = (size_t) vm_page_size(); 3044 if (success && _large_page_size > default_page_size) { 3045 _page_sizes[0] = _large_page_size; 3046 _page_sizes[1] = default_page_size; 3047 _page_sizes[2] = 0; 3048 } 3049 3050 cleanup_after_large_page_init(); 3051 UseLargePages = success; 3052 } 3053 3054 // On win32, one cannot release just a part of reserved memory, it's an 3055 // all or nothing deal. When we split a reservation, we must break the 3056 // reservation into two reservations. 3057 void os::pd_split_reserved_memory(char *base, size_t size, size_t split, 3058 bool realloc) { 3059 if (size > 0) { 3060 release_memory(base, size); 3061 if (realloc) { 3062 reserve_memory(split, base); 3063 } 3064 if (size != split) { 3065 reserve_memory(size - split, base + split); 3066 } 3067 } 3068 } 3069 3070 // Multiple threads can race in this code but it's not possible to unmap small sections of 3071 // virtual space to get requested alignment, like posix-like os's. 3072 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe. 
// Reserve 'size' bytes aligned to 'alignment' by over-reserving, releasing,
// and re-reserving at the aligned address; loops because another thread may
// grab the address between the release and the re-reserve.
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);

    os::release_memory(extra_base, extra_size);

    aligned_base = os::reserve_memory(size, aligned_base);

  } while (aligned_base == NULL);

  return aligned_base;
}

// Reserve (but do not commit) 'bytes' of address space, optionally at 'addr'.
// With NUMA interleaving (and small pages) the range is reserved chunk by
// chunk across nodes via allocate_pages_individually().
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if (Verbose && PrintMiscellaneous) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if (Verbose && PrintMiscellaneous) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails of the requested address range is
  // not avilable.
  return reserve_memory(bytes, requested_addr);
}

size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

bool os::can_execute_large_page_memory() {
  return true;
}

// Reserve and commit 'bytes' of large-page memory.  Returns NULL (caller
// falls back to small pages) when the size/alignment don't fit large pages
// or the allocation fails.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) {
  assert(UseLargePages, "only for large pages");

  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
    return NULL; // Fallback to small pages.
  }

  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;

  // with large pages, there are two cases where we need to use Individual Allocation
  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages individually.");
    }
    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
    if (p_buf == NULL) {
      // give an appropriate warning message
      if (UseNUMAInterleaving) {
        warning("NUMA large page allocation failed, UseLargePages flag ignored");
      }
      if (UseLargePagesIndividualAllocation) {
        warning("Individually allocated large pages failed, "
                "use -XX:-UseLargePagesIndividualAllocation to turn off");
      }
      return NULL;
    }

    return p_buf;

  } else {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages in a single large chunk.");
    }
    // normal policy just allocate it all at once
    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
    if (res != NULL) {
      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
    }

    return res;
  }
}

bool os::release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  return release_memory(base, bytes);
}

void os::print_statistics() {
}

// Emit a diagnostic warning with the OS error text for a failed commit.
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}

// Commit previously reserved pages.  With NUMA interleaving the commit may
// span several VirtualAlloc reservations, so it is done region by region.
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  }
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
  return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
}

bool os::pd_release_memory(char* addr, size_t bytes) {
  // MEM_RELEASE requires size 0; the whole reservation at 'addr' is freed.
  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}

// Set protections specified
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
  case MEM_PROT_READ: p = PAGE_READONLY; break;
  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
  default:
    ShouldNotReachHere();
  }

  DWORD old_status;

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed) {
    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
                          "cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard page
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
  //
  // Pages in the region become guard pages. Any attempt to access a guard page
  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
  // the guard page status. Guard pages thus act as a one-time access alarm.
  return VirtualProtect(addr, bytes, p, &old_status) != 0;
}

bool os::guard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}

bool os::unguard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}

// NUMA/page-placement hints are no-ops on Windows; placement is handled at
// reservation time (see allocate_pages_individually).
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
bool os::numa_topology_changed() { return false; }
size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id() { return 0; }
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // Provide an answer for UMA systems
    ids[0] = 0;
    return 1;
  } else {
    // check for size bigger than actual groups_num
    size = MIN2(size, numa_get_groups_num());
    for (int i = 0; i < (int)size; i++) {
      ids[i] = numa_node_list_holder.get_node_list_entry(i);
    }
    return size;
  }
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}

#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL

// Resume a thread created suspended (see thread creation code elsewhere
// in this file) so it begins executing.
void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}

// RAII helper: raises the multimedia timer resolution to 1ms for the
// lifetime of the object when the requested sleep is not a multiple of
// the default ~10ms tick, and restores it on destruction.
class HighResolutionInterval : public CHeapObj<mtThread> {
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
 private:
  jlong resolution;
 public:
  HighResolutionInterval(jlong ms) {
    resolution = ms % 10L;
    if (resolution != 0) {
      MMRESULT result = timeBeginPeriod(1L);
    }
  }
  ~HighResolutionInterval() {
    if (resolution != 0) {
      MMRESULT result = timeEndPeriod(1L);
    }
    resolution = 0L;
  }
};

// Sleep for 'ms' milliseconds.  Interruptible sleeps wait on the thread's
// interrupt event and return OS_INTRPT when it fires; otherwise OS_TIMEOUT.
// Sleeps longer than MAXDWORD ms are broken into multiple waits.
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  while (ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT)
      return res;
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    HighResolutionInterval *phri=NULL;
    if (!ForceTimeHighResolution)
      phri = new HighResolutionInterval(ms);
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}

//
// Short sleep, direct OS call.
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    Sleep(100000);  // ... 100 seconds at a time
  }
}

typedef BOOL (WINAPI * STTSignature)(void);

void os::naked_yield() {
  // Use either SwitchToThread() or Sleep(0)
  // Consider passing back the return value from SwitchToThread().
  if (os::Kernel32Dll::SwitchToThreadAvailable()) {
    SwitchToThread();
  } else {
    Sleep(0);
  }
}

// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.

// Default Java-priority -> Win32-priority mapping (ThreadPriorityPolicy 0).
int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};

// Alternative, more aggressive mapping used when ThreadPriorityPolicy == 1.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};

// Select the active priority table based on the command-line flags.
// Called once during initialization; always returns 0.
static int prio_init() {
  // If ThreadPriorityPolicy is 1, switch tables
  if (ThreadPriorityPolicy == 1) {
    int i;
    for (i = 0; i < CriticalPriority + 1; i++) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  if (UseCriticalJavaThreadPriority) {
    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
  }
  return 0;
}

OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) return OS_OK;
  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
  return ret ? OS_OK : OS_ERR;
}

OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) {
  if (!UseThreadPriorities) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

// Post an interrupt to 'thread': set the interrupted flag, signal the
// interrupt event, and unpark both the JSR-166 parker and the ParkEvent.
void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread())
    ((JavaThread*)thread)->parker()->unpark();

  ParkEvent * ev = thread->_ParkEvent;
  if (ev != NULL) ev->unpark();

}


// Report (and optionally clear) 'thread's interrupted state.
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. By checking thread interrupt event to see
  // if the thread gets real interrupt thus prevent spurious wakeup.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}

// Get's a pc (hint) for a running thread. Currently used only for profiling.
// Sample another thread's program counter via GetThreadContext.
// Returns ExtendedPC(NULL) when the context cannot be read (or on IA64,
// where this is not implemented).
ExtendedPC os::get_thread_pc(Thread* thread) {
  CONTEXT context;
  context.ContextFlags = CONTEXT_CONTROL;
  HANDLE handle = thread->osthread()->thread_handle();
#ifdef _M_IA64
  assert(0, "Fix get_thread_pc");
  return ExtendedPC(NULL);
#else
  if (GetThreadContext(handle, &context)) {
#ifdef _M_AMD64
    return ExtendedPC((address) context.Rip);
#else
    return ExtendedPC((address) context.Eip);
#endif
  } else {
    return ExtendedPC(NULL);
  }
#endif
}

// GetCurrentThreadId() returns DWORD
intx os::current_thread_id()  { return GetCurrentThreadId(); }

// Pid captured in os::init(); 0 until then.
static int _initial_pid = 0;

int os::current_process_id()
{
  return (_initial_pid ? _initial_pid : _getpid());
}

int    os::win32::_vm_page_size              = 0;
int    os::win32::_vm_allocation_granularity = 0;
int    os::win32::_processor_type            = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int    os::win32::_processor_level           = 0;
julong os::win32::_physical_memory           = 0;
size_t os::win32::_default_stack_size        = 0;

intx   os::win32::_os_thread_limit           = 0;
volatile intx os::win32::_os_thread_count    = 0;

bool   os::win32::_is_nt                     = false;
bool   os::win32::_is_windows_2003           = false;
bool   os::win32::_is_windows_server         = false;

bool   os::win32::_has_performance_count     = 0;

// Query page size, allocation granularity, processor count, physical memory
// and OS flavor, and cache them in the os::win32 statics above.
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size    = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type  = si.dwProcessorType;
  _processor_level = si.wProcessorLevel;
  set_processor_count(si.dwNumberOfProcessors);

  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);

  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatusEx(&ms);
  _physical_memory = ms.ullTotalPhys;

  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch (oi.dwPlatformId) {
  case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
  case VER_PLATFORM_WIN32_NT:
    _is_nt = true;
    {
      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
      if (os_vers == 5002) {
        _is_windows_2003 = true;
      }
      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
          oi.wProductType == VER_NT_SERVER) {
        _is_windows_server = true;
      }
    }
    break;
  default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
         "stack size not a multiple of page size");

  initialize_performance_counter();

  // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is
  // known to deadlock the system, if the VM issues to thread operations with
  // a too high frequency, e.g., such as changing the priorities.
  // The 6000 seems to work well - no deadlocks has been notices on the test
  // programs that we have seen experience this problem.
  if (!os::win32::is_nt()) {
    StarvationMonitorInterval = 6000;
  }
}


// Load a system DLL by bare name only (no path components allowed),
// searching the System directory and then the Windows directory.
// Returns NULL and fills 'ebuf' on failure.
HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, int ebuflen) {
  char path[MAX_PATH];
  DWORD size;
  DWORD pathLen = (DWORD)sizeof(path);
  HINSTANCE result = NULL;

  // only allow library name without path component
  assert(strchr(name, '\\') == NULL, "path not allowed");
  assert(strchr(name, ':') == NULL, "path not allowed");
  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
    jio_snprintf(ebuf, ebuflen,
                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
    return NULL;
  }

  // search system directory
  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
    strcat(path, "\\");
    strcat(path, name);
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // try Windows directory
  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
    strcat(path, "\\");
    strcat(path, name);
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  jio_snprintf(ebuf, ebuflen,
               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
  return NULL;
}

// Put the standard streams into binary mode so newline translation does
// not corrupt their output.
void os::win32::setmode_streams() {
  _setmode(_fileno(stdin), _O_BINARY);
  _setmode(_fileno(stdout), _O_BINARY);
  _setmode(_fileno(stderr), _O_BINARY);
}


bool os::is_debugger_attached() {
  return IsDebuggerPresent() ? true : false;
}


void os::wait_for_keypress_at_exit(void) {
  if (PauseAtExit) {
    fprintf(stderr, "Press any key to continue...\n");
    fgetc(stdin);
  }
}


// Show a modal Yes/No message box; returns nonzero if the user chose Yes.
int os::message_box(const char* title, const char* message) {
  int result = MessageBox(NULL, message, title,
                          MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
  return result == IDYES;
}

// Thin wrappers around the Win32 TLS API.
int os::allocate_thread_local_storage() {
  return TlsAlloc();
}


void os::free_thread_local_storage(int index) {
  TlsFree(index);
}


void os::thread_local_storage_at_put(int index, void* value) {
  TlsSetValue(index, value);
  assert(thread_local_storage_at(index) == value, "Just checking");
}


void* os::thread_local_storage_at(int index) {
  return TlsGetValue(index);
}


#ifndef PRODUCT
#ifndef _WIN64
// Helpers to check whether NX protection is enabled
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      pex->ExceptionRecord->NumberParameters > 0 &&
      pex->ExceptionRecord->ExceptionInformation[0] ==
      EXCEPTION_INFO_EXEC_VIOLATION) {
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 }; // ret
  void *code_ptr = (void *)code;
  __try {
    __asm call code_ptr
  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
    tty->print_raw_cr("NX protection detected.");
  }
}
#endif // _WIN64
#endif // PRODUCT

// this is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = _getpid();

  init_random(1234567);

win32::initialize_system_info(); 3860 win32::setmode_streams(); 3861 init_page_sizes((size_t) win32::vm_page_size()); 3862 3863 // This may be overridden later when argument processing is done. 3864 FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, 3865 os::win32::is_windows_2003()); 3866 3867 // Initialize main_process and main_thread 3868 main_process = GetCurrentProcess(); // Remember main_process is a pseudo handle 3869 if (!DuplicateHandle(main_process, GetCurrentThread(), main_process, 3870 &main_thread, THREAD_ALL_ACCESS, false, 0)) { 3871 fatal("DuplicateHandle failed\n"); 3872 } 3873 main_thread_id = (int) GetCurrentThreadId(); 3874 } 3875 3876 // To install functions for atexit processing 3877 extern "C" { 3878 static void perfMemory_exit_helper() { 3879 perfMemory_exit(); 3880 } 3881 } 3882 3883 static jint initSock(); 3884 3885 // this is called _after_ the global arguments have been parsed 3886 jint os::init_2(void) { 3887 // Allocate a single page and mark it as readable for safepoint polling 3888 address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY); 3889 guarantee(polling_page != NULL, "Reserve Failed for polling page"); 3890 3891 address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY); 3892 guarantee(return_page != NULL, "Commit Failed for polling page"); 3893 3894 os::set_polling_page(polling_page); 3895 3896 #ifndef PRODUCT 3897 if (Verbose && PrintMiscellaneous) 3898 tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page); 3899 #endif 3900 3901 if (!UseMembar) { 3902 address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE); 3903 guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page"); 3904 3905 return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE); 3906 guarantee(return_page != NULL, "Commit 
Failed for memory serialize page"); 3907 3908 os::set_memory_serialize_page(mem_serialize_page); 3909 3910 #ifndef PRODUCT 3911 if (Verbose && PrintMiscellaneous) 3912 tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page); 3913 #endif 3914 } 3915 3916 // Setup Windows Exceptions 3917 3918 // for debugging float code generation bugs 3919 if (ForceFloatExceptions) { 3920 #ifndef _WIN64 3921 static long fp_control_word = 0; 3922 __asm { fstcw fp_control_word } 3923 // see Intel PPro Manual, Vol. 2, p 7-16 3924 const long precision = 0x20; 3925 const long underflow = 0x10; 3926 const long overflow = 0x08; 3927 const long zero_div = 0x04; 3928 const long denorm = 0x02; 3929 const long invalid = 0x01; 3930 fp_control_word |= invalid; 3931 __asm { fldcw fp_control_word } 3932 #endif 3933 } 3934 3935 // If stack_commit_size is 0, windows will reserve the default size, 3936 // but only commit a small portion of it. 3937 size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size()); 3938 size_t default_reserve_size = os::win32::default_stack_size(); 3939 size_t actual_reserve_size = stack_commit_size; 3940 if (stack_commit_size < default_reserve_size) { 3941 // If stack_commit_size == 0, we want this too 3942 actual_reserve_size = default_reserve_size; 3943 } 3944 3945 // Check minimum allowable stack size for thread creation and to initialize 3946 // the java system classes, including StackOverflowError - depends on page 3947 // size. Add a page for compiler2 recursion in main thread. 3948 // Add in 2*BytesPerWord times page size to account for VM stack during 3949 // class initialization depending on 32 or 64 bit VM. 
3950 size_t min_stack_allowed = 3951 (size_t)(StackYellowPages+StackRedPages+StackShadowPages+ 3952 2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size(); 3953 if (actual_reserve_size < min_stack_allowed) { 3954 tty->print_cr("\nThe stack size specified is too small, " 3955 "Specify at least %dk", 3956 min_stack_allowed / K); 3957 return JNI_ERR; 3958 } 3959 3960 JavaThread::set_stack_size_at_create(stack_commit_size); 3961 3962 // Calculate theoretical max. size of Threads to guard gainst artifical 3963 // out-of-memory situations, where all available address-space has been 3964 // reserved by thread stacks. 3965 assert(actual_reserve_size != 0, "Must have a stack"); 3966 3967 // Calculate the thread limit when we should start doing Virtual Memory 3968 // banging. Currently when the threads will have used all but 200Mb of space. 3969 // 3970 // TODO: consider performing a similar calculation for commit size instead 3971 // as reserve size, since on a 64-bit platform we'll run into that more 3972 // often than running out of virtual memory space. We can use the 3973 // lower value of the two calculations as the os_thread_limit. 3974 size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K); 3975 win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size); 3976 3977 // at exit methods are called in the reverse order of their registration. 3978 // there is no limit to the number of functions registered. atexit does 3979 // not set errno. 3980 3981 if (PerfAllowAtExitRegistration) { 3982 // only register atexit functions if PerfAllowAtExitRegistration is set. 3983 // atexit functions can be delayed until process exit time, which 3984 // can be problematic for embedded VM situations. Embedded VMs should 3985 // call DestroyJavaVM() to assure that VM resources are released. 
3986 3987 // note: perfMemory_exit_helper atexit function may be removed in 3988 // the future if the appropriate cleanup code can be added to the 3989 // VM_Exit VMOperation's doit method. 3990 if (atexit(perfMemory_exit_helper) != 0) { 3991 warning("os::init_2 atexit(perfMemory_exit_helper) failed"); 3992 } 3993 } 3994 3995 #ifndef _WIN64 3996 // Print something if NX is enabled (win32 on AMD64) 3997 NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection()); 3998 #endif 3999 4000 // initialize thread priority policy 4001 prio_init(); 4002 4003 if (UseNUMA && !ForceNUMA) { 4004 UseNUMA = false; // We don't fully support this yet 4005 } 4006 4007 if (UseNUMAInterleaving) { 4008 // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag 4009 bool success = numa_interleaving_init(); 4010 if (!success) UseNUMAInterleaving = false; 4011 } 4012 4013 if (initSock() != JNI_OK) { 4014 return JNI_ERR; 4015 } 4016 4017 return JNI_OK; 4018 } 4019 4020 void os::init_3(void) { 4021 return; 4022 } 4023 4024 // Mark the polling page as unreadable 4025 void os::make_polling_page_unreadable(void) { 4026 DWORD old_status; 4027 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status)) 4028 fatal("Could not disable polling page"); 4029 }; 4030 4031 // Mark the polling page as readable 4032 void os::make_polling_page_readable(void) { 4033 DWORD old_status; 4034 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status)) 4035 fatal("Could not enable polling page"); 4036 }; 4037 4038 4039 int os::stat(const char *path, struct stat *sbuf) { 4040 char pathbuf[MAX_PATH]; 4041 if (strlen(path) > MAX_PATH - 1) { 4042 errno = ENAMETOOLONG; 4043 return -1; 4044 } 4045 os::native_path(strcpy(pathbuf, path)); 4046 int ret = ::stat(pathbuf, sbuf); 4047 if (sbuf != NULL && UseUTCFileTimestamp) { 4048 // Fix for 6539723. 
st_mtime returned from stat() is dependent on 4049 // the system timezone and so can return different values for the 4050 // same file if/when daylight savings time changes. This adjustment 4051 // makes sure the same timestamp is returned regardless of the TZ. 4052 // 4053 // See: 4054 // http://msdn.microsoft.com/library/ 4055 // default.asp?url=/library/en-us/sysinfo/base/ 4056 // time_zone_information_str.asp 4057 // and 4058 // http://msdn.microsoft.com/library/default.asp?url= 4059 // /library/en-us/sysinfo/base/settimezoneinformation.asp 4060 // 4061 // NOTE: there is a insidious bug here: If the timezone is changed 4062 // after the call to stat() but before 'GetTimeZoneInformation()', then 4063 // the adjustment we do here will be wrong and we'll return the wrong 4064 // value (which will likely end up creating an invalid class data 4065 // archive). Absent a better API for this, or some time zone locking 4066 // mechanism, we'll have to live with this risk. 4067 TIME_ZONE_INFORMATION tz; 4068 DWORD tzid = GetTimeZoneInformation(&tz); 4069 int daylightBias = 4070 (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias; 4071 sbuf->st_mtime += (tz.Bias + daylightBias) * 60; 4072 } 4073 return ret; 4074 } 4075 4076 4077 #define FT2INT64(ft) \ 4078 ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime)) 4079 4080 4081 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) 4082 // are used by JVM M&M and JVMTI to get user+sys or user CPU time 4083 // of a thread. 4084 // 4085 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns 4086 // the fast estimate available on the platform. 
4087 4088 // current_thread_cpu_time() is not optimized for Windows yet 4089 jlong os::current_thread_cpu_time() { 4090 // return user + sys since the cost is the same 4091 return os::thread_cpu_time(Thread::current(), true /* user+sys */); 4092 } 4093 4094 jlong os::thread_cpu_time(Thread* thread) { 4095 // consistent with what current_thread_cpu_time() returns. 4096 return os::thread_cpu_time(thread, true /* user+sys */); 4097 } 4098 4099 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 4100 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 4101 } 4102 4103 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) { 4104 // This code is copy from clasic VM -> hpi::sysThreadCPUTime 4105 // If this function changes, os::is_thread_cpu_time_supported() should too 4106 if (os::win32::is_nt()) { 4107 FILETIME CreationTime; 4108 FILETIME ExitTime; 4109 FILETIME KernelTime; 4110 FILETIME UserTime; 4111 4112 if (GetThreadTimes(thread->osthread()->thread_handle(), 4113 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0) 4114 return -1; 4115 else 4116 if (user_sys_cpu_time) { 4117 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100; 4118 } else { 4119 return FT2INT64(UserTime) * 100; 4120 } 4121 } else { 4122 return (jlong) timeGetTime() * 1000000; 4123 } 4124 } 4125 4126 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4127 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4128 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4129 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4130 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4131 } 4132 4133 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4134 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4135 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4136 info_ptr->may_skip_forward = false; // GetThreadTimes 
returns absolute time 4137 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4138 } 4139 4140 bool os::is_thread_cpu_time_supported() { 4141 // see os::thread_cpu_time 4142 if (os::win32::is_nt()) { 4143 FILETIME CreationTime; 4144 FILETIME ExitTime; 4145 FILETIME KernelTime; 4146 FILETIME UserTime; 4147 4148 if (GetThreadTimes(GetCurrentThread(), 4149 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0) 4150 return false; 4151 else 4152 return true; 4153 } else { 4154 return false; 4155 } 4156 } 4157 4158 // Windows does't provide a loadavg primitive so this is stubbed out for now. 4159 // It does have primitives (PDH API) to get CPU usage and run queue length. 4160 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length" 4161 // If we wanted to implement loadavg on Windows, we have a few options: 4162 // 4163 // a) Query CPU usage and run queue length and "fake" an answer by 4164 // returning the CPU usage if it's under 100%, and the run queue 4165 // length otherwise. It turns out that querying is pretty slow 4166 // on Windows, on the order of 200 microseconds on a fast machine. 4167 // Note that on the Windows the CPU usage value is the % usage 4168 // since the last time the API was called (and the first call 4169 // returns 100%), so we'd have to deal with that as well. 4170 // 4171 // b) Sample the "fake" answer using a sampling thread and store 4172 // the answer in a global variable. The call to loadavg would 4173 // just return the value of the global, avoiding the slow query. 4174 // 4175 // c) Sample a better answer using exponential decay to smooth the 4176 // value. This is basically the algorithm used by UNIX kernels. 4177 // 4178 // Note that sampling thread starvation could affect both (b) and (c). 
4179 int os::loadavg(double loadavg[], int nelem) { 4180 return -1; 4181 } 4182 4183 4184 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield() 4185 bool os::dont_yield() { 4186 return DontYieldALot; 4187 } 4188 4189 // This method is a slightly reworked copy of JDK's sysOpen 4190 // from src/windows/hpi/src/sys_api_md.c 4191 4192 int os::open(const char *path, int oflag, int mode) { 4193 char pathbuf[MAX_PATH]; 4194 4195 if (strlen(path) > MAX_PATH - 1) { 4196 errno = ENAMETOOLONG; 4197 return -1; 4198 } 4199 os::native_path(strcpy(pathbuf, path)); 4200 return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); 4201 } 4202 4203 FILE* os::open(int fd, const char* mode) { 4204 return ::_fdopen(fd, mode); 4205 } 4206 4207 // Is a (classpath) directory empty? 4208 bool os::dir_is_empty(const char* path) { 4209 WIN32_FIND_DATA fd; 4210 HANDLE f = FindFirstFile(path, &fd); 4211 if (f == INVALID_HANDLE_VALUE) { 4212 return true; 4213 } 4214 FindClose(f); 4215 return false; 4216 } 4217 4218 // create binary file, rewriting existing file if required 4219 int os::create_binary_file(const char* path, bool rewrite_existing) { 4220 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY; 4221 if (!rewrite_existing) { 4222 oflags |= _O_EXCL; 4223 } 4224 return ::open(path, oflags, _S_IREAD | _S_IWRITE); 4225 } 4226 4227 // return current position of file pointer 4228 jlong os::current_file_offset(int fd) { 4229 return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR); 4230 } 4231 4232 // move file pointer to the specified offset 4233 jlong os::seek_to_file_offset(int fd, jlong offset) { 4234 return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET); 4235 } 4236 4237 4238 jlong os::lseek(int fd, jlong offset, int whence) { 4239 return (jlong) ::_lseeki64(fd, offset, whence); 4240 } 4241 4242 // This method is a slightly reworked copy of JDK's sysNativePath 4243 // from src/windows/hpi/src/path_md.c 4244 4245 /* Convert a pathname to native format. 
On win32, this involves forcing all 4246 separators to be '\\' rather than '/' (both are legal inputs, but Win95 4247 sometimes rejects '/') and removing redundant separators. The input path is 4248 assumed to have been converted into the character encoding used by the local 4249 system. Because this might be a double-byte encoding, care is taken to 4250 treat double-byte lead characters correctly. 4251 4252 This procedure modifies the given path in place, as the result is never 4253 longer than the original. There is no error return; this operation always 4254 succeeds. */ 4255 char * os::native_path(char *path) { 4256 char *src = path, *dst = path, *end = path; 4257 char *colon = NULL; /* If a drive specifier is found, this will 4258 point to the colon following the drive 4259 letter */ 4260 4261 /* Assumption: '/', '\\', ':', and drive letters are never lead bytes */ 4262 assert(((!::IsDBCSLeadByte('/')) 4263 && (!::IsDBCSLeadByte('\\')) 4264 && (!::IsDBCSLeadByte(':'))), 4265 "Illegal lead byte"); 4266 4267 /* Check for leading separators */ 4268 #define isfilesep(c) ((c) == '/' || (c) == '\\') 4269 while (isfilesep(*src)) { 4270 src++; 4271 } 4272 4273 if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') { 4274 /* Remove leading separators if followed by drive specifier. This 4275 hack is necessary to support file URLs containing drive 4276 specifiers (e.g., "file://c:/path"). As a side effect, 4277 "/c:/path" can be used as an alternative to "c:/path". */ 4278 *dst++ = *src++; 4279 colon = dst; 4280 *dst++ = ':'; 4281 src++; 4282 } else { 4283 src = path; 4284 if (isfilesep(src[0]) && isfilesep(src[1])) { 4285 /* UNC pathname: Retain first separator; leave src pointed at 4286 second separator so that further separators will be collapsed 4287 into the second separator. The result will be a pathname 4288 beginning with "\\\\" followed (most likely) by a host name. 
*/ 4289 src = dst = path + 1; 4290 path[0] = '\\'; /* Force first separator to '\\' */ 4291 } 4292 } 4293 4294 end = dst; 4295 4296 /* Remove redundant separators from remainder of path, forcing all 4297 separators to be '\\' rather than '/'. Also, single byte space 4298 characters are removed from the end of the path because those 4299 are not legal ending characters on this operating system. 4300 */ 4301 while (*src != '\0') { 4302 if (isfilesep(*src)) { 4303 *dst++ = '\\'; src++; 4304 while (isfilesep(*src)) src++; 4305 if (*src == '\0') { 4306 /* Check for trailing separator */ 4307 end = dst; 4308 if (colon == dst - 2) break; /* "z:\\" */ 4309 if (dst == path + 1) break; /* "\\" */ 4310 if (dst == path + 2 && isfilesep(path[0])) { 4311 /* "\\\\" is not collapsed to "\\" because "\\\\" marks the 4312 beginning of a UNC pathname. Even though it is not, by 4313 itself, a valid UNC pathname, we leave it as is in order 4314 to be consistent with the path canonicalizer as well 4315 as the win32 APIs, which treat this case as an invalid 4316 UNC pathname rather than as an alias for the root 4317 directory of the current drive. */ 4318 break; 4319 } 4320 end = --dst; /* Path does not denote a root directory, so 4321 remove trailing separator */ 4322 break; 4323 } 4324 end = dst; 4325 } else { 4326 if (::IsDBCSLeadByte(*src)) { /* Copy a double-byte character */ 4327 *dst++ = *src++; 4328 if (*src) *dst++ = *src++; 4329 end = dst; 4330 } else { /* Copy a single-byte character */ 4331 char c = *src++; 4332 *dst++ = c; 4333 /* Space is not a legal ending character */ 4334 if (c != ' ') end = dst; 4335 } 4336 } 4337 } 4338 4339 *end = '\0'; 4340 4341 /* For "z:", add "." 
to work around a bug in the C runtime library */ 4342 if (colon == dst - 1) { 4343 path[2] = '.'; 4344 path[3] = '\0'; 4345 } 4346 4347 return path; 4348 } 4349 4350 // This code is a copy of JDK's sysSetLength 4351 // from src/windows/hpi/src/sys_api_md.c 4352 4353 int os::ftruncate(int fd, jlong length) { 4354 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4355 long high = (long)(length >> 32); 4356 DWORD ret; 4357 4358 if (h == (HANDLE)(-1)) { 4359 return -1; 4360 } 4361 4362 ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN); 4363 if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) { 4364 return -1; 4365 } 4366 4367 if (::SetEndOfFile(h) == FALSE) { 4368 return -1; 4369 } 4370 4371 return 0; 4372 } 4373 4374 4375 // This code is a copy of JDK's sysSync 4376 // from src/windows/hpi/src/sys_api_md.c 4377 // except for the legacy workaround for a bug in Win 98 4378 4379 int os::fsync(int fd) { 4380 HANDLE handle = (HANDLE)::_get_osfhandle(fd); 4381 4382 if ((!::FlushFileBuffers(handle)) && 4383 (GetLastError() != ERROR_ACCESS_DENIED) ) { 4384 /* from winerror.h */ 4385 return -1; 4386 } 4387 return 0; 4388 } 4389 4390 static int nonSeekAvailable(int, long *); 4391 static int stdinAvailable(int, long *); 4392 4393 #define S_ISCHR(mode) (((mode) & _S_IFCHR) == _S_IFCHR) 4394 #define S_ISFIFO(mode) (((mode) & _S_IFIFO) == _S_IFIFO) 4395 4396 // This code is a copy of JDK's sysAvailable 4397 // from src/windows/hpi/src/sys_api_md.c 4398 4399 int os::available(int fd, jlong *bytes) { 4400 jlong cur, end; 4401 struct _stati64 stbuf64; 4402 4403 if (::_fstati64(fd, &stbuf64) >= 0) { 4404 int mode = stbuf64.st_mode; 4405 if (S_ISCHR(mode) || S_ISFIFO(mode)) { 4406 int ret; 4407 long lpbytes; 4408 if (fd == 0) { 4409 ret = stdinAvailable(fd, &lpbytes); 4410 } else { 4411 ret = nonSeekAvailable(fd, &lpbytes); 4412 } 4413 (*bytes) = (jlong)(lpbytes); 4414 return ret; 4415 } 4416 if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) { 4417 return FALSE; 4418 } else if 
((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) { 4419 return FALSE; 4420 } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) { 4421 return FALSE; 4422 } 4423 *bytes = end - cur; 4424 return TRUE; 4425 } else { 4426 return FALSE; 4427 } 4428 } 4429 4430 // This code is a copy of JDK's nonSeekAvailable 4431 // from src/windows/hpi/src/sys_api_md.c 4432 4433 static int nonSeekAvailable(int fd, long *pbytes) { 4434 /* This is used for available on non-seekable devices 4435 * (like both named and anonymous pipes, such as pipes 4436 * connected to an exec'd process). 4437 * Standard Input is a special case. 4438 * 4439 */ 4440 HANDLE han; 4441 4442 if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) { 4443 return FALSE; 4444 } 4445 4446 if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) { 4447 /* PeekNamedPipe fails when at EOF. In that case we 4448 * simply make *pbytes = 0 which is consistent with the 4449 * behavior we get on Solaris when an fd is at EOF. 4450 * The only alternative is to raise an Exception, 4451 * which isn't really warranted. 
4452 */ 4453 if (::GetLastError() != ERROR_BROKEN_PIPE) { 4454 return FALSE; 4455 } 4456 *pbytes = 0; 4457 } 4458 return TRUE; 4459 } 4460 4461 #define MAX_INPUT_EVENTS 2000 4462 4463 // This code is a copy of JDK's stdinAvailable 4464 // from src/windows/hpi/src/sys_api_md.c 4465 4466 static int stdinAvailable(int fd, long *pbytes) { 4467 HANDLE han; 4468 DWORD numEventsRead = 0; /* Number of events read from buffer */ 4469 DWORD numEvents = 0; /* Number of events in buffer */ 4470 DWORD i = 0; /* Loop index */ 4471 DWORD curLength = 0; /* Position marker */ 4472 DWORD actualLength = 0; /* Number of bytes readable */ 4473 BOOL error = FALSE; /* Error holder */ 4474 INPUT_RECORD *lpBuffer; /* Pointer to records of input events */ 4475 4476 if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) { 4477 return FALSE; 4478 } 4479 4480 /* Construct an array of input records in the console buffer */ 4481 error = ::GetNumberOfConsoleInputEvents(han, &numEvents); 4482 if (error == 0) { 4483 return nonSeekAvailable(fd, pbytes); 4484 } 4485 4486 /* lpBuffer must fit into 64K or else PeekConsoleInput fails */ 4487 if (numEvents > MAX_INPUT_EVENTS) { 4488 numEvents = MAX_INPUT_EVENTS; 4489 } 4490 4491 lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal); 4492 if (lpBuffer == NULL) { 4493 return FALSE; 4494 } 4495 4496 error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead); 4497 if (error == 0) { 4498 os::free(lpBuffer, mtInternal); 4499 return FALSE; 4500 } 4501 4502 /* Examine input records for the number of bytes available */ 4503 for (i=0; i<numEvents; i++) { 4504 if (lpBuffer[i].EventType == KEY_EVENT) { 4505 4506 KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *) 4507 &(lpBuffer[i].Event); 4508 if (keyRecord->bKeyDown == TRUE) { 4509 CHAR *keyPressed = (CHAR *) &(keyRecord->uChar); 4510 curLength++; 4511 if (*keyPressed == '\r') { 4512 actualLength = curLength; 4513 } 4514 } 4515 } 4516 } 4517 4518 if (lpBuffer 
!= NULL) { 4519 os::free(lpBuffer, mtInternal); 4520 } 4521 4522 *pbytes = (long) actualLength; 4523 return TRUE; 4524 } 4525 4526 // Map a block of memory. 4527 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 4528 char *addr, size_t bytes, bool read_only, 4529 bool allow_exec) { 4530 HANDLE hFile; 4531 char* base; 4532 4533 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL, 4534 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); 4535 if (hFile == NULL) { 4536 if (PrintMiscellaneous && Verbose) { 4537 DWORD err = GetLastError(); 4538 tty->print_cr("CreateFile() failed: GetLastError->%ld.", err); 4539 } 4540 return NULL; 4541 } 4542 4543 if (allow_exec) { 4544 // CreateFileMapping/MapViewOfFileEx can't map executable memory 4545 // unless it comes from a PE image (which the shared archive is not.) 4546 // Even VirtualProtect refuses to give execute access to mapped memory 4547 // that was not previously executable. 4548 // 4549 // Instead, stick the executable region in anonymous memory. Yuck. 4550 // Penalty is that ~4 pages will not be shareable - in the future 4551 // we might consider DLLizing the shared archive with a proper PE 4552 // header so that mapping executable + sharing is possible. 4553 4554 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE, 4555 PAGE_READWRITE); 4556 if (base == NULL) { 4557 if (PrintMiscellaneous && Verbose) { 4558 DWORD err = GetLastError(); 4559 tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err); 4560 } 4561 CloseHandle(hFile); 4562 return NULL; 4563 } 4564 4565 DWORD bytes_read; 4566 OVERLAPPED overlapped; 4567 overlapped.Offset = (DWORD)file_offset; 4568 overlapped.OffsetHigh = 0; 4569 overlapped.hEvent = NULL; 4570 // ReadFile guarantees that if the return value is true, the requested 4571 // number of bytes were read before returning. 
4572 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; 4573 if (!res) { 4574 if (PrintMiscellaneous && Verbose) { 4575 DWORD err = GetLastError(); 4576 tty->print_cr("ReadFile() failed: GetLastError->%ld.", err); 4577 } 4578 release_memory(base, bytes); 4579 CloseHandle(hFile); 4580 return NULL; 4581 } 4582 } else { 4583 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, 4584 NULL /*file_name*/); 4585 if (hMap == NULL) { 4586 if (PrintMiscellaneous && Verbose) { 4587 DWORD err = GetLastError(); 4588 tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err); 4589 } 4590 CloseHandle(hFile); 4591 return NULL; 4592 } 4593 4594 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; 4595 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, 4596 (DWORD)bytes, addr); 4597 if (base == NULL) { 4598 if (PrintMiscellaneous && Verbose) { 4599 DWORD err = GetLastError(); 4600 tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err); 4601 } 4602 CloseHandle(hMap); 4603 CloseHandle(hFile); 4604 return NULL; 4605 } 4606 4607 if (CloseHandle(hMap) == 0) { 4608 if (PrintMiscellaneous && Verbose) { 4609 DWORD err = GetLastError(); 4610 tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err); 4611 } 4612 CloseHandle(hFile); 4613 return base; 4614 } 4615 } 4616 4617 if (allow_exec) { 4618 DWORD old_protect; 4619 DWORD exec_access = read_only ? 
PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; 4620 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; 4621 4622 if (!res) { 4623 if (PrintMiscellaneous && Verbose) { 4624 DWORD err = GetLastError(); 4625 tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err); 4626 } 4627 // Don't consider this a hard error, on IA32 even if the 4628 // VirtualProtect fails, we should still be able to execute 4629 CloseHandle(hFile); 4630 return base; 4631 } 4632 } 4633 4634 if (CloseHandle(hFile) == 0) { 4635 if (PrintMiscellaneous && Verbose) { 4636 DWORD err = GetLastError(); 4637 tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err); 4638 } 4639 return base; 4640 } 4641 4642 return base; 4643 } 4644 4645 4646 // Remap a block of memory. 4647 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4648 char *addr, size_t bytes, bool read_only, 4649 bool allow_exec) { 4650 // This OS does not allow existing memory maps to be remapped so we 4651 // have to unmap the memory before we remap it. 4652 if (!os::unmap_memory(addr, bytes)) { 4653 return NULL; 4654 } 4655 4656 // There is a very small theoretical window between the unmap_memory() 4657 // call above and the map_memory() call below where a thread in native 4658 // code may be able to access an address that is no longer mapped. 4659 4660 return os::map_memory(fd, file_name, file_offset, addr, bytes, 4661 read_only, allow_exec); 4662 } 4663 4664 4665 // Unmap a block of memory. 4666 // Returns true=success, otherwise false. 
4667 4668 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4669 BOOL result = UnmapViewOfFile(addr); 4670 if (result == 0) { 4671 if (PrintMiscellaneous && Verbose) { 4672 DWORD err = GetLastError(); 4673 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err); 4674 } 4675 return false; 4676 } 4677 return true; 4678 } 4679 4680 void os::pause() { 4681 char filename[MAX_PATH]; 4682 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4683 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4684 } else { 4685 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4686 } 4687 4688 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4689 if (fd != -1) { 4690 struct stat buf; 4691 ::close(fd); 4692 while (::stat(filename, &buf) == 0) { 4693 Sleep(100); 4694 } 4695 } else { 4696 jio_fprintf(stderr, 4697 "Could not open pause file '%s', continuing immediately.\n", filename); 4698 } 4699 } 4700 4701 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() { 4702 assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread"); 4703 } 4704 4705 /* 4706 * See the caveats for this class in os_windows.hpp 4707 * Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4708 * into this method and returns false. If no OS EXCEPTION was raised, returns 4709 * true. 4710 * The callback is supposed to provide the method that should be protected. 
4711 */ 4712 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) { 4713 assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread"); 4714 assert(!WatcherThread::watcher_thread()->has_crash_protection(), 4715 "crash_protection already set?"); 4716 4717 bool success = true; 4718 __try { 4719 WatcherThread::watcher_thread()->set_crash_protection(this); 4720 cb.call(); 4721 } __except(EXCEPTION_EXECUTE_HANDLER) { 4722 // only for protection, nothing to do 4723 success = false; 4724 } 4725 WatcherThread::watcher_thread()->set_crash_protection(NULL); 4726 return success; 4727 } 4728 4729 // An Event wraps a win32 "CreateEvent" kernel handle. 4730 // 4731 // We have a number of choices regarding "CreateEvent" win32 handle leakage: 4732 // 4733 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle 4734 // field, and call CloseHandle() on the win32 event handle. Unpark() would 4735 // need to be modified to tolerate finding a NULL (invalid) win32 event handle. 4736 // In addition, an unpark() operation might fetch the handle field, but the 4737 // event could recycle between the fetch and the SetEvent() operation. 4738 // SetEvent() would either fail because the handle was invalid, or inadvertently work, 4739 // as the win32 handle value had been recycled. In an ideal world calling SetEvent() 4740 // on an stale but recycled handle would be harmless, but in practice this might 4741 // confuse other non-Sun code, so it's not a viable approach. 4742 // 4743 // 2: Once a win32 event handle is associated with an Event, it remains associated 4744 // with the Event. The event handle is never closed. This could be construed 4745 // as handle leakage, but only up to the maximum # of threads that have been extant 4746 // at any one time. This shouldn't be an issue, as windows platforms typically 4747 // permit a process to have hundreds of thousands of open handles. 
//
// 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
//     and release unused handles.
//
// 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
//     It's not clear, however, that we wouldn't be trading one type of leak for another.
//
// 5.  Use an RCU-like mechanism (Read-Copy Update).
//     Or perhaps something similar to Maged Michael's "Hazard pointers".
//
// We use (2).
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
// 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
//     to recover from (or at least detect) the dreaded Windows 841176 bug.
// 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
//     into a single win32 CreateEvent() handle.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block
//
// _Event serves as a restricted-range semaphore :
//   -1 : thread is blocked
//    0 : neutral  - thread is running or ready
//    1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be
// with explicit "PARKED" and "SIGNALED" bits.

// Timed park: block the calling thread for at most 'Millis' milliseconds or
// until unpark()ed.  Returns OS_OK when awoken by unpark() (or when a permit
// was already available), OS_TIMEOUT when the wait timed out.
int os::PlatformEvent::park(jlong Millis) {
  guarantee(_ParkHandle != NULL, "Invariant");
  guarantee(Millis > 0, "Invariant");
  int v;

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.

  // Atomically decrement _Event: 1 -> 0 consumes the permit, 0 -> -1 marks
  // this thread as blocked.
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return OS_OK;   // permit was available - consumed it, no wait

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
  }
  v = _Event;
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultaneous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}

// Untimed park: block until unpark()ed, or return immediately when a
// permit (_Event == 1) is available.
void os::PlatformEvent::park() {
  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}

void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  //          That is, we can safely transition _Event from -1 to either
  //          0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  if (Atomic::xchg(1, &_Event) >= 0) return;

  // Prior value was -1: the owning thread may be blocked in the kernel,
  // so signal the event to wake it.
  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

/*
 * The Windows implementation of Park is very straightforward: Basic
 * operations on Win32 Events turn out to have the right semantics to
 * use them directly. We opportunistically reuse the event inherited
 * from Monitor.
 */

void Parker::park(bool isAbsolute, jlong time) {
  guarantee(_ParkEvent != NULL, "invariant");
  // First, demultiplex/decode time arguments.
  // time < 0: no wait; time == 0 && !isAbsolute: wait forever;
  // isAbsolute: deadline in millis; otherwise: relative nanos.
  if (time < 0) { // don't wait
    return;
  }
  else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  }
  else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) // already elapsed
      return;
  }
  else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0)   // Wait for the minimal time unit if zero
      time = 1;
  }

  JavaThread* thread = (JavaThread*)(Thread::current());
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    ResetEvent(_ParkEvent);
    return;
  }
  else {
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jt->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (jt->handle_special_suspend_equivalent_condition()) {
      jt->java_suspend_self();
    }
  }
}

void Parker::unpark() {
  guarantee(_ParkEvent != NULL, "invariant");
  SetEvent(_ParkEvent);
}

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't create a new process).
int os::fork_and_exec(char* cmd) {
  STARTUPINFO si;
  PROCESS_INFORMATION pi;

  memset(&si, 0, sizeof(si));
  si.cb = sizeof(si);
  memset(&pi, 0, sizeof(pi));
  BOOL rslt = CreateProcess(NULL,   // executable name - use command line
                            cmd,    // command line
                            NULL,   // process security attribute
                            NULL,   // thread security attribute
                            TRUE,   // inherits system handles
                            0,      // no creation flags
                            NULL,   // use parent's environment block
                            NULL,   // use parent's starting directory
                            &si,    // (in) startup information
                            &pi);   // (out) process information

  if (rslt) {
    // Wait until child process exits.
    WaitForSingleObject(pi.hProcess, INFINITE);

    DWORD exit_code;
    GetExitCodeProcess(pi.hProcess, &exit_code);

    // Close process and thread handles.
    CloseHandle(pi.hProcess);
    CloseHandle(pi.hThread);

    return (int)exit_code;
  } else {
    return -1;
  }
}

//--------------------------------------------------------------------------------------------------
// Non-product code

static int mallocDebugIntervalCounter = 0;
static int mallocDebugCounter = 0;

// Walk and validate the process C heap once MallocVerifyStart allocations
// have occurred, every MallocVerifyInterval checks thereafter (always when
// 'force' is set).  fatal()s on detected corruption; otherwise returns true.
bool os::check_heap(bool force) {
  if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
  if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
    // Note: HeapValidate executes two hardware breakpoints when it finds something
    // wrong; at these points, eax contains the address of the offending block (I think).
    // To get to the explicit error message(s) below, just continue twice.
    HANDLE heap = GetProcessHeap();

    // If we fail to lock the heap, then gflags.exe has been used
    // or some other special heap flag has been set that prevents
    // locking. We don't try to walk a heap we can't lock.
    if (HeapLock(heap) != 0) {
      PROCESS_HEAP_ENTRY phe;
      phe.lpData = NULL;
      while (HeapWalk(heap, &phe) != 0) {
        if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
            !HeapValidate(heap, 0, phe.lpData)) {
          tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
          // NOTE(review): "%#x" truncates the pointer on 64-bit Windows;
          // diagnostic output only -- consider PTR_FORMAT.
          tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData);
          fatal("corrupted C heap");
        }
      }
      DWORD err = GetLastError();
      if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
        fatal(err_msg("heap walk aborted with error %d", err));
      }
      HeapUnlock(heap);
    }
    mallocDebugIntervalCounter = 0;
  }
  return true;
}


bool os::find(address addr, outputStream* st) {
  // Nothing yet
  return false;
}

// SEH filter used around updates of the memory-serialization page: if the
// access violation was on that page, resume the faulting thread; otherwise
// let the next handler see the exception.
LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
  DWORD exception_code = e->ExceptionRecord->ExceptionCode;

  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow();
    PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (os::is_memory_serialize_page(thread, addr))
      return EXCEPTION_CONTINUE_EXECUTION;
  }

  return EXCEPTION_CONTINUE_SEARCH;
}

// We don't build a headless jre for Windows
bool os::is_headless_jre() { return false; }

// Load and start up Winsock 2.2.  Returns JNI_OK on success, JNI_ERR when
// the DLL cannot be loaded or WSAStartup fails.
static jint initSock() {
  WSADATA wsadata;

  if (!os::WinSock2Dll::WinSock2Available()) {
    jio_fprintf(stderr, "Could not load Winsock (error: %d)\n",
                ::GetLastError());
    return JNI_ERR;
  }

  if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
    jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
                ::GetLastError());
    return JNI_ERR;
  }
  return JNI_OK;
}

struct hostent* os::get_host_by_name(char* name) {
  return (struct hostent*)os::WinSock2Dll::gethostbyname(name);
}

// Thin pass-through wrappers over the Winsock BSD-style socket API.

int os::socket_close(int fd) {
  return ::closesocket(fd);
}

int os::socket_available(int fd, jint *pbytes) {
  int ret = ::ioctlsocket(fd, FIONREAD, (u_long*)pbytes);
  return (ret < 0) ? 0 : 1;
}

int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}

int os::listen(int fd, int count) {
  return ::listen(fd, count);
}

int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  return ::connect(fd, him, len);
}

int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
  return ::accept(fd, him, len);
}

int os::sendto(int fd, char* buf, size_t len, uint flags,
               struct sockaddr* to, socklen_t tolen) {

  return ::sendto(fd, buf, (int)len, flags, to, tolen);
}

int os::recvfrom(int fd, char *buf, size_t nBytes, uint flags,
                 sockaddr* from, socklen_t* fromlen) {

  return ::recvfrom(fd, buf, (int)nBytes, flags, from, fromlen);
}

int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  return ::recv(fd, buf, (int)nBytes, flags);
}

int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

// Wait up to 'timeout' milliseconds for 'fd' to become readable.
int os::timeout(int fd, long timeout) {
  fd_set tbl;
  struct timeval t;

  t.tv_sec  = timeout / 1000;
  t.tv_usec = (timeout % 1000) * 1000;

  // Winsock's fd_set is a count plus an array, so it can be filled in
  // directly rather than via FD_ZERO/FD_SET.
  tbl.fd_count    = 1;
  tbl.fd_array[0] = fd;

  return ::select(1, &tbl, 0, 0, &t);
}

int os::get_host_name(char* name, int namelen) {
  return ::gethostname(name, namelen);
}

int os::socket_shutdown(int fd, int howto) {
  return ::shutdown(fd, howto);
}

int os::bind(int fd, struct sockaddr* him, socklen_t len) {
  return ::bind(fd, him, len);
}

int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
  return ::getsockname(fd, him, len);
}

int os::get_sock_opt(int fd, int level, int optname,
                     char* optval, socklen_t* optlen) {
  return ::getsockopt(fd, level, optname, optval, optlen);
}

int os::set_sock_opt(int fd, int level, int optname,
                     const char* optval, socklen_t optlen) {
  return ::setsockopt(fd, level, optname, optval, optlen);
}

// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(IA32)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif

// returns true if thread could be suspended,
// false otherwise
static bool do_suspend(HANDLE* h) {
  if (h != NULL) {
    // SuspendThread returns (DWORD)-1 on failure.
    if (SuspendThread(*h) != ~0) {
      return true;
    }
  }
  return false;
}

// resume the thread
// calling resume on an active thread is a no-op
static void do_resume(HANDLE* h) {
  if (h != NULL) {
    ResumeThread(*h);
  }
}

// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value.
// Open a thread handle with the access rights needed for suspend/resume
// and GetThreadContext.  *h is NULL/invalid on failure; caller checks.
void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

//
// Thread sampling implementation
//
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}


// Kernel32 API
typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG);
typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG);
typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG);

// Lazily resolved entry points; remain NULL when the running Windows
// version does not export the corresponding API.
GetLargePageMinimum_Fn os::Kernel32Dll::_GetLargePageMinimum = NULL;
VirtualAllocExNuma_Fn os::Kernel32Dll::_VirtualAllocExNuma = NULL;
GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL;


BOOL os::Kernel32Dll::initialized = FALSE;

SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
  assert(initialized && _GetLargePageMinimum != NULL,
         "GetLargePageMinimumAvailable() not yet called");
  return _GetLargePageMinimum();
}

BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetLargePageMinimum != NULL;
}

BOOL os::Kernel32Dll::NumaCallsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _VirtualAllocExNuma != NULL;
}

LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr, SIZE_T bytes, DWORD flags, DWORD prot, DWORD node) {
  assert(initialized && _VirtualAllocExNuma != NULL,
         "NUMACallsAvailable() not yet called");

  return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
}

BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
  assert(initialized && _GetNumaHighestNodeNumber != NULL,
         "NUMACallsAvailable() not yet called");

  return _GetNumaHighestNodeNumber(ptr_highest_node_number);
}

BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node, PULONGLONG proc_mask) {
  assert(initialized && _GetNumaNodeProcessorMask != NULL,
         "NUMACallsAvailable() not yet called");

  return _GetNumaNodeProcessorMask(node, proc_mask);
}

USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
    ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) {
  // Unlike the other wrappers this one self-initializes and degrades to
  // returning 0 frames when the API is unavailable.
  if (!initialized) {
    initialize();
  }

  if (_RtlCaptureStackBackTrace != NULL) {
    return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
                                     BackTrace, BackTraceHash);
  } else {
    return 0;
  }
}

// Resolve the entry points that must be looked up dynamically on every
// supported Windows version (shared by both initialize() variants).
void os::Kernel32Dll::initializeCommon() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");
    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
    _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
    _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
    _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
    _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace");
    initialized = TRUE;
  }
}



#ifndef JDK6_OR_EARLIER
// Post-JDK6 builds target Windows versions on which these APIs are
// guaranteed to be present, so they are called directly.

void os::Kernel32Dll::initialize() {
  initializeCommon();
}


// Kernel32 API
inline BOOL os::Kernel32Dll::SwitchToThread() {
  return ::SwitchToThread();
}

inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  return true;
}

// Help tools
inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
  return true;
}

inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
  return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32First(hSnapshot, lpme);
}

inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32Next(hSnapshot, lpme);
}


inline BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
  return true;
}

inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  ::GetNativeSystemInfo(lpSystemInfo);
}

// PSAPI API
inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
  return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
  return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
  return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

inline BOOL os::PSApiDll::PSApiAvailable() {
  return true;
}


// WinSock2 API
inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
  return ::WSAStartup(wVersionRequested, lpWSAData);
}

inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  return ::gethostbyname(name);
}

inline BOOL os::WinSock2Dll::WinSock2Available() {
  return true;
}

// Advapi API
inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
    BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
    PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
  return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
                                 BufferLength, PreviousState, ReturnLength);
}

inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
    PHANDLE TokenHandle) {
  return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}

inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
  return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}

inline BOOL os::Advapi32Dll::AdvapiAvailable() {
  return true;
}

void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
5406 // Parameters: 5407 // sym_name: Symbol in library we are looking for 5408 // lib_name: Name of library to look in, NULL for shared libs. 5409 // is_absolute_path == true if lib_name is absolute path to agent 5410 // such as "C:/a/b/L.dll" 5411 // == false if only the base name of the library is passed in 5412 // such as "L" 5413 char* os::build_agent_function_name(const char *sym_name, const char *lib_name, 5414 bool is_absolute_path) { 5415 char *agent_entry_name; 5416 size_t len; 5417 size_t name_len; 5418 size_t prefix_len = strlen(JNI_LIB_PREFIX); 5419 size_t suffix_len = strlen(JNI_LIB_SUFFIX); 5420 const char *start; 5421 5422 if (lib_name != NULL) { 5423 len = name_len = strlen(lib_name); 5424 if (is_absolute_path) { 5425 // Need to strip path, prefix and suffix 5426 if ((start = strrchr(lib_name, *os::file_separator())) != NULL) { 5427 lib_name = ++start; 5428 } else { 5429 // Need to check for drive prefix 5430 if ((start = strchr(lib_name, ':')) != NULL) { 5431 lib_name = ++start; 5432 } 5433 } 5434 if (len <= (prefix_len + suffix_len)) { 5435 return NULL; 5436 } 5437 lib_name += prefix_len; 5438 name_len = strlen(lib_name) - suffix_len; 5439 } 5440 } 5441 len = (lib_name != NULL ? 
name_len : 0) + strlen(sym_name) + 2; 5442 agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread); 5443 if (agent_entry_name == NULL) { 5444 return NULL; 5445 } 5446 if (lib_name != NULL) { 5447 const char *p = strrchr(sym_name, '@'); 5448 if (p != NULL && p != sym_name) { 5449 // sym_name == _Agent_OnLoad@XX 5450 strncpy(agent_entry_name, sym_name, (p - sym_name)); 5451 agent_entry_name[(p-sym_name)] = '\0'; 5452 // agent_entry_name == _Agent_OnLoad 5453 strcat(agent_entry_name, "_"); 5454 strncat(agent_entry_name, lib_name, name_len); 5455 strcat(agent_entry_name, p); 5456 // agent_entry_name == _Agent_OnLoad_lib_name@XX 5457 } else { 5458 strcpy(agent_entry_name, sym_name); 5459 strcat(agent_entry_name, "_"); 5460 strncat(agent_entry_name, lib_name, name_len); 5461 } 5462 } else { 5463 strcpy(agent_entry_name, sym_name); 5464 } 5465 return agent_entry_name; 5466 } 5467 5468 #else 5469 // Kernel32 API 5470 typedef BOOL (WINAPI* SwitchToThread_Fn)(void); 5471 typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD,DWORD); 5472 typedef BOOL (WINAPI* Module32First_Fn)(HANDLE,LPMODULEENTRY32); 5473 typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE,LPMODULEENTRY32); 5474 typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO); 5475 5476 SwitchToThread_Fn os::Kernel32Dll::_SwitchToThread = NULL; 5477 CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL; 5478 Module32First_Fn os::Kernel32Dll::_Module32First = NULL; 5479 Module32Next_Fn os::Kernel32Dll::_Module32Next = NULL; 5480 GetNativeSystemInfo_Fn os::Kernel32Dll::_GetNativeSystemInfo = NULL; 5481 5482 void os::Kernel32Dll::initialize() { 5483 if (!initialized) { 5484 HMODULE handle = ::GetModuleHandle("Kernel32.dll"); 5485 assert(handle != NULL, "Just check"); 5486 5487 _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread"); 5488 _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn) 5489 ::GetProcAddress(handle, "CreateToolhelp32Snapshot"); 
5490 _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First"); 5491 _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next"); 5492 _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo"); 5493 initializeCommon(); // resolve the functions that always need resolving 5494 5495 initialized = TRUE; 5496 } 5497 } 5498 5499 BOOL os::Kernel32Dll::SwitchToThread() { 5500 assert(initialized && _SwitchToThread != NULL, 5501 "SwitchToThreadAvailable() not yet called"); 5502 return _SwitchToThread(); 5503 } 5504 5505 5506 BOOL os::Kernel32Dll::SwitchToThreadAvailable() { 5507 if (!initialized) { 5508 initialize(); 5509 } 5510 return _SwitchToThread != NULL; 5511 } 5512 5513 // Help tools 5514 BOOL os::Kernel32Dll::HelpToolsAvailable() { 5515 if (!initialized) { 5516 initialize(); 5517 } 5518 return _CreateToolhelp32Snapshot != NULL && 5519 _Module32First != NULL && 5520 _Module32Next != NULL; 5521 } 5522 5523 HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) { 5524 assert(initialized && _CreateToolhelp32Snapshot != NULL, 5525 "HelpToolsAvailable() not yet called"); 5526 5527 return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId); 5528 } 5529 5530 BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) { 5531 assert(initialized && _Module32First != NULL, 5532 "HelpToolsAvailable() not yet called"); 5533 5534 return _Module32First(hSnapshot, lpme); 5535 } 5536 5537 inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) { 5538 assert(initialized && _Module32Next != NULL, 5539 "HelpToolsAvailable() not yet called"); 5540 5541 return _Module32Next(hSnapshot, lpme); 5542 } 5543 5544 5545 BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() { 5546 if (!initialized) { 5547 initialize(); 5548 } 5549 return _GetNativeSystemInfo != NULL; 5550 } 5551 5552 void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) { 5553 
assert(initialized && _GetNativeSystemInfo != NULL, 5554 "GetNativeSystemInfoAvailable() not yet called"); 5555 5556 _GetNativeSystemInfo(lpSystemInfo); 5557 } 5558 5559 // PSAPI API 5560 5561 5562 typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD); 5563 typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);; 5564 typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD); 5565 5566 EnumProcessModules_Fn os::PSApiDll::_EnumProcessModules = NULL; 5567 GetModuleFileNameEx_Fn os::PSApiDll::_GetModuleFileNameEx = NULL; 5568 GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL; 5569 BOOL os::PSApiDll::initialized = FALSE; 5570 5571 void os::PSApiDll::initialize() { 5572 if (!initialized) { 5573 HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0); 5574 if (handle != NULL) { 5575 _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle, 5576 "EnumProcessModules"); 5577 _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle, 5578 "GetModuleFileNameExA"); 5579 _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle, 5580 "GetModuleInformation"); 5581 } 5582 initialized = TRUE; 5583 } 5584 } 5585 5586 5587 5588 BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) { 5589 assert(initialized && _EnumProcessModules != NULL, 5590 "PSApiAvailable() not yet called"); 5591 return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded); 5592 } 5593 5594 DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) { 5595 assert(initialized && _GetModuleFileNameEx != NULL, 5596 "PSApiAvailable() not yet called"); 5597 return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize); 5598 } 5599 5600 BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) { 5601 assert(initialized && 
_GetModuleInformation != NULL, 5602 "PSApiAvailable() not yet called"); 5603 return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb); 5604 } 5605 5606 BOOL os::PSApiDll::PSApiAvailable() { 5607 if (!initialized) { 5608 initialize(); 5609 } 5610 return _EnumProcessModules != NULL && 5611 _GetModuleFileNameEx != NULL && 5612 _GetModuleInformation != NULL; 5613 } 5614 5615 5616 // WinSock2 API 5617 typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA); 5618 typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...); 5619 5620 WSAStartup_Fn os::WinSock2Dll::_WSAStartup = NULL; 5621 gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL; 5622 BOOL os::WinSock2Dll::initialized = FALSE; 5623 5624 void os::WinSock2Dll::initialize() { 5625 if (!initialized) { 5626 HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0); 5627 if (handle != NULL) { 5628 _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup"); 5629 _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname"); 5630 } 5631 initialized = TRUE; 5632 } 5633 } 5634 5635 5636 BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) { 5637 assert(initialized && _WSAStartup != NULL, 5638 "WinSock2Available() not yet called"); 5639 return _WSAStartup(wVersionRequested, lpWSAData); 5640 } 5641 5642 struct hostent* os::WinSock2Dll::gethostbyname(const char *name) { 5643 assert(initialized && _gethostbyname != NULL, 5644 "WinSock2Available() not yet called"); 5645 return _gethostbyname(name); 5646 } 5647 5648 BOOL os::WinSock2Dll::WinSock2Available() { 5649 if (!initialized) { 5650 initialize(); 5651 } 5652 return _WSAStartup != NULL && 5653 _gethostbyname != NULL; 5654 } 5655 5656 typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD); 5657 typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE); 5658 typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, LPCTSTR, PLUID); 5659 
5660 AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL; 5661 OpenProcessToken_Fn os::Advapi32Dll::_OpenProcessToken = NULL; 5662 LookupPrivilegeValue_Fn os::Advapi32Dll::_LookupPrivilegeValue = NULL; 5663 BOOL os::Advapi32Dll::initialized = FALSE; 5664 5665 void os::Advapi32Dll::initialize() { 5666 if (!initialized) { 5667 HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0); 5668 if (handle != NULL) { 5669 _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle, 5670 "AdjustTokenPrivileges"); 5671 _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle, 5672 "OpenProcessToken"); 5673 _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle, 5674 "LookupPrivilegeValueA"); 5675 } 5676 initialized = TRUE; 5677 } 5678 } 5679 5680 BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle, 5681 BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength, 5682 PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) { 5683 assert(initialized && _AdjustTokenPrivileges != NULL, 5684 "AdvapiAvailable() not yet called"); 5685 return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState, 5686 BufferLength, PreviousState, ReturnLength); 5687 } 5688 5689 BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess, 5690 PHANDLE TokenHandle) { 5691 assert(initialized && _OpenProcessToken != NULL, 5692 "AdvapiAvailable() not yet called"); 5693 return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle); 5694 } 5695 5696 BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) { 5697 assert(initialized && _LookupPrivilegeValue != NULL, 5698 "AdvapiAvailable() not yet called"); 5699 return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid); 5700 } 5701 5702 BOOL os::Advapi32Dll::AdvapiAvailable() { 5703 if (!initialized) { 5704 initialize(); 5705 } 5706 return _AdjustTokenPrivileges != NULL && 5707 
_OpenProcessToken != NULL && 5708 _LookupPrivilegeValue != NULL; 5709 } 5710 5711 #endif 5712 5713 #ifndef PRODUCT 5714 5715 // test the code path in reserve_memory_special() that tries to allocate memory in a single 5716 // contiguous memory block at a particular address. 5717 // The test first tries to find a good approximate address to allocate at by using the same 5718 // method to allocate some memory at any address. The test then tries to allocate memory in 5719 // the vicinity (not directly after it to avoid possible by-chance use of that location) 5720 // This is of course only some dodgy assumption, there is no guarantee that the vicinity of 5721 // the previously allocated memory is available for allocation. The only actual failure 5722 // that is reported is when the test tries to allocate at a particular location but gets a 5723 // different valid one. A NULL return value at this point is not considered an error but may 5724 // be legitimate. 5725 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages. 5726 void TestReserveMemorySpecial_test() { 5727 if (!UseLargePages) { 5728 if (VerboseInternalVMTests) { 5729 gclog_or_tty->print("Skipping test because large pages are disabled"); 5730 } 5731 return; 5732 } 5733 // save current value of globals 5734 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; 5735 bool old_use_numa_interleaving = UseNUMAInterleaving; 5736 5737 // set globals to make sure we hit the correct code path 5738 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; 5739 5740 // do an allocation at an address selected by the OS to get a good one. 5741 const size_t large_allocation_size = os::large_page_size() * 4; 5742 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); 5743 if (result == NULL) { 5744 if (VerboseInternalVMTests) { 5745 gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". 
Skipping remainder of test.", 5746 large_allocation_size); 5747 } 5748 } else { 5749 os::release_memory_special(result, large_allocation_size); 5750 5751 // allocate another page within the recently allocated memory area which seems to be a good location. At least 5752 // we managed to get it once. 5753 const size_t expected_allocation_size = os::large_page_size(); 5754 char* expected_location = result + os::large_page_size(); 5755 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); 5756 if (actual_location == NULL) { 5757 if (VerboseInternalVMTests) { 5758 gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.", 5759 expected_location, large_allocation_size); 5760 } 5761 } else { 5762 // release memory 5763 os::release_memory_special(actual_location, expected_allocation_size); 5764 // only now check, after releasing any memory to avoid any leaks. 5765 assert(actual_location == expected_location, 5766 err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead", 5767 expected_location, expected_allocation_size, actual_location)); 5768 } 5769 } 5770 5771 // restore globals 5772 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; 5773 UseNUMAInterleaving = old_use_numa_interleaving; 5774 } 5775 #endif // PRODUCT 5776