/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 * 23 */ 24 25 // Must be at least Windows 2000 or XP to use IsDebuggerPresent 26 #define _WIN32_WINNT 0x500 27 28 // no precompiled headers 29 #include "classfile/classLoader.hpp" 30 #include "classfile/systemDictionary.hpp" 31 #include "classfile/vmSymbols.hpp" 32 #include "code/icBuffer.hpp" 33 #include "code/vtableStubs.hpp" 34 #include "compiler/compileBroker.hpp" 35 #include "compiler/disassembler.hpp" 36 #include "interpreter/interpreter.hpp" 37 #include "jvm_windows.h" 38 #include "memory/allocation.inline.hpp" 39 #include "memory/filemap.hpp" 40 #include "mutex_windows.inline.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "os_share_windows.hpp" 43 #include "prims/jniFastGetField.hpp" 44 #include "prims/jvm.h" 45 #include "prims/jvm_misc.hpp" 46 #include "runtime/arguments.hpp" 47 #include "runtime/extendedPC.hpp" 48 #include "runtime/globals.hpp" 49 #include "runtime/interfaceSupport.hpp" 50 #include "runtime/java.hpp" 51 #include "runtime/javaCalls.hpp" 52 #include "runtime/mutexLocker.hpp" 53 #include "runtime/objectMonitor.hpp" 54 #include "runtime/orderAccess.inline.hpp" 55 #include "runtime/osThread.hpp" 56 #include "runtime/perfMemory.hpp" 57 #include "runtime/sharedRuntime.hpp" 58 #include "runtime/statSampler.hpp" 59 #include "runtime/stubRoutines.hpp" 60 #include "runtime/thread.inline.hpp" 61 #include "runtime/threadCritical.hpp" 62 #include "runtime/timer.hpp" 63 #include "services/attachListener.hpp" 64 #include "services/memTracker.hpp" 65 #include "services/runtimeService.hpp" 66 #include "utilities/decoder.hpp" 67 #include "utilities/defaultStream.hpp" 68 #include "utilities/events.hpp" 69 #include "utilities/growableArray.hpp" 70 #include "utilities/vmError.hpp" 71 72 #ifdef _DEBUG 73 #include <crtdbg.h> 74 #endif 75 76 77 #include <windows.h> 78 #include <sys/types.h> 79 #include <sys/stat.h> 80 #include <sys/timeb.h> 81 #include <objidl.h> 82 #include <shlobj.h> 83 84 #include <malloc.h> 85 #include <signal.h> 86 #include <direct.h> 
87 #include <errno.h> 88 #include <fcntl.h> 89 #include <io.h> 90 #include <process.h> // For _beginthreadex(), _endthreadex() 91 #include <imagehlp.h> // For os::dll_address_to_function_name 92 /* for enumerating dll libraries */ 93 #include <vdmdbg.h> 94 95 // for timer info max values which include all bits 96 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) 97 98 // For DLL loading/load error detection 99 // Values of PE COFF 100 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c 101 #define IMAGE_FILE_SIGNATURE_LENGTH 4 102 103 static HANDLE main_process; 104 static HANDLE main_thread; 105 static int main_thread_id; 106 107 static FILETIME process_creation_time; 108 static FILETIME process_exit_time; 109 static FILETIME process_user_time; 110 static FILETIME process_kernel_time; 111 112 #ifdef _M_IA64 113 #define __CPU__ ia64 114 #else 115 #ifdef _M_AMD64 116 #define __CPU__ amd64 117 #else 118 #define __CPU__ i486 119 #endif 120 #endif 121 122 // save DLL module handle, used by GetModuleFileName 123 124 HINSTANCE vm_lib_handle; 125 126 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) { 127 switch (reason) { 128 case DLL_PROCESS_ATTACH: 129 vm_lib_handle = hinst; 130 if(ForceTimeHighResolution) 131 timeBeginPeriod(1L); 132 break; 133 case DLL_PROCESS_DETACH: 134 if(ForceTimeHighResolution) 135 timeEndPeriod(1L); 136 137 break; 138 default: 139 break; 140 } 141 return true; 142 } 143 144 static inline double fileTimeAsDouble(FILETIME* time) { 145 const double high = (double) ((unsigned int) ~0); 146 const double split = 10000000.0; 147 double result = (time->dwLowDateTime / split) + 148 time->dwHighDateTime * (high/split); 149 return result; 150 } 151 152 // Implementation of os 153 154 bool os::getenv(const char* name, char* buffer, int len) { 155 int result = GetEnvironmentVariable(name, buffer, len); 156 return result > 0 && result < len; 157 } 158 159 bool os::unsetenv(const char* name) { 160 assert(name != NULL, "Null pointer"); 161 return 
(SetEnvironmentVariable(name, NULL) == TRUE); 162 } 163 164 // No setuid programs under Windows. 165 bool os::have_special_privileges() { 166 return false; 167 } 168 169 170 // This method is a periodic task to check for misbehaving JNI applications 171 // under CheckJNI, we can add any periodic checks here. 172 // For Windows at the moment does nothing 173 void os::run_periodic_checks() { 174 return; 175 } 176 177 #ifndef _WIN64 178 // previous UnhandledExceptionFilter, if there is one 179 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL; 180 181 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo); 182 #endif 183 void os::init_system_properties_values() { 184 /* sysclasspath, java_home, dll_dir */ 185 { 186 char *home_path; 187 char *dll_path; 188 char *pslash; 189 char *bin = "\\bin"; 190 char home_dir[MAX_PATH]; 191 192 if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) { 193 os::jvm_path(home_dir, sizeof(home_dir)); 194 // Found the full path to jvm.dll. 195 // Now cut the path to <java_home>/jre if we can. 
196 *(strrchr(home_dir, '\\')) = '\0'; /* get rid of \jvm.dll */ 197 pslash = strrchr(home_dir, '\\'); 198 if (pslash != NULL) { 199 *pslash = '\0'; /* get rid of \{client|server} */ 200 pslash = strrchr(home_dir, '\\'); 201 if (pslash != NULL) 202 *pslash = '\0'; /* get rid of \bin */ 203 } 204 } 205 206 home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal); 207 if (home_path == NULL) 208 return; 209 strcpy(home_path, home_dir); 210 Arguments::set_java_home(home_path); 211 212 dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal); 213 if (dll_path == NULL) 214 return; 215 strcpy(dll_path, home_dir); 216 strcat(dll_path, bin); 217 Arguments::set_dll_dir(dll_path); 218 219 if (!set_boot_path('\\', ';')) 220 return; 221 } 222 223 /* library_path */ 224 #define EXT_DIR "\\lib\\ext" 225 #define BIN_DIR "\\bin" 226 #define PACKAGE_DIR "\\Sun\\Java" 227 { 228 /* Win32 library search order (See the documentation for LoadLibrary): 229 * 230 * 1. The directory from which application is loaded. 231 * 2. The system wide Java Extensions directory (Java only) 232 * 3. System directory (GetSystemDirectory) 233 * 4. Windows directory (GetWindowsDirectory) 234 * 5. The PATH environment variable 235 * 6. The current directory 236 */ 237 238 char *library_path; 239 char tmp[MAX_PATH]; 240 char *path_str = ::getenv("PATH"); 241 242 library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) + 243 sizeof(BIN_DIR) + (path_str ? 
strlen(path_str) : 0) + 10, mtInternal); 244 245 library_path[0] = '\0'; 246 247 GetModuleFileName(NULL, tmp, sizeof(tmp)); 248 *(strrchr(tmp, '\\')) = '\0'; 249 strcat(library_path, tmp); 250 251 GetWindowsDirectory(tmp, sizeof(tmp)); 252 strcat(library_path, ";"); 253 strcat(library_path, tmp); 254 strcat(library_path, PACKAGE_DIR BIN_DIR); 255 256 GetSystemDirectory(tmp, sizeof(tmp)); 257 strcat(library_path, ";"); 258 strcat(library_path, tmp); 259 260 GetWindowsDirectory(tmp, sizeof(tmp)); 261 strcat(library_path, ";"); 262 strcat(library_path, tmp); 263 264 if (path_str) { 265 strcat(library_path, ";"); 266 strcat(library_path, path_str); 267 } 268 269 strcat(library_path, ";."); 270 271 Arguments::set_library_path(library_path); 272 FREE_C_HEAP_ARRAY(char, library_path, mtInternal); 273 } 274 275 /* Default extensions directory */ 276 { 277 char path[MAX_PATH]; 278 char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1]; 279 GetWindowsDirectory(path, MAX_PATH); 280 sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR, 281 path, PACKAGE_DIR, EXT_DIR); 282 Arguments::set_ext_dirs(buf); 283 } 284 #undef EXT_DIR 285 #undef BIN_DIR 286 #undef PACKAGE_DIR 287 288 /* Default endorsed standards directory. 
*/ 289 { 290 #define ENDORSED_DIR "\\lib\\endorsed" 291 size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR); 292 char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal); 293 sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR); 294 Arguments::set_endorsed_dirs(buf); 295 #undef ENDORSED_DIR 296 } 297 298 #ifndef _WIN64 299 // set our UnhandledExceptionFilter and save any previous one 300 prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception); 301 #endif 302 303 // Done 304 return; 305 } 306 307 void os::breakpoint() { 308 DebugBreak(); 309 } 310 311 // Invoked from the BREAKPOINT Macro 312 extern "C" void breakpoint() { 313 os::breakpoint(); 314 } 315 316 /* 317 * RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP. 318 * So far, this method is only used by Native Memory Tracking, which is 319 * only supported on Windows XP or later. 320 */ 321 322 int os::get_native_stack(address* stack, int frames, int toSkip) { 323 #ifdef _NMT_NOINLINE_ 324 toSkip ++; 325 #endif 326 int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames, 327 (PVOID*)stack, NULL); 328 for (int index = captured; index < frames; index ++) { 329 stack[index] = NULL; 330 } 331 return captured; 332 } 333 334 335 // os::current_stack_base() 336 // 337 // Returns the base of the stack, which is the stack's 338 // starting address. This function must be called 339 // while running on the stack of the thread being queried. 340 341 address os::current_stack_base() { 342 MEMORY_BASIC_INFORMATION minfo; 343 address stack_bottom; 344 size_t stack_size; 345 346 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 347 stack_bottom = (address)minfo.AllocationBase; 348 stack_size = minfo.RegionSize; 349 350 // Add up the sizes of all the regions with the same 351 // AllocationBase. 
352 while( 1 ) 353 { 354 VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo)); 355 if ( stack_bottom == (address)minfo.AllocationBase ) 356 stack_size += minfo.RegionSize; 357 else 358 break; 359 } 360 361 #ifdef _M_IA64 362 // IA64 has memory and register stacks 363 // 364 // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit 365 // at thread creation (1MB backing store growing upwards, 1MB memory stack 366 // growing downwards, 2MB summed up) 367 // 368 // ... 369 // ------- top of stack (high address) ----- 370 // | 371 // | 1MB 372 // | Backing Store (Register Stack) 373 // | 374 // | / \ 375 // | | 376 // | | 377 // | | 378 // ------------------------ stack base ----- 379 // | 1MB 380 // | Memory Stack 381 // | 382 // | | 383 // | | 384 // | | 385 // | \ / 386 // | 387 // ----- bottom of stack (low address) ----- 388 // ... 389 390 stack_size = stack_size / 2; 391 #endif 392 return stack_bottom + stack_size; 393 } 394 395 size_t os::current_stack_size() { 396 size_t sz; 397 MEMORY_BASIC_INFORMATION minfo; 398 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 399 sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase; 400 return sz; 401 } 402 403 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 404 const struct tm* time_struct_ptr = localtime(clock); 405 if (time_struct_ptr != NULL) { 406 *res = *time_struct_ptr; 407 return res; 408 } 409 return NULL; 410 } 411 412 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo); 413 414 // Thread start routine for all new Java threads 415 static unsigned __stdcall java_start(Thread* thread) { 416 // Try to randomize the cache line index of hot stack frames. 417 // This helps when threads of the same stack traces evict each other's 418 // cache lines. The threads can be either from the same JVM instance, or 419 // from different JVM instances. The benefit is especially true for 420 // processors with hyperthreading technology. 
421 static int counter = 0; 422 int pid = os::current_process_id(); 423 _alloca(((pid ^ counter++) & 7) * 128); 424 425 OSThread* osthr = thread->osthread(); 426 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 427 428 if (UseNUMA) { 429 int lgrp_id = os::numa_get_group_id(); 430 if (lgrp_id != -1) { 431 thread->set_lgrp_id(lgrp_id); 432 } 433 } 434 435 436 // Install a win32 structured exception handler around every thread created 437 // by VM, so VM can genrate error dump when an exception occurred in non- 438 // Java thread (e.g. VM thread). 439 __try { 440 thread->run(); 441 } __except(topLevelExceptionFilter( 442 (_EXCEPTION_POINTERS*)_exception_info())) { 443 // Nothing to do. 444 } 445 446 // One less thread is executing 447 // When the VMThread gets here, the main thread may have already exited 448 // which frees the CodeHeap containing the Atomic::add code 449 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) { 450 Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count); 451 } 452 453 return 0; 454 } 455 456 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) { 457 // Allocate the OSThread object 458 OSThread* osthread = new OSThread(NULL, NULL); 459 if (osthread == NULL) return NULL; 460 461 // Initialize support for Java interrupts 462 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 463 if (interrupt_event == NULL) { 464 delete osthread; 465 return NULL; 466 } 467 osthread->set_interrupt_event(interrupt_event); 468 469 // Store info on the Win32 thread into the OSThread 470 osthread->set_thread_handle(thread_handle); 471 osthread->set_thread_id(thread_id); 472 473 if (UseNUMA) { 474 int lgrp_id = os::numa_get_group_id(); 475 if (lgrp_id != -1) { 476 thread->set_lgrp_id(lgrp_id); 477 } 478 } 479 480 // Initial thread state is INITIALIZED, not SUSPENDED 481 osthread->set_state(INITIALIZED); 482 483 return osthread; 484 } 485 486 487 bool 
os::create_attached_thread(JavaThread* thread) { 488 #ifdef ASSERT 489 thread->verify_not_published(); 490 #endif 491 HANDLE thread_h; 492 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(), 493 &thread_h, THREAD_ALL_ACCESS, false, 0)) { 494 fatal("DuplicateHandle failed\n"); 495 } 496 OSThread* osthread = create_os_thread(thread, thread_h, 497 (int)current_thread_id()); 498 if (osthread == NULL) { 499 return false; 500 } 501 502 // Initial thread state is RUNNABLE 503 osthread->set_state(RUNNABLE); 504 505 thread->set_osthread(osthread); 506 return true; 507 } 508 509 bool os::create_main_thread(JavaThread* thread) { 510 #ifdef ASSERT 511 thread->verify_not_published(); 512 #endif 513 if (_starting_thread == NULL) { 514 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 515 if (_starting_thread == NULL) { 516 return false; 517 } 518 } 519 520 // The primordial thread is runnable from the start) 521 _starting_thread->set_state(RUNNABLE); 522 523 thread->set_osthread(_starting_thread); 524 return true; 525 } 526 527 // Allocate and initialize a new OSThread 528 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) { 529 unsigned thread_id; 530 531 // Allocate the OSThread object 532 OSThread* osthread = new OSThread(NULL, NULL); 533 if (osthread == NULL) { 534 return false; 535 } 536 537 // Initialize support for Java interrupts 538 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 539 if (interrupt_event == NULL) { 540 delete osthread; 541 return NULL; 542 } 543 osthread->set_interrupt_event(interrupt_event); 544 osthread->set_interrupted(false); 545 546 thread->set_osthread(osthread); 547 548 if (stack_size == 0) { 549 switch (thr_type) { 550 case os::java_thread: 551 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 552 if (JavaThread::stack_size_at_create() > 0) 553 stack_size = JavaThread::stack_size_at_create(); 554 break; 555 case 
os::compiler_thread: 556 if (CompilerThreadStackSize > 0) { 557 stack_size = (size_t)(CompilerThreadStackSize * K); 558 break; 559 } // else fall through: 560 // use VMThreadStackSize if CompilerThreadStackSize is not defined 561 case os::vm_thread: 562 case os::pgc_thread: 563 case os::cgc_thread: 564 case os::watcher_thread: 565 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 566 break; 567 } 568 } 569 570 // Create the Win32 thread 571 // 572 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() 573 // does not specify stack size. Instead, it specifies the size of 574 // initially committed space. The stack size is determined by 575 // PE header in the executable. If the committed "stack_size" is larger 576 // than default value in the PE header, the stack is rounded up to the 577 // nearest multiple of 1MB. For example if the launcher has default 578 // stack size of 320k, specifying any size less than 320k does not 579 // affect the actual stack size at all, it only affects the initial 580 // commitment. On the other hand, specifying 'stack_size' larger than 581 // default value may cause significant increase in memory usage, because 582 // not only the stack space will be rounded up to MB, but also the 583 // entire space is committed upfront. 584 // 585 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' 586 // for CreateThread() that can treat 'stack_size' as stack size. However we 587 // are not supposed to call CreateThread() directly according to MSDN 588 // document because JVM uses C runtime library. The good news is that the 589 // flag appears to work with _beginthredex() as well. 
590 591 #ifndef STACK_SIZE_PARAM_IS_A_RESERVATION 592 #define STACK_SIZE_PARAM_IS_A_RESERVATION (0x10000) 593 #endif 594 595 HANDLE thread_handle = 596 (HANDLE)_beginthreadex(NULL, 597 (unsigned)stack_size, 598 (unsigned (__stdcall *)(void*)) java_start, 599 thread, 600 CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 601 &thread_id); 602 if (thread_handle == NULL) { 603 // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again 604 // without the flag. 605 thread_handle = 606 (HANDLE)_beginthreadex(NULL, 607 (unsigned)stack_size, 608 (unsigned (__stdcall *)(void*)) java_start, 609 thread, 610 CREATE_SUSPENDED, 611 &thread_id); 612 } 613 if (thread_handle == NULL) { 614 // Need to clean up stuff we've allocated so far 615 CloseHandle(osthread->interrupt_event()); 616 thread->set_osthread(NULL); 617 delete osthread; 618 return NULL; 619 } 620 621 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count); 622 623 // Store info on the Win32 thread into the OSThread 624 osthread->set_thread_handle(thread_handle); 625 osthread->set_thread_id(thread_id); 626 627 // Initial thread state is INITIALIZED, not SUSPENDED 628 osthread->set_state(INITIALIZED); 629 630 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 631 return true; 632 } 633 634 635 // Free Win32 resources related to the OSThread 636 void os::free_thread(OSThread* osthread) { 637 assert(osthread != NULL, "osthread not set"); 638 CloseHandle(osthread->thread_handle()); 639 CloseHandle(osthread->interrupt_event()); 640 delete osthread; 641 } 642 643 644 static int has_performance_count = 0; 645 static jlong first_filetime; 646 static jlong initial_performance_count; 647 static jlong performance_frequency; 648 649 650 jlong as_long(LARGE_INTEGER x) { 651 jlong result = 0; // initialization to avoid warning 652 set_high(&result, x.HighPart); 653 set_low(&result, x.LowPart); 654 return result; 655 } 656 657 658 jlong os::elapsed_counter() { 659 
LARGE_INTEGER count; 660 if (has_performance_count) { 661 QueryPerformanceCounter(&count); 662 return as_long(count) - initial_performance_count; 663 } else { 664 FILETIME wt; 665 GetSystemTimeAsFileTime(&wt); 666 return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime); 667 } 668 } 669 670 671 jlong os::elapsed_frequency() { 672 if (has_performance_count) { 673 return performance_frequency; 674 } else { 675 // the FILETIME time is the number of 100-nanosecond intervals since January 1,1601. 676 return 10000000; 677 } 678 } 679 680 681 julong os::available_memory() { 682 return win32::available_memory(); 683 } 684 685 julong os::win32::available_memory() { 686 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 687 // value if total memory is larger than 4GB 688 MEMORYSTATUSEX ms; 689 ms.dwLength = sizeof(ms); 690 GlobalMemoryStatusEx(&ms); 691 692 return (julong)ms.ullAvailPhys; 693 } 694 695 julong os::physical_memory() { 696 return win32::physical_memory(); 697 } 698 699 bool os::has_allocatable_memory_limit(julong* limit) { 700 MEMORYSTATUSEX ms; 701 ms.dwLength = sizeof(ms); 702 GlobalMemoryStatusEx(&ms); 703 #ifdef _LP64 704 *limit = (julong)ms.ullAvailVirtual; 705 return true; 706 #else 707 // Limit to 1400m because of the 2gb address space wall 708 *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual); 709 return true; 710 #endif 711 } 712 713 // VC6 lacks DWORD_PTR 714 #if _MSC_VER < 1300 715 typedef UINT_PTR DWORD_PTR; 716 #endif 717 718 int os::active_processor_count() { 719 // User has overridden the number of active processors 720 if (ActiveProcessorCount > 0) { 721 if (PrintActiveCpus) { 722 tty->print_cr("active_processor_count: " 723 "active processor count set by user : %d", 724 ActiveProcessorCount); 725 } 726 return ActiveProcessorCount; 727 } 728 729 DWORD_PTR lpProcessAffinityMask = 0; 730 DWORD_PTR lpSystemAffinityMask = 0; 731 int proc_count = processor_count(); 732 if (proc_count <= 
sizeof(UINT_PTR) * BitsPerByte && 733 GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) { 734 // Nof active processors is number of bits in process affinity mask 735 int bitcount = 0; 736 while (lpProcessAffinityMask != 0) { 737 lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1); 738 bitcount++; 739 } 740 return bitcount; 741 } else { 742 return proc_count; 743 } 744 } 745 746 void os::set_native_thread_name(const char *name) { 747 // Not yet implemented. 748 return; 749 } 750 751 bool os::distribute_processes(uint length, uint* distribution) { 752 // Not yet implemented. 753 return false; 754 } 755 756 bool os::bind_to_processor(uint processor_id) { 757 // Not yet implemented. 758 return false; 759 } 760 761 static void initialize_performance_counter() { 762 LARGE_INTEGER count; 763 if (QueryPerformanceFrequency(&count)) { 764 has_performance_count = 1; 765 performance_frequency = as_long(count); 766 QueryPerformanceCounter(&count); 767 initial_performance_count = as_long(count); 768 } else { 769 has_performance_count = 0; 770 FILETIME wt; 771 GetSystemTimeAsFileTime(&wt); 772 first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 773 } 774 } 775 776 777 double os::elapsedTime() { 778 return (double) elapsed_counter() / (double) elapsed_frequency(); 779 } 780 781 782 // Windows format: 783 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601. 
784 // Java format: 785 // Java standards require the number of milliseconds since 1/1/1970 786 787 // Constant offset - calculated using offset() 788 static jlong _offset = 116444736000000000; 789 // Fake time counter for reproducible results when debugging 790 static jlong fake_time = 0; 791 792 #ifdef ASSERT 793 // Just to be safe, recalculate the offset in debug mode 794 static jlong _calculated_offset = 0; 795 static int _has_calculated_offset = 0; 796 797 jlong offset() { 798 if (_has_calculated_offset) return _calculated_offset; 799 SYSTEMTIME java_origin; 800 java_origin.wYear = 1970; 801 java_origin.wMonth = 1; 802 java_origin.wDayOfWeek = 0; // ignored 803 java_origin.wDay = 1; 804 java_origin.wHour = 0; 805 java_origin.wMinute = 0; 806 java_origin.wSecond = 0; 807 java_origin.wMilliseconds = 0; 808 FILETIME jot; 809 if (!SystemTimeToFileTime(&java_origin, &jot)) { 810 fatal(err_msg("Error = %d\nWindows error", GetLastError())); 811 } 812 _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime); 813 _has_calculated_offset = 1; 814 assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal"); 815 return _calculated_offset; 816 } 817 #else 818 jlong offset() { 819 return _offset; 820 } 821 #endif 822 823 jlong windows_to_java_time(FILETIME wt) { 824 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 825 return (a - offset()) / 10000; 826 } 827 828 FILETIME java_to_windows_time(jlong l) { 829 jlong a = (l * 10000) + offset(); 830 FILETIME result; 831 result.dwHighDateTime = high(a); 832 result.dwLowDateTime = low(a); 833 return result; 834 } 835 836 bool os::supports_vtime() { return true; } 837 bool os::enable_vtime() { return false; } 838 bool os::vtime_enabled() { return false; } 839 840 double os::elapsedVTime() { 841 FILETIME created; 842 FILETIME exited; 843 FILETIME kernel; 844 FILETIME user; 845 if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) { 846 // the resolution 
of windows_to_java_time() should be sufficient (ms) 847 return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS; 848 } else { 849 return elapsedTime(); 850 } 851 } 852 853 jlong os::javaTimeMillis() { 854 if (UseFakeTimers) { 855 return fake_time++; 856 } else { 857 FILETIME wt; 858 GetSystemTimeAsFileTime(&wt); 859 return windows_to_java_time(wt); 860 } 861 } 862 863 jlong os::javaTimeNanos() { 864 if (!has_performance_count) { 865 return javaTimeMillis() * NANOSECS_PER_MILLISEC; // the best we can do. 866 } else { 867 LARGE_INTEGER current_count; 868 QueryPerformanceCounter(¤t_count); 869 double current = as_long(current_count); 870 double freq = performance_frequency; 871 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC); 872 return time; 873 } 874 } 875 876 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 877 if (!has_performance_count) { 878 // javaTimeMillis() doesn't have much percision, 879 // but it is not going to wrap -- so all 64 bits 880 info_ptr->max_value = ALL_64_BITS; 881 882 // this is a wall clock timer, so may skip 883 info_ptr->may_skip_backward = true; 884 info_ptr->may_skip_forward = true; 885 } else { 886 jlong freq = performance_frequency; 887 if (freq < NANOSECS_PER_SEC) { 888 // the performance counter is 64 bits and we will 889 // be multiplying it -- so no wrap in 64 bits 890 info_ptr->max_value = ALL_64_BITS; 891 } else if (freq > NANOSECS_PER_SEC) { 892 // use the max value the counter can reach to 893 // determine the max value which could be returned 894 julong max_counter = (julong)ALL_64_BITS; 895 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC)); 896 } else { 897 // the performance counter is 64 bits and we will 898 // be using it directly -- so no wrap in 64 bits 899 info_ptr->max_value = ALL_64_BITS; 900 } 901 902 // using a counter, so no skipping 903 info_ptr->may_skip_backward = false; 904 info_ptr->may_skip_forward = false; 905 } 906 info_ptr->kind = 
JVMTI_TIMER_ELAPSED; // elapsed not CPU time 907 } 908 909 char* os::local_time_string(char *buf, size_t buflen) { 910 SYSTEMTIME st; 911 GetLocalTime(&st); 912 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 913 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); 914 return buf; 915 } 916 917 bool os::getTimesSecs(double* process_real_time, 918 double* process_user_time, 919 double* process_system_time) { 920 HANDLE h_process = GetCurrentProcess(); 921 FILETIME create_time, exit_time, kernel_time, user_time; 922 BOOL result = GetProcessTimes(h_process, 923 &create_time, 924 &exit_time, 925 &kernel_time, 926 &user_time); 927 if (result != 0) { 928 FILETIME wt; 929 GetSystemTimeAsFileTime(&wt); 930 jlong rtc_millis = windows_to_java_time(wt); 931 jlong user_millis = windows_to_java_time(user_time); 932 jlong system_millis = windows_to_java_time(kernel_time); 933 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS); 934 *process_user_time = ((double) user_millis) / ((double) MILLIUNITS); 935 *process_system_time = ((double) system_millis) / ((double) MILLIUNITS); 936 return true; 937 } else { 938 return false; 939 } 940 } 941 942 void os::shutdown() { 943 944 // allow PerfMemory to attempt cleanup of any persistent resources 945 perfMemory_exit(); 946 947 // flush buffered output, finish log files 948 ostream_abort(); 949 950 // Check for abort hook 951 abort_hook_t abort_hook = Arguments::abort_hook(); 952 if (abort_hook != NULL) { 953 abort_hook(); 954 } 955 } 956 957 958 static BOOL (WINAPI *_MiniDumpWriteDump) ( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION, 959 PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION); 960 961 void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) { 962 HINSTANCE dbghelp; 963 EXCEPTION_POINTERS ep; 964 MINIDUMP_EXCEPTION_INFORMATION mei; 965 MINIDUMP_EXCEPTION_INFORMATION* pmei; 966 967 HANDLE hProcess = 
GetCurrentProcess(); 968 DWORD processId = GetCurrentProcessId(); 969 HANDLE dumpFile; 970 MINIDUMP_TYPE dumpType; 971 static const char* cwd; 972 973 // Default is to always create dump for debug builds, on product builds only dump on server versions of Windows. 974 #ifndef ASSERT 975 // If running on a client version of Windows and user has not explicitly enabled dumping 976 if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) { 977 VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false); 978 return; 979 // If running on a server version of Windows and user has explictly disabled dumping 980 } else if (os::win32::is_windows_server() && !FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) { 981 VMError::report_coredump_status("Minidump has been disabled from the command line", false); 982 return; 983 } 984 #else 985 if (!FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) { 986 VMError::report_coredump_status("Minidump has been disabled from the command line", false); 987 return; 988 } 989 #endif 990 991 dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0); 992 993 if (dbghelp == NULL) { 994 VMError::report_coredump_status("Failed to load dbghelp.dll", false); 995 return; 996 } 997 998 _MiniDumpWriteDump = CAST_TO_FN_PTR( 999 BOOL(WINAPI *)( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION, 1000 PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION), 1001 GetProcAddress(dbghelp, "MiniDumpWriteDump")); 1002 1003 if (_MiniDumpWriteDump == NULL) { 1004 VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false); 1005 return; 1006 } 1007 1008 dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData); 1009 1010 // Older versions of dbghelp.h doesn't contain all the dumptypes we want, dbghelp.h with 1011 // API_VERSION_NUMBER 11 or higher contains the ones we want though 1012 #if 
API_VERSION_NUMBER >= 11 1013 dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | 1014 MiniDumpWithUnloadedModules); 1015 #endif 1016 1017 cwd = get_current_directory(NULL, 0); 1018 jio_snprintf(buffer, bufferSize, "%s\\hs_err_pid%u.mdmp",cwd, current_process_id()); 1019 dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); 1020 1021 if (dumpFile == INVALID_HANDLE_VALUE) { 1022 VMError::report_coredump_status("Failed to create file for dumping", false); 1023 return; 1024 } 1025 if (exceptionRecord != NULL && contextRecord != NULL) { 1026 ep.ContextRecord = (PCONTEXT) contextRecord; 1027 ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord; 1028 1029 mei.ThreadId = GetCurrentThreadId(); 1030 mei.ExceptionPointers = &ep; 1031 pmei = &mei; 1032 } else { 1033 pmei = NULL; 1034 } 1035 1036 1037 // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all 1038 // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then. 
  // First attempt the richer dumpType; if this dbghelp.dll is too old to
  // support it, retry with plain MiniDumpWithFullMemory before giving up.
  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
    DWORD error = GetLastError();
    LPTSTR msgbuf = NULL;

    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) {

      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
      LocalFree(msgbuf);
    } else {
      // Call to FormatMessage failed, just include the result from GetLastError
      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
    }
    VMError::report_coredump_status(buffer, false);
  } else {
    VMError::report_coredump_status(buffer, true);
  }

  CloseHandle(dumpFile);
}

// Abort the VM: run the shutdown sequence, then exit with a non-zero code.
// Note: the dump_core argument is ignored here (see the comment below) -
// minidump creation is handled by the error-reporting path above.
void os::abort(bool dump_core)
{
  os::shutdown();
  // no core dump on Windows
  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  _exit(-1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
//  * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

/* Caller must have already run dirname through JVM_NativePath, which removes
   duplicate slashes and converts all instances of '/' into '\\'.
*/

// Win32 emulation of POSIX opendir().  Allocates a DIR handle (declared in
// jvm_win32.h), validates that 'dirname' names an existing directory, and
// primes it with the first FindFirstFile() result so readdir() can walk it.
// Returns NULL and sets errno (ENOMEM/ENOENT/ENOTDIR/EACCES) on failure.
DIR *
os::opendir(const char *dirname)
{
  assert(dirname != NULL, "just checking");      // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
  DWORD fattr;                                   // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == NULL) {
    errno = ENOMEM;
    return NULL;
  }

  /*
   * Win32 accepts "\" in its POSIX stat(), but refuses to treat it
   * as a directory in FindFirstFile().  We detect this case here and
   * prepend the current drive name.
   */
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    alt_dirname[0] = _getdrive() + 'A' - 1;   // _getdrive() is 1-based (A: == 1)
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  // +5 leaves room for the "\\*.*" wildcard suffix appended below plus NUL.
  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dirp->path == NULL) {
    free(dirp, mtInternal);
    errno = ENOMEM;
    return NULL;
  }
  strcpy(dirp->path, dirname);

  fattr = GetFileAttributes(dirp->path);
  // Use the named constant (== 0xffffffff) for consistency with
  // file_exists() elsewhere in this file.
  if (fattr == INVALID_FILE_ATTRIBUTES) {
    free(dirp->path, mtInternal);
    free(dirp, mtInternal);
    errno = ENOENT;
    return NULL;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path, mtInternal);
    free(dirp, mtInternal);
    errno = ENOTDIR;
    return NULL;
  }

  /* Append "*.*", or possibly "\\*.*", to path */
  if (dirp->path[1] == ':' &&
      (dirp->path[2] == '\0' ||
       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    /* No '\\' needed for cases like "Z:" or "Z:\" */
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    // ERROR_FILE_NOT_FOUND simply means the directory is empty; readdir()
    // will return no entries.  Anything else is treated as a failure.
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path, mtInternal);
      free(dirp, mtInternal);
      errno = EACCES;
      return NULL;
    }
  }
  return dirp;
}

/* parameter dbuf unused on Windows */

struct dirent * 1157 os::readdir(DIR *dirp, dirent *dbuf) 1158 { 1159 assert(dirp != NULL, "just checking"); // hotspot change 1160 if (dirp->handle == INVALID_HANDLE_VALUE) { 1161 return 0; 1162 } 1163 1164 strcpy(dirp->dirent.d_name, dirp->find_data.cFileName); 1165 1166 if (!FindNextFile(dirp->handle, &dirp->find_data)) { 1167 if (GetLastError() == ERROR_INVALID_HANDLE) { 1168 errno = EBADF; 1169 return 0; 1170 } 1171 FindClose(dirp->handle); 1172 dirp->handle = INVALID_HANDLE_VALUE; 1173 } 1174 1175 return &dirp->dirent; 1176 } 1177 1178 int 1179 os::closedir(DIR *dirp) 1180 { 1181 assert(dirp != NULL, "just checking"); // hotspot change 1182 if (dirp->handle != INVALID_HANDLE_VALUE) { 1183 if (!FindClose(dirp->handle)) { 1184 errno = EBADF; 1185 return -1; 1186 } 1187 dirp->handle = INVALID_HANDLE_VALUE; 1188 } 1189 free(dirp->path, mtInternal); 1190 free(dirp, mtInternal); 1191 return 0; 1192 } 1193 1194 // This must be hard coded because it's the system's temporary 1195 // directory not the java application's temp directory, ala java.io.tmpdir. 1196 const char* os::get_temp_directory() { 1197 static char path_buf[MAX_PATH]; 1198 if (GetTempPath(MAX_PATH, path_buf)>0) 1199 return path_buf; 1200 else{ 1201 path_buf[0]='\0'; 1202 return path_buf; 1203 } 1204 } 1205 1206 static bool file_exists(const char* filename) { 1207 if (filename == NULL || strlen(filename) == 0) { 1208 return false; 1209 } 1210 return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES; 1211 } 1212 1213 bool os::dll_build_name(char *buffer, size_t buflen, 1214 const char* pname, const char* fname) { 1215 bool retval = false; 1216 const size_t pnamelen = pname ? strlen(pname) : 0; 1217 const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0; 1218 1219 // Return error on buffer overflow. 
1220 if (pnamelen + strlen(fname) + 10 > buflen) { 1221 return retval; 1222 } 1223 1224 if (pnamelen == 0) { 1225 jio_snprintf(buffer, buflen, "%s.dll", fname); 1226 retval = true; 1227 } else if (c == ':' || c == '\\') { 1228 jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname); 1229 retval = true; 1230 } else if (strchr(pname, *os::path_separator()) != NULL) { 1231 int n; 1232 char** pelements = split_path(pname, &n); 1233 if (pelements == NULL) { 1234 return false; 1235 } 1236 for (int i = 0 ; i < n ; i++) { 1237 char* path = pelements[i]; 1238 // Really shouldn't be NULL, but check can't hurt 1239 size_t plen = (path == NULL) ? 0 : strlen(path); 1240 if (plen == 0) { 1241 continue; // skip the empty path values 1242 } 1243 const char lastchar = path[plen - 1]; 1244 if (lastchar == ':' || lastchar == '\\') { 1245 jio_snprintf(buffer, buflen, "%s%s.dll", path, fname); 1246 } else { 1247 jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname); 1248 } 1249 if (file_exists(buffer)) { 1250 retval = true; 1251 break; 1252 } 1253 } 1254 // release the storage 1255 for (int i = 0 ; i < n ; i++) { 1256 if (pelements[i] != NULL) { 1257 FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal); 1258 } 1259 } 1260 if (pelements != NULL) { 1261 FREE_C_HEAP_ARRAY(char*, pelements, mtInternal); 1262 } 1263 } else { 1264 jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname); 1265 retval = true; 1266 } 1267 return retval; 1268 } 1269 1270 // Needs to be in os specific directory because windows requires another 1271 // header file <direct.h> 1272 const char* os::get_current_directory(char *buf, size_t buflen) { 1273 int n = static_cast<int>(buflen); 1274 if (buflen > INT_MAX) n = INT_MAX; 1275 return _getcwd(buf, n); 1276 } 1277 1278 //----------------------------------------------------------- 1279 // Helper functions for fatal error handler 1280 #ifdef _WIN64 1281 // Helper routine which returns true if address in 1282 // within the NTDLL address space. 
//
static bool _addr_in_ntdll( address addr )
{
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if ( hmod == NULL ) return false;
  if ( !os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod,
                                            &minfo, sizeof(MODULEINFO)) )
    return false;

  // Is addr within [base, base + SizeOfImage)?
  if ( (addr >= minfo.lpBaseOfDll) &&
       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
    return true;
  else
    return false;
}
#endif


// Enumerate all modules for a given process ID
//
// Notice that Windows 95/98/Me and Windows NT/2000/XP have
// different API for doing this. We use PSAPI.DLL on NT based
// Windows and ToolHelp on 95/98/Me.

// Callback function that is called by enumerate_modules() on
// every DLL module.
// Input parameters:
//    int       pid,
//    char*     module_file_name,
//    address   module_base_addr,
//    unsigned  module_size,
//    void*     param
typedef int (*EnumModulesCallbackFunc)(int, char *, address, unsigned, void *);

// enumerate_modules for Windows NT, using PSAPI
static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void * param)
{
  HANDLE   hProcess ;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[ MAX_PATH ];   // shared scratch buffer (not thread-safe)
  int         result = 0;

  if (!os::PSApiDll::PSApiAvailable()) {
    return 0;
  }

  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid ) ;
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
                                        sizeof(modules), &size_needed)) {
    CloseHandle( hProcess );
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  // Note: if more than MAX_NUM_MODULES modules are loaded, the excess is
  // silently ignored (MIN2 caps the iteration).
  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if(!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
                                          filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
                                            &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function; a non-zero result stops the enumeration.
    result = func(pid, filename, (address)modinfo.lpBaseOfDll,
                  modinfo.SizeOfImage, param);
    if (result) break;
  }

  CloseHandle( hProcess ) ;
  return result;
}


// enumerate_modules for Windows 95/98/ME, using TOOLHELP
static int _enumerate_modules_windows( int pid, EnumModulesCallbackFunc func, void *param)
{
  HANDLE                hSnapShot ;
  static MODULEENTRY32  modentry ;    // shared scratch entry (not thread-safe)
  int                   result = 0;

  if (!os::Kernel32Dll::HelpToolsAvailable()) {
    return 0;
  }

  // Get a handle to a Toolhelp snapshot of the system
  hSnapShot = os::Kernel32Dll::CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid ) ;
  if( hSnapShot == INVALID_HANDLE_VALUE ) {
    return FALSE ;
  }

  // iterate through all modules
  modentry.dwSize = sizeof(MODULEENTRY32) ;
  bool not_done = os::Kernel32Dll::Module32First( hSnapShot, &modentry ) != 0;

  while( not_done ) {
    // invoke the callback; a non-zero result stops the enumeration
    result=func(pid, modentry.szExePath, (address)modentry.modBaseAddr,
                modentry.modBaseSize, param);
    if (result) break;

    modentry.dwSize = sizeof(MODULEENTRY32) ;
    not_done = os::Kernel32Dll::Module32Next( hSnapShot, &modentry ) != 0;
  }

  CloseHandle(hSnapShot);
  return result;
}

// Dispatch to the NT (PSAPI) or 95/98/Me (ToolHelp) implementation above.
int enumerate_modules( int pid, EnumModulesCallbackFunc func, void * param )
{
  // Get current process ID if caller doesn't provide it.
  if (!pid) pid = os::current_process_id();

  if (os::win32::is_nt()) return _enumerate_modules_winnt  (pid, func, param);
  else                    return _enumerate_modules_windows(pid, func, param);
}

// Result carrier for _locate_module_by_addr().
struct _modinfo {
   address addr;
   char*   full_path;   // point to a char buffer
   int     buflen;      // size of the buffer
   address base_addr;
};

// enumerate_modules() callback: stops the enumeration (returns 1) at the
// module whose [base, base+size) range contains pmod->addr, recording the
// base address and, when a buffer was supplied, the module's full path.
static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr,
                                  unsigned size, void * param) {
   struct _modinfo *pmod = (struct _modinfo *)param;
   if (!pmod) return -1;

   if (base_addr     <= pmod->addr &&
       base_addr+size  > pmod->addr) {
     // if a buffer is provided, copy path name to the buffer
     if (pmod->full_path) {
       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
     }
     pmod->base_addr = base_addr;
     return 1;
   }
   return 0;
}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

// NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
//       return the full path to the DLL file, sometimes it returns path
//       to the corresponding PDB file (debug info); sometimes it only
//       returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr      = addr;
  mi.full_path = buf;
  mi.buflen    = buflen;
  int pid = os::current_process_id();
  if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  // Address not inside any loaded module.
  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  // Delegate symbol lookup to the shared Decoder facility.
  if (Decoder::decode(addr, buf, buflen, offset)) {
    return true;
  }
  if (offset != NULL)  *offset  = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
// (the address of this very function is used as a probe that is known to
// live inside jvm.dll)
static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr,
                           unsigned size, void * param) {
   if (!param) return -1;

   if (base_addr     <= (address)_locate_jvm_dll &&
       base_addr+size  > (address)_locate_jvm_dll) {
     ((address*)param)[0] = base_addr;
     ((address*)param)[1] = base_addr + size;
     return 1;
   }
   return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
bool os::address_is_in_vm(address addr) {
  // Lazily locate jvm.dll on the first query.
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    int pid = os::current_process_id();
    if (!enumerate_modules(pid, _locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(int pid, char* fname, address base,
                         unsigned size, void* param) {
   if (!param) return -1;

   outputStream* st = (outputStream*)param;

   address end_addr
 = base + size;
   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base, end_addr, fname);
   return 0;
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
void * os::dll_load(const char *name, char *ebuf, int ebuflen)
{
  void * result = LoadLibrary(name);
  if (result != NULL)
  {
    return result;
  }

  // Load failed; capture the cause before any further API calls can
  // clobber GetLastError().
  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen-1);
    ebuf[ebuflen-1]='\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen-1]='\0';
  int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0);
  if (file_descriptor<0)
  {
    return NULL;
  }

  // Read the PE header of the file to determine its target architecture.
  uint32_t signature_offset;
  uint16_t lib_arch=0;
  bool failed_to_get_lib_arch=
  (
    //Go to position 3c in the dll
    (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0)
    ||
    // Read location of signature
    (sizeof(signature_offset)!=
      (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset))))
    ||
    //Go to COFF File Header in dll
    //that is located after "signature" (4 bytes long)
    (os::seek_to_file_offset(file_descriptor,
      signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0)
    ||
    //Read field that contains code of architecture
    // that dll was built for
    (sizeof(lib_arch)!=
      (os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch))))
  );

  ::close(file_descriptor);
  if (failed_to_get_lib_arch)
  {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  // Mapping of PE machine codes to human-readable architecture names.
  typedef struct
  {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[]={
    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
  };
  #if   (defined _M_IA64)
    static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64;
  #elif (defined _M_AMD64)
    static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64;
  #elif (defined _M_IX86)
    static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386;
  #else
    #error Method os::dll_load requires that one of following \
           is defined :_M_IA64,_M_AMD64 or _M_IX86
  #endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str=NULL,*lib_arch_str=NULL;
  for (unsigned int i=0;i<ARRAY_SIZE(arch_array);i++)
  {
    if (lib_arch==arch_array[i].arch_code)
      lib_arch_str=arch_array[i].arch_name;
    if (running_arch==arch_array[i].arch_code)
      running_arch_str=arch_array[i].arch_name;
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch)
  {
    return NULL;
  }

  if (lib_arch_str!=NULL)
  {
    ::_snprintf(ebuf, ebuflen-1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str,running_arch_str);
  }
  else
  {
    // don't know what architecture this dll was built for
    ::_snprintf(ebuf, ebuflen-1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch,running_arch_str);
  }

  return NULL;
}


void os::print_dll_info(outputStream *st) {
   int pid = os::current_process_id();
   st->print_cr("Dynamic libraries:");
   enumerate_modules(pid, _print_module, (void *)st);
}

void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}

void os::print_os_info(outputStream* st) {
  st->print("OS:");

  os::win32::print_windows_version(st);
}

void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  VS_FIXEDFILEINFO *file_info;
  TCHAR kernel32_path[MAX_PATH];
  UINT len, ret;

  // Use the GetVersionEx information to see if we're on a server or
  // workstation edition of Windows.  Starting with Windows 8.1 we can't
  // trust the OS version information returned by this API.
  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("Call to GetVersionEx failed");
    return;
  }
  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);

  // Get the full path to \Windows\System32\kernel32.dll and use that for
  // determining what version of Windows we're running on.
  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
  ret = GetSystemDirectory(kernel32_path, len);
  if (ret == 0 || ret > len) {
    st->print_cr("Call to GetSystemDirectory failed");
    return;
  }
  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);

  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
  if (version_size == 0) {
    st->print_cr("Call to GetFileVersionInfoSize failed");
    return;
  }

  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
  if (version_info == NULL) {
    st->print_cr("Failed to allocate version_info");
    return;
  }

  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
    os::free(version_info);
    st->print_cr("Call to GetFileVersionInfo failed");
    return;
  }

  // Query the root block ("\\") for the fixed file version numbers.
  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
    os::free(version_info);
    st->print_cr("Call to VerQueryValue failed");
    return;
  }

  int major_version = HIWORD(file_info->dwProductVersionMS);
  int minor_version = LOWORD(file_info->dwProductVersionMS);
  int build_number  = HIWORD(file_info->dwProductVersionLS);
  int build_minor   = LOWORD(file_info->dwProductVersionLS);
  int os_vers = major_version * 1000 + minor_version;
  os::free(version_info);

  st->print(" Windows ");
  switch (os_vers) {

  case 6000:
    if (is_workstation) {
      st->print("Vista");
    } else {
      st->print("Server 2008");
    }
    break;

  case 6001:
    if (is_workstation) {
      st->print("7");
    } else {
      st->print("Server 2008 R2");
    }
    break;

  case 6002:
    if (is_workstation) {
      st->print("8");
    } else {
      st->print("Server 2012");
    }
    break;

  case 6003:
    if (is_workstation) {
      st->print("8.1");
    } else {
      st->print("Server 2012 R2");
    }
    break;

  case 6004:
    if (is_workstation) {
      st->print("10");
    } else {
      // distinguish Windows Server 2016 and 2019 by build number
      // Windows server 2019 GA 10/2018 build number is 17763
      if (build_number > 17762) {
        st->print("Server 2019");
      } else {
        st->print("Server 2016");
      }
    }
    break;

  default:
    // Unrecognized windows, print out its major and minor versions
    st->print("%d.%d", major_version, minor_version);
    break;
  }

  // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
  // find out whether we are running on 64 bit processor or not
  SYSTEM_INFO si;
  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  os::Kernel32Dll::GetNativeSystemInfo(&si);
  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
    st->print(" , 64 bit");
  }

  st->print(" Build %d", build_number);
  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
  st->cr();
}

void os::pd_print_cpu_info(outputStream* st) {
  // Nothing to do for now.
1792 } 1793 1794 void os::print_memory_info(outputStream* st) { 1795 st->print("Memory:"); 1796 st->print(" %dk page", os::vm_page_size()>>10); 1797 1798 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 1799 // value if total memory is larger than 4GB 1800 MEMORYSTATUSEX ms; 1801 ms.dwLength = sizeof(ms); 1802 GlobalMemoryStatusEx(&ms); 1803 1804 st->print(", physical %uk", os::physical_memory() >> 10); 1805 st->print("(%uk free)", os::available_memory() >> 10); 1806 1807 st->print(", swap %uk", ms.ullTotalPageFile >> 10); 1808 st->print("(%uk free)", ms.ullAvailPageFile >> 10); 1809 st->cr(); 1810 } 1811 1812 void os::print_siginfo(outputStream *st, void *siginfo) { 1813 EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo; 1814 st->print("siginfo:"); 1815 st->print(" ExceptionCode=0x%x", er->ExceptionCode); 1816 1817 if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 1818 er->NumberParameters >= 2) { 1819 switch (er->ExceptionInformation[0]) { 1820 case 0: st->print(", reading address"); break; 1821 case 1: st->print(", writing address"); break; 1822 default: st->print(", ExceptionInformation=" INTPTR_FORMAT, 1823 er->ExceptionInformation[0]); 1824 } 1825 st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]); 1826 } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR && 1827 er->NumberParameters >= 2 && UseSharedSpaces) { 1828 FileMapInfo* mapinfo = FileMapInfo::current_info(); 1829 if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) { 1830 st->print("\n\nError accessing class data sharing archive." 
\ 1831 " Mapped file inaccessible during execution, " \ 1832 " possible disk/network problem."); 1833 } 1834 } else { 1835 int num = er->NumberParameters; 1836 if (num > 0) { 1837 st->print(", ExceptionInformation="); 1838 for (int i = 0; i < num; i++) { 1839 st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]); 1840 } 1841 } 1842 } 1843 st->cr(); 1844 } 1845 1846 1847 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) { 1848 #if _MSC_VER >= 1900 1849 // Starting with Visual Studio 2015, vsnprint is C99 compliant. 1850 int result = ::vsnprintf(buf, len, fmt, args); 1851 // If an encoding error occurred (result < 0) then it's not clear 1852 // whether the buffer is NUL terminated, so ensure it is. 1853 if ((result < 0) && (len > 0)) { 1854 buf[len - 1] = '\0'; 1855 } 1856 return result; 1857 #else 1858 // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use 1859 // _vsnprintf, whose behavior seems to be *mostly* consistent across 1860 // versions. However, when len == 0, avoid _vsnprintf too, and just 1861 // go straight to _vscprintf. The output is going to be truncated in 1862 // that case, except in the unusual case of empty output. More 1863 // importantly, the documentation for various versions of Visual Studio 1864 // are inconsistent about the behavior of _vsnprintf when len == 0, 1865 // including it possibly being an error. 1866 int result = -1; 1867 if (len > 0) { 1868 result = _vsnprintf(buf, len, fmt, args); 1869 // If output (including NUL terminator) is truncated, the buffer 1870 // won't be NUL terminated. Add the trailing NUL specified by C99. 
    if ((result < 0) || (result >= (int) len)) {
      buf[len - 1] = '\0';
    }
  }
  if (result < 0) {
    // Truncated (or len == 0): report the full length the output would
    // have needed, as C99 requires.
    result = _vscprintf(fmt, args);
  }
  return result;
#endif // _MSC_VER dispatch
}

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}

// Cached result of the jvm.dll path lookup performed by os::jvm_path().
static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::created_by_gamma_launcher()) {
     // Support for the gamma launcher. Check for an
     // JAVA_HOME environment variable
     // and fix up the path so it looks like
     // libjvm.so is installed there (append a fake suffix
     // hotspot/libjvm.so).
     char* java_home_var = ::getenv("JAVA_HOME");
     if (java_home_var != NULL && java_home_var[0] != 0 &&
         strlen(java_home_var) < (size_t)buflen) {

        strncpy(buf, java_home_var, buflen);

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        size_t len = strlen(buf);
        char* jrebin_p = buf + len;
        jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
        if (0 != _access(buf, 0)) {
          jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
        }
        len = strlen(buf);
        jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
     }
  }

  if(buf[0] == '\0') {
    // Normal case: ask the OS for the file name of the module we live in.
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  // Cache for subsequent calls.
  strncpy(saved_jvm_path, buf, MAX_PATH);
}


void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}


void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("@%d", args_size * sizeof(int));
#endif
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c

size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
          FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
          NULL,
          errval,
          0,
          buf,
          (DWORD)len,
          NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  // Neither a Windows nor a C runtime error is pending.
  return 0;
}

int os::get_last_error() {
  DWORD error = GetLastError();
  if (error == 0)
    // No pending Windows error; fall back to the C runtime errno.
    error = errno;
  return (int)error;
}

// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;

static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}

void* os::user_handler() {
  return (void*) UserHandler;
}

void* os::signal(int signal_number, void* handler) {
  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
    // SIGBREAK is delivered from our console handler below (see the NOTE
    // above), so just record the handler instead of using ::signal().
    void (*oldHandler)(int) = sigbreakHandler;
    sigbreakHandler = (void (*)(int)) handler;
    return (void*) oldHandler;
  } else {
    return (void*)::signal(signal_number, (void (*)(int))handler);
  }
}

void os::signal_raise(int signal_number) {
  raise(signal_number);
}

// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events.  We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
  switch(event) {
    case CTRL_C_EVENT:
      if (is_error_reported()) {
        // Ctrl-C is pressed during error reporting, likely because the error
        // handler fails to abort. Let VM die immediately.
        os::die();
      }

      os::signal_raise(SIGINT);
      return TRUE;
      break;
    case CTRL_BREAK_EVENT:
      if (sigbreakHandler != NULL) {
        (*sigbreakHandler)(SIGBREAK);
      }
      return TRUE;
      break;
    case CTRL_LOGOFF_EVENT: {
      // Don't terminate JVM if it is running in a non-interactive session,
      // such as a service process.
      USEROBJECTFLAGS flags;
      HANDLE handle = GetProcessWindowStation();
      if (handle != NULL &&
          GetUserObjectInformation(handle, UOI_FLAGS, &flags,
          sizeof( USEROBJECTFLAGS), NULL)) {
        // If it is a non-interactive session, let next handler to deal
        // with it.
        if ((flags.dwFlags & WSF_VISIBLE) == 0) {
          return FALSE;
        }
      }
    }
    // Note: intentional fall-through - an interactive logoff is handled
    // like close/shutdown and raises SIGTERM.
    case CTRL_CLOSE_EVENT:
    case CTRL_SHUTDOWN_EVENT:
      os::signal_raise(SIGTERM);
      return TRUE;
      break;
    default:
      break;
  }
  return FALSE;
}

/*
 * The following code is moved from os.cpp for making this
 * code platform specific, which it is by its very nature.
 */

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd(){
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Semaphore released once per signal_notify(); waited on in signal_wait().
static HANDLE sig_sem = NULL;

void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.  For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified.  This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case.  See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}

void os::signal_notify(int signal_number) {
  BOOL ret;
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[signal_number]);
    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
    assert(ret != 0, "ReleaseSemaphore() failed");
  }
}

// Scan for a pending signal and atomically consume one occurrence of it.
// With wait_for_signal, block on sig_sem until one arrives; otherwise poll
// once and return -1 when nothing is pending.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // cmpxchg makes the decrement race-free against concurrent notifies.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
        assert(ret != 0, "ReleaseSemaphore() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

int os::signal_lookup() {
  return check_pending_signals(false);
}

int os::signal_wait() {
  return check_pending_signals(true);
}

// Implicit OS exception handling

LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) {
  JavaThread* thread = JavaThread::current();
  // Save pc in thread
#ifdef _M_IA64
  // Do not blow up if no thread info available.
  if (thread) {
    // Saving PRECISE pc (with slot information) in thread.
    uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
    // Convert precise PC into "Unix" format
    precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
    thread->set_saved_exception_pc((address)precise_pc);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
  // Clear out psr.ri (= Restart Instruction) in order to continue
  // at the beginning of the target bundle.
  exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
  assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
#else
#ifdef _M_AMD64
  // Do not blow up if no thread info available.
2197 if (thread) { 2198 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip); 2199 } 2200 // Set pc to handler 2201 exceptionInfo->ContextRecord->Rip = (DWORD64)handler; 2202 #else 2203 // Do not blow up if no thread info available. 2204 if (thread) { 2205 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip); 2206 } 2207 // Set pc to handler 2208 exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler; 2209 #endif 2210 #endif 2211 2212 // Continue the execution 2213 return EXCEPTION_CONTINUE_EXECUTION; 2214 } 2215 2216 2217 // Used for PostMortemDump 2218 extern "C" void safepoints(); 2219 extern "C" void find(int x); 2220 extern "C" void events(); 2221 2222 // According to Windows API documentation, an illegal instruction sequence should generate 2223 // the 0xC000001C exception code. However, real world experience shows that occasionnaly 2224 // the execution of an illegal instruction can generate the exception code 0xC000001E. This 2225 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems). 2226 2227 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E 2228 2229 // From "Execution Protection in the Windows Operating System" draft 0.35 2230 // Once a system header becomes available, the "real" define should be 2231 // included or copied here. 2232 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08 2233 2234 // Handle NAT Bit consumption on IA64. 2235 #ifdef _M_IA64 2236 #define EXCEPTION_REG_NAT_CONSUMPTION STATUS_REG_NAT_CONSUMPTION 2237 #endif 2238 2239 // Windows Vista/2008 heap corruption check 2240 #define EXCEPTION_HEAP_CORRUPTION 0xC0000374 2241 2242 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual 2243 // C++ compiler contain this error code. Because this is a compiler-generated 2244 // error, the code is not listed in the Win32 API header files. 
2245 // The code is actually a cryptic mnemonic device, with the initial "E" 2246 // standing for "exception" and the final 3 bytes (0x6D7363) representing the 2247 // ASCII values of "msc". 2248 2249 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363 2250 2251 #define def_excpt(val) { #val, (val) } 2252 2253 static const struct { char* name; uint number; } exceptlabels[] = { 2254 def_excpt(EXCEPTION_ACCESS_VIOLATION), 2255 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), 2256 def_excpt(EXCEPTION_BREAKPOINT), 2257 def_excpt(EXCEPTION_SINGLE_STEP), 2258 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), 2259 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND), 2260 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO), 2261 def_excpt(EXCEPTION_FLT_INEXACT_RESULT), 2262 def_excpt(EXCEPTION_FLT_INVALID_OPERATION), 2263 def_excpt(EXCEPTION_FLT_OVERFLOW), 2264 def_excpt(EXCEPTION_FLT_STACK_CHECK), 2265 def_excpt(EXCEPTION_FLT_UNDERFLOW), 2266 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO), 2267 def_excpt(EXCEPTION_INT_OVERFLOW), 2268 def_excpt(EXCEPTION_PRIV_INSTRUCTION), 2269 def_excpt(EXCEPTION_IN_PAGE_ERROR), 2270 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION), 2271 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2), 2272 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION), 2273 def_excpt(EXCEPTION_STACK_OVERFLOW), 2274 def_excpt(EXCEPTION_INVALID_DISPOSITION), 2275 def_excpt(EXCEPTION_GUARD_PAGE), 2276 def_excpt(EXCEPTION_INVALID_HANDLE), 2277 def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION), 2278 def_excpt(EXCEPTION_HEAP_CORRUPTION) 2279 #ifdef _M_IA64 2280 , def_excpt(EXCEPTION_REG_NAT_CONSUMPTION) 2281 #endif 2282 }; 2283 2284 const char* os::exception_name(int exception_code, char *buf, size_t size) { 2285 uint code = static_cast<uint>(exception_code); 2286 for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) { 2287 if (exceptlabels[i].number == code) { 2288 jio_snprintf(buf, size, "%s", exceptlabels[i].name); 2289 return buf; 2290 } 2291 } 2292 2293 return NULL; 2294 } 2295 2296 
//-----------------------------------------------------------------------------
// Fix up the trap raised by 'idiv' for min_jint / -1: the hardware reports
// EXCEPTION_INT_OVERFLOW, but Java semantics define the result as min_jint
// with remainder 0.  Patch the result registers and skip the 2-byte idiv.
LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by idiv; should only happen for -MinInt/-1
  // (division by zero is handled explicitly)
#ifdef _M_IA64
  assert(0, "Fix Handle_IDiv_Exception");
#else
#ifdef _M_AMD64
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Rip;
  assert(pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  assert(ctx->Rax == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
  ctx->Rax = (DWORD64)min_jint;      // result
  ctx->Rdx = (DWORD64)0;             // remainder
  // Continue the execution
#else
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Eip;
  assert(pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  assert(ctx->Eax == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Eip = (DWORD)pc + 2;          // idiv reg, reg  is 2 bytes
  ctx->Eax = (DWORD)min_jint;        // result
  ctx->Edx = (DWORD)0;               // remainder
  // Continue the execution
#endif
#endif
  return EXCEPTION_CONTINUE_EXECUTION;
}

#ifndef _WIN64
//-----------------------------------------------------------------------------
// 32-bit x87 only: if a native method changed the FPU control word, restore
// the VM's standard control word (from StubRoutines), mask out pending FP
// exceptions, and resume; otherwise delegate to the previous unhandled-
// exception filter (if any).
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by native method modifying control word
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;

  switch (exception_code) {
    case EXCEPTION_FLT_DENORMAL_OPERAND:
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
    case EXCEPTION_FLT_INEXACT_RESULT:
    case EXCEPTION_FLT_INVALID_OPERATION:
    case EXCEPTION_FLT_OVERFLOW:
    case EXCEPTION_FLT_STACK_CHECK:
    case EXCEPTION_FLT_UNDERFLOW:
      jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
      if (fp_control_word != ctx->FloatSave.ControlWord) {
        // Restore FPCW and mask out FLT exceptions
        ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
        // Mask out pending FLT exceptions
        ctx->FloatSave.StatusWord &= 0xffffff00;
        return EXCEPTION_CONTINUE_EXECUTION;
      }
  }

  if (prev_uef_handler != NULL) {
    // We didn't handle this exception so pass it to the previous
    // UnhandledExceptionFilter.
    return (prev_uef_handler)(exceptionInfo);
  }

  return EXCEPTION_CONTINUE_SEARCH;
}
#else //_WIN64
/*
  On Windows, the mxcsr control bits are non-volatile across calls
  See also CR 6192333
  If EXCEPTION_FLT_* happened after some native method modified
  mxcsr - it is not a jvm fault.
  However should we decide to restore of mxcsr after a faulty
  native method we can uncomment following code
      jint MxCsr = INITIAL_MXCSR;
        // we can't use StubRoutines::addr_mxcsr_std()
        // because in Win64 mxcsr is not saved there
      if (MxCsr != ctx->MxCsr) {
        ctx->MxCsr = MxCsr;
        return EXCEPTION_CONTINUE_EXECUTION;
      }

*/
#endif // _WIN64


// Produce an hs_err-style error report and (normally) terminate the VM.
static inline void report_error(Thread* t, DWORD exception_code,
                                address addr, void* siginfo, void* context) {
  VMError err(t, exception_code, addr, siginfo, context);
  err.report_and_die();

  // If UseOsErrorReporting, this will return here and save the error file
  // somewhere where we can find it in the minidump.
}

//-----------------------------------------------------------------------------
// Top-level structured exception filter for the VM.  Recognizes and fixes up
// exceptions the VM generates on purpose (safefetch faults, stack-guard-zone
// hits, implicit null checks, safepoint polls, idiv overflow, fast JNI
// accessor traps, x87 control-word drift) by redirecting execution via
// Handle_Exception(); anything unrecognized (except breakpoints) is reported
// fatally via report_error() and then passed on with
// EXCEPTION_CONTINUE_SEARCH.
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#ifdef _M_IA64
  // On Itanium, we need the "precise pc", which has the slot number coded
  // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
  address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
  // Convert the pc to "Unix format", which has the slot number coded
  // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
  // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
  // information is saved in the Unix format.
  address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
#else
#ifdef _M_AMD64
  address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
#endif
  Thread* t = ThreadLocalStorage::get_thread_slow();          // slow & steady

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      bool instr_spans_page_boundary =
        (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                         (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start =
            (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          if (PrintMiscellaneous && Verbose) {
            char buf[256];
            jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                         "at " INTPTR_FORMAT
                         ", unguarding " INTPTR_FORMAT ": %s", addr,
                         page_start, (res ? "success" : strerror(errno)));
            tty->print_raw_cr(buf);
          }

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here.  Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM.  Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop.  This condition is probably even more unlikely
          // than the first.
          //
          // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are unnecessary complication:
          // this handler is a best-effort safety net, not a complete solution.
          // It is disabled by default and should only be used as a workaround
          // in case we missed any no-execute-unsafe VM code.

          last_addr = addr;

          return EXCEPTION_CONTINUE_EXECUTION;
        }
      }

      // Last unguard failed or not unguarding
      tty->print_raw_cr("Execution protection violation");
      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    }
  }
#endif // _WIN64

  // Check to see if we caught the safepoint code in the
  // process of write protecting the memory serialization page.
  // It write enables the page immediately after protecting it
  // so just return.
  if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
    JavaThread* thread = (JavaThread*) t;
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];
    if ( os::is_memory_serialize_page(thread, addr) ) {
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
      VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
  }

  if (t != NULL && t->is_Java_thread()) {
    JavaThread* thread = (JavaThread*) t;
    bool in_java = thread->thread_state() == _thread_in_Java;

    // Handle potential stack overflows up front.
    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
      if (os::uses_stack_guard_pages()) {
#ifdef _M_IA64
        // Use guard page for register stack.
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        // Check for a register stack overflow on Itanium
        if (thread->addr_inside_register_stack_red_zone(addr)) {
          // Fatal red zone violation happens if the Java program
          // catches a StackOverflow error and does so much processing
          // that it runs beyond the unprotected yellow guard zone. As
          // a result, we are out of here.
          fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
        } else if(thread->addr_inside_register_stack(addr)) {
          // Disable the yellow zone which sets the state that
          // we've got a stack overflow problem.
          if (thread->stack_yellow_zone_enabled()) {
            thread->disable_stack_yellow_zone();
          }
          // Give us some room to process the exception.
          thread->disable_register_stack_guard();
          // Tracing with +Verbose.
          if (Verbose) {
            tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
            tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
            tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
            tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
                          thread->register_stack_base(),
                          thread->register_stack_base() + thread->stack_size());
          }

          // Reguard the permanent register stack red zone just to be sure.
          // We saw Windows silently disabling this without telling us.
          thread->enable_register_stack_red_zone();

          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
#endif
        if (thread->stack_yellow_zone_enabled()) {
          // Yellow zone violation.  The o/s has unprotected the first yellow
          // zone page for us.  Note: must call disable_stack_yellow_zone to
          // update the enabled status, even if the zone contains only one page.
          thread->disable_stack_yellow_zone();
          // If not in java code, return and hope for the best.
          return in_java ? Handle_Exception(exceptionInfo,
                                            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
                         : EXCEPTION_CONTINUE_EXECUTION;
        } else {
          // Fatal red zone violation.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
          report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                       exceptionInfo->ContextRecord);
          return EXCEPTION_CONTINUE_SEARCH;
        }
      } else if (in_java) {
        // JVM-managed guard pages cannot be used on win95/98.  The o/s provides
        // a one-time-only guard page, which it has released to us.  The next
        // stack overflow on this thread will result in an ACCESS_VIOLATION.
        return Handle_Exception(exceptionInfo,
                                SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
      } else {
        // Can only return and hope for the best.  Further stack growth will
        // result in an ACCESS_VIOLATION.
        return EXCEPTION_CONTINUE_EXECUTION;
      }
    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
      // Either stack overflow or null pointer exception.
      if (in_java) {
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        address stack_end = thread->stack_base() - thread->stack_size();
        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
          // Stack overflow.
          assert(!os::uses_stack_guard_pages(),
                 "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        //
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        //
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          if (os::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
        {
#ifdef _WIN64
          //
          // If it's a legal stack address map the entire region in
          //
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) {
            addr = (address)((uintptr_t)addr &
                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
            os::commit_memory((char *)addr, thread->stack_base() - addr,
                              !ExecMem);
            return EXCEPTION_CONTINUE_EXECUTION;
          }
          else
#endif
          {
            // Null pointer exception.
#ifdef _M_IA64
            // Process implicit null checks in compiled code. Note: Implicit null checks
            // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
            if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
              CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
              // Handle implicit null check in UEP method entry
              if (cb && (cb->is_frame_complete_at(pc) ||
                         (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
                if (Verbose) {
                  intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
                  tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
                  tty->print_cr("      to addr " INTPTR_FORMAT, addr);
                  tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
                                *(bundle_start + 1), *bundle_start);
                }
                return Handle_Exception(exceptionInfo,
                                        SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
              }
            }

            // Implicit null checks were processed above.  Hence, we should not reach
            // here in the usual case => die!
            if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;

#else // !IA64

            // Windows 98 reports faulting addresses incorrectly
            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
                !os::win32::is_nt()) {
              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
            }
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;
#endif
          }
        }
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
        address addr = JNI_FastGetField::find_slowcase_pc(pc);
        if (addr != (address)-1) {
          return Handle_Exception(exceptionInfo, addr);
        }
      }
#endif

      // Stack overflow or null pointer exception in native code.
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#if defined _M_IA64
    else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
              exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
      M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);

      // Compiled method patched to be non entrant? Following conditions must apply:
      // 1. must be first instruction in bundle
      // 2. must be a break instruction with appropriate code
      if((((uint64_t) pc & 0x0F) == 0) &&
         (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
        return Handle_Exception(exceptionInfo,
                                (address)SharedRuntime::get_handle_wrong_method_stub());
      }
    } // /EXCEPTION_ILLEGAL_INSTRUCTION
#endif


    if (in_java) {
      switch (exception_code) {
      case EXCEPTION_INT_DIVIDE_BY_ZERO:
        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

      case EXCEPTION_INT_OVERFLOW:
        return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }
#ifndef _WIN64
    if (((thread->thread_state() == _thread_in_Java) ||
         (thread->thread_state() == _thread_in_native)) &&
        exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION)
    {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
#endif //_WIN64
  }

  if (exception_code != EXCEPTION_BREAKPOINT) {
    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                 exceptionInfo->ContextRecord);
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

#ifndef _WIN64
// Special care for fast JNI accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
// the heap gets shrunk before the field access.
// Need to install our own structured exception handler since native code may
// install its own.
// SEH filter for the fast JNI accessor wrappers below: if the access
// violation happened at a known fast-accessor pc, redirect execution to the
// corresponding slow-case entry; otherwise keep searching.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Generates a wrapper that invokes the generated fast accessor stub under a
// __try/__except guard using the filter above.  The "return 0" is reached
// only if the filter declined and execution fell into the __except block.
#define DEFINE_FAST_GETFIELD(Return,Fieldname,Result) \
Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) { \
  __try { \
    return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, obj, fieldID); \
  } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { \
  } \
  return 0; \
}

DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
DEFINE_FAST_GETFIELD(jchar,    char,   Char)
DEFINE_FAST_GETFIELD(jshort,   short,  Short)
DEFINE_FAST_GETFIELD(jint,     int,    Int)
DEFINE_FAST_GETFIELD(jlong,    long,   Long)
DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
DEFINE_FAST_GETFIELD(jdouble,  double, Double)

// Map a BasicType to the matching SEH-guarded wrapper defined above.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
  case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
  case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
  case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
  case T_INT:     return (address)jni_fast_GetIntField_wrapper;
  case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
  case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
  case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
  default:        ShouldNotReachHere();
  }
  return (address)-1;
}
#endif

void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
  // Install a win32 structured exception handler around the test
  // function call so the VM can generate an error dump if needed.
  __try {
    (*funcPtr)();
  } __except(topLevelExceptionFilter(
             (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }
}

// Virtual Memory

int os::vm_page_size() { return os::win32::vm_page_size(); }
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}

// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
//   + select Control Panel -> Administrative Tools -> Local Security Policy
//   + select Local Policies -> User Rights Assignment
//   + double click "Lock pages in memory", add users and/or groups
//   + reboot
// Note the above steps are needed for administrator as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment it only uses large page if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future, if so the code below needs to be revisited.
// Fallback definition for toolchains whose headers predate large page support.
#ifndef MEM_LARGE_PAGES
#define MEM_LARGE_PAGES 0x20000000
#endif

// Handles used only while acquiring the "Lock pages in memory" privilege;
// both are closed again in cleanup_after_large_page_init().
static HANDLE _hProcess;
static HANDLE _hToken;

// Container for NUMA node list info
class NUMANodeListHolder {
 private:
  int *_numa_used_node_list;  // allocated below
  int _numa_used_node_count;

  // Releases the node array if it was ever allocated by build().
  void free_node_list() {
    if (_numa_used_node_list != NULL) {
      FREE_C_HEAP_ARRAY(int, _numa_used_node_list, mtInternal);
    }
  }

 public:
  NUMANodeListHolder() {
    _numa_used_node_count = 0;
    _numa_used_node_list = NULL;
    // do rest of initialization in build routine (after function pointers are set up)
  }

  ~NUMANodeListHolder() {
    free_node_list();
  }

  // Collects every NUMA node whose processor mask intersects this process'
  // affinity mask. Returns true only when more than one node is usable,
  // since interleaving over a single node gains nothing.
  bool build() {
    DWORD_PTR proc_aff_mask;
    DWORD_PTR sys_aff_mask;
    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
    ULONG highest_node_number;
    if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false;
    free_node_list();
    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
    for (unsigned int i = 0; i <= highest_node_number; i++) {
      ULONGLONG proc_mask_numa_node;
      if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
      if ((proc_aff_mask & proc_mask_numa_node)!=0) {
        _numa_used_node_list[_numa_used_node_count++] = i;
      }
    }
    return (_numa_used_node_count > 1);
  }

  int get_count() {return _numa_used_node_count;}
  int get_node_list_entry(int n) {
    // for indexes out of range, returns -1
    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
  }

} numa_node_list_holder;



// Large page size in bytes; stays 0 unless large_page_init() succeeds.
static size_t _large_page_size = 0;

// True when both the kernel32 large page entry point and the advapi32
// privilege functions could be resolved.
static bool resolve_functions_for_large_page_init() {
  return os::Kernel32Dll::GetLargePageMinimumAvailable() &&
    os::Advapi32Dll::AdvapiAvailable();
}

// Enables SeLockMemoryPrivilege ("Lock pages in memory") on this process'
// token; the privilege is a precondition for MEM_LARGE_PAGES allocations.
// Returns true if the privilege is actually held afterwards.
static bool request_lock_memory_privilege() {
  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
                          os::current_process_id());

  LUID luid;
  if (_hProcess != NULL &&
      os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
      os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

    TOKEN_PRIVILEGES tp;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Luid = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See MSDN document.
    if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
        (GetLastError() == ERROR_SUCCESS)) {
      return true;
    }
  }

  return false;
}

// Closes the handles opened by request_lock_memory_privilege(); safe to call
// whether or not privilege acquisition succeeded.
static void cleanup_after_large_page_init() {
  if (_hProcess) CloseHandle(_hProcess);
  _hProcess = NULL;
  if (_hToken) CloseHandle(_hToken);
  _hToken = NULL;
}

// Decides whether NUMA interleaving can be used: requires the NUMA kernel32
// entry points and a process affinity mask spanning more than one node.
// Also rounds NUMAInterleaveGranularity up to a legal value as a side effect.
static bool numa_interleaving_init() {
  bool success = false;
  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

  // print a warning if UseNUMAInterleaving flag is specified on command line
  bool warn_on_failure = use_numa_interleaving_specified;
# define WARN(msg) if (warn_on_failure) { warning(msg); }

  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);

  if (os::Kernel32Dll::NumaCallsAvailable()) {
    if (numa_node_list_holder.build()) {
      if (PrintMiscellaneous && Verbose) {
        tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
        for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
          tty->print("%d ", numa_node_list_holder.get_node_list_entry(i));
        }
        tty->print("\n");
      }
      success = true;
    } else {
      WARN("Process does not cover multiple NUMA nodes.");
    }
  } else {
    WARN("NUMA Interleaving is not supported by the operating system.");
  }
  if (!success) {
    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
  }
  return success;
#undef WARN
}

// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
//  * UseNUMAInterleaving requires a separate node for each piece
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, DWORD prot,
                                         bool should_inject_error=false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) VirtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  // Throw the probe reservation away again; the address range it found is
  // then re-allocated piecewise below with the caller's real flags.
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = (char *) align_size_up((size_t)p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t bytes_remaining = bytes;
  // An overflow of align_size_up() would have been caught above
  // in the calculation of size_of_reserve.
  char * next_alloc_addr = p_buf;
  HANDLE hProc = GetCurrentProcess();

#ifdef ASSERT
  // Variable for the failure injection
  long ran_num = os::random();
  size_t fail_after = ran_num % bytes;
#endif

  int count=0;
  while (bytes_remaining) {
    // select bytes_to_rq to get to the next chunk_size boundary

    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
    // Note allocate and commit
    char * p_new;

#ifdef ASSERT
    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
#else
    const bool inject_error_now = false;
#endif

    if (inject_error_now) {
      p_new = NULL;
    } else {
      if (!UseNUMAInterleaving) {
        p_new = (char *) VirtualAlloc(next_alloc_addr,
                                      bytes_to_rq,
                                      flags,
                                      prot);
      } else {
        // get the next node to use from the used_node_list
        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
        p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc,
                                                            next_alloc_addr,
                                                            bytes_to_rq,
                                                            flags,
                                                            prot,
                                                            node);
      }
    }

    if (p_new == NULL) {
      // Free any allocated pages
      if (next_alloc_addr > p_buf) {
        // Some memory was committed so release it.
        size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // need to create a dummy 'reserve' record to match
        // the release.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        if (TracePageSizes && Verbose) {
          tty->print_cr("Reserving pages individually failed.");
        }
      }
#endif
      return NULL;
    }

    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}



// Determines _large_page_size and whether large pages can be used at all;
// clears UseLargePages again if any prerequisite (resolved entry points,
// lock-memory privilege, processor support) is missing.
void os::large_page_init() {
  if (!UseLargePages) return;

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  bool success = false;

# define WARN(msg) if (warn_on_failure) { warning(msg); }
  if (resolve_functions_for_large_page_init()) {
    if (request_lock_memory_privilege()) {
      size_t s = os::Kernel32Dll::GetLargePageMinimum();
      if (s) {
#if defined(IA32) || defined(AMD64)
        if (s > 4*M || LargePageSizeInBytes > 4*M) {
          WARN("JVM cannot use large pages bigger than 4mb.");
        } else {
#endif
          // Honor an explicit LargePageSizeInBytes only when it is a
          // multiple of the hardware minimum.
          if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
            _large_page_size = LargePageSizeInBytes;
          } else {
            _large_page_size = s;
          }
          success = true;
#if defined(IA32) || defined(AMD64)
        }
#endif
      } else {
        WARN("Large page is not supported by the processor.");
      }
    } else {
      WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
    }
  } else {
    WARN("Large page is not supported by the operating system.");
  }
#undef WARN

  const size_t default_page_size = (size_t) vm_page_size();
  if (success && _large_page_size > default_page_size) {
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = default_page_size;
    _page_sizes[2] = 0;
  }

  cleanup_after_large_page_init();
  UseLargePages = success;
}

// On win32, one cannot release just a part of reserved memory, it's an
// all or nothing deal.  When we split a reservation, we must break the
// reservation into two reservations.
void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
                                  bool realloc) {
  if (size > 0) {
    release_memory(base, size);
    if (realloc) {
      reserve_memory(split, base);
    }
    if (size != split) {
      reserve_memory(size - split, base + split);
    }
  }
}

// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  // Over-reserve, release, then re-reserve at the aligned address inside the
  // released range. Another thread can grab the range between the release and
  // the re-reserve, hence the retry loop (see thread-safety comment above).
  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);

    os::release_memory(extra_base, extra_size);

    aligned_base = os::reserve_memory(size, aligned_base);

  } while (aligned_base == NULL);

  return aligned_base;
}

// Reserves (but does not commit) a range of virtual address space. With NUMA
// interleaving (and small pages) the range is built chunk-by-chunk across
// nodes via allocate_pages_individually().
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if( Verbose && PrintMiscellaneous ) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if( Verbose && PrintMiscellaneous ) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails of the requested address range is
  // not avilable.
  return reserve_memory(bytes, requested_addr);
}

size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

bool os::can_execute_large_page_memory() {
  return true;
}

// Reserves and commits 'bytes' of large page memory; returns NULL (fall back
// to small pages) if the size/alignment does not suit the large page size or
// the allocation fails.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) {
  assert(UseLargePages, "only for large pages");

  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
    return NULL; // Fallback to small pages.
  }

  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;

  // with large pages, there are two cases where we need to use Individual Allocation
  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages individually.");
    }
    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
    if (p_buf == NULL) {
      // give an appropriate warning message
      if (UseNUMAInterleaving) {
        warning("NUMA large page allocation failed, UseLargePages flag ignored");
      }
      if (UseLargePagesIndividualAllocation) {
        warning("Individually allocated large pages failed, "
                "use -XX:-UseLargePagesIndividualAllocation to turn off");
      }
      return NULL;
    }

    return p_buf;

  } else {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages in a single large chunk.");
    }
    // normal policy just allocate it all at once
    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
    if (res != NULL) {
      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
    }

    return res;
  }
}

bool os::release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  return release_memory(base, bytes);
}

void os::print_statistics() {
}

// Emits a diagnostic warning with the OS error text when a commit fails.
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}

// Commits previously reserved pages; with NUMA interleaving the range may be
// covered by several reservations and is committed piecewise.
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  }
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
  return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
}

bool os::pd_release_memory(char* addr, size_t bytes) {
  // MEM_RELEASE frees the entire reservation containing addr; the size
  // argument must be 0 for this mode, so 'bytes' is not passed to the OS.
  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}

// Set protections specified
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
  case MEM_PROT_READ: p = PAGE_READONLY; break;
  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
  default:
    ShouldNotReachHere();
  }

  DWORD old_status;

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed) {
    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
                          "cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard page
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
  //
  // Pages in the region become guard pages. Any attempt to access a guard page
  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
  // the guard page status. Guard pages thus act as a one-time access alarm.
  return VirtualProtect(addr, bytes, p, &old_status) != 0;
}

bool os::guard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}

bool os::unguard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}

// NUMA-related hooks that are no-ops or trivial on Windows.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
bool os::numa_topology_changed() { return false; }
size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id() { return 0; }
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // Provide an answer for UMA systems
    ids[0] = 0;
    return 1;
  } else {
    // check for size bigger than actual groups_num
    size = MIN2(size, numa_get_groups_num());
    for (int i = 0; i < (int)size; i++) {
      ids[i] = numa_node_list_holder.get_node_list_entry(i);
    }
    return size;
  }
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}

#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL

// Resumes a thread created suspended (see os::create_thread elsewhere).
void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}

class HighResolutionInterval : public CHeapObj<mtThread> {
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
private:
  jlong resolution;
public:
  HighResolutionInterval(jlong ms) {
    // Only raise the resolution when the interval is not a multiple of the
    // default 10 ms tick.
    resolution = ms % 10L;
    if (resolution != 0) {
      MMRESULT result = timeBeginPeriod(1L);
    }
  }
  ~HighResolutionInterval() {
    if (resolution != 0) {
      MMRESULT result = timeEndPeriod(1L);
    }
    resolution = 0L;
  }
};

// Sleeps for 'ms' milliseconds; interruptable sleeps wait on the thread's
// interrupt event and return OS_INTRPT if it is signaled. Intervals longer
// than MAXDWORD are handled by recursing in MAXDWORD-sized pieces.
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  while(ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT)
      return res;
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    HighResolutionInterval *phri=NULL;
    if(!ForceTimeHighResolution)
      phri = new HighResolutionInterval( ms );
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}

//
// Short sleep, direct OS call.
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    Sleep(100000);  // ... 100 seconds at a time
  }
}

typedef BOOL (WINAPI * STTSignature)(void) ;

os::YieldResult os::NakedYield() {
  // Use either SwitchToThread() or Sleep(0)
  // Consider passing back the return value from SwitchToThread().
  if (os::Kernel32Dll::SwitchToThreadAvailable()) {
    return SwitchToThread() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY ;
  } else {
    Sleep(0);
  }
  return os::YIELD_UNKNOWN ;
}

void os::yield() { os::NakedYield(); }

void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  Sleep(1);
}

// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.

int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};

// Alternate mapping used when -XX:ThreadPriorityPolicy=1: spreads Java
// priorities over a wider Windows range, up to TIME_CRITICAL.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};

static int prio_init() {
  // If ThreadPriorityPolicy is 1, switch tables
  if (ThreadPriorityPolicy == 1) {
    int i;
    for (i = 0; i < CriticalPriority + 1; i++) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  if (UseCriticalJavaThreadPriority) {
    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority] ;
  }
  return 0;
}

OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) return OS_OK;
  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
  return ret ? OS_OK : OS_ERR;
}

OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) {
  if ( !UseThreadPriorities ) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

// Posts an interrupt to 'thread': sets the interrupted flag, signals the
// interrupt event, and unparks both the JSR-166 parker and the ParkEvent.
void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread())
    ((JavaThread*)thread)->parker()->unpark();

  ParkEvent * ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

}


bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. By checking thread interrupt event to see
  // if the thread gets real interrupt thus prevent spurious wakeup.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}

// Gets a pc (hint) for a running thread. Currently used only for profiling.
ExtendedPC os::get_thread_pc(Thread* thread) {
  CONTEXT context;
  context.ContextFlags = CONTEXT_CONTROL;
  HANDLE handle = thread->osthread()->thread_handle();
#ifdef _M_IA64
  assert(0, "Fix get_thread_pc");
  return ExtendedPC(NULL);
#else
  if (GetThreadContext(handle, &context)) {
#ifdef _M_AMD64
    return ExtendedPC((address) context.Rip);
#else
    return ExtendedPC((address) context.Eip);
#endif
  } else {
    return ExtendedPC(NULL);
  }
#endif
}

// GetCurrentThreadId() returns DWORD
intx os::current_thread_id()  { return GetCurrentThreadId(); }

static int _initial_pid = 0;

int os::current_process_id()
{
  return (_initial_pid ? _initial_pid : _getpid());
}

int    os::win32::_vm_page_size              = 0;
int    os::win32::_vm_allocation_granularity = 0;
int    os::win32::_processor_type            = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int    os::win32::_processor_level           = 0;
julong os::win32::_physical_memory           = 0;
size_t os::win32::_default_stack_size        = 0;

intx          os::win32::_os_thread_limit    = 0;
volatile intx os::win32::_os_thread_count    = 0;

bool   os::win32::_is_nt                     = false;
bool   os::win32::_is_windows_2003           = false;
bool   os::win32::_is_windows_server         = false;

// Caches page size, allocation granularity, processor info, physical memory
// and OS version flags for later queries; called once during VM startup.
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size    = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type  = si.dwProcessorType;
  _processor_level = si.wProcessorLevel;
  set_processor_count(si.dwNumberOfProcessors);

  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);

  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatusEx(&ms);
  _physical_memory = ms.ullTotalPhys;

  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch(oi.dwPlatformId) {
    case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
    case VER_PLATFORM_WIN32_NT:
      _is_nt = true;
      {
        // 5002 == major 5, minor 2 == Windows Server 2003 / XP x64
        int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
        if (os_vers == 5002) {
          _is_windows_2003 = true;
        }
        if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
            oi.wProductType == VER_NT_SERVER) {
          _is_windows_server = true;
        }
      }
      break;
    default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
         "stack size not a multiple of page size");

  initialize_performance_counter();

  // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is
  // known to deadlock the system, if the VM issues to thread operations with
  // a too high frequency, e.g., such as changing the priorities.
  // The 6000 seems to work well - no deadlocks has been notices on the test
  // programs that we have seen experience this problem.
  if (!os::win32::is_nt()) {
    StarvationMonitorInterval = 6000;
  }
}


// Loads a system DLL by bare name, searching only the System and Windows
// directories (never the current directory or PATH) to avoid DLL planting;
// rejects any name containing a path component.
HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, int ebuflen) {
  char path[MAX_PATH];
  DWORD size;
  DWORD pathLen = (DWORD)sizeof(path);
  HINSTANCE result = NULL;

  // only allow library name without path component
  assert(strchr(name, '\\') == NULL, "path not allowed");
  assert(strchr(name, ':') == NULL, "path not allowed");
  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
    jio_snprintf(ebuf, ebuflen,
                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
    return NULL;
  }

  // search system directory
  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
    strcat(path, "\\");
    strcat(path, name);
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // try Windows directory
  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
    strcat(path, "\\");
    strcat(path, name);
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  jio_snprintf(ebuf, ebuflen,
               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
  return NULL;
}

// Switches the standard streams to binary mode so no CR/LF translation occurs.
void os::win32::setmode_streams() {
  _setmode(_fileno(stdin), _O_BINARY);
  _setmode(_fileno(stdout),
_O_BINARY); 3875 _setmode(_fileno(stderr), _O_BINARY); 3876 } 3877 3878 3879 bool os::is_debugger_attached() { 3880 return IsDebuggerPresent() ? true : false; 3881 } 3882 3883 3884 void os::wait_for_keypress_at_exit(void) { 3885 if (PauseAtExit) { 3886 fprintf(stderr, "Press any key to continue...\n"); 3887 fgetc(stdin); 3888 } 3889 } 3890 3891 3892 int os::message_box(const char* title, const char* message) { 3893 int result = MessageBox(NULL, message, title, 3894 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY); 3895 return result == IDYES; 3896 } 3897 3898 int os::allocate_thread_local_storage() { 3899 return TlsAlloc(); 3900 } 3901 3902 3903 void os::free_thread_local_storage(int index) { 3904 TlsFree(index); 3905 } 3906 3907 3908 void os::thread_local_storage_at_put(int index, void* value) { 3909 TlsSetValue(index, value); 3910 assert(thread_local_storage_at(index) == value, "Just checking"); 3911 } 3912 3913 3914 void* os::thread_local_storage_at(int index) { 3915 return TlsGetValue(index); 3916 } 3917 3918 3919 #ifndef PRODUCT 3920 #ifndef _WIN64 3921 // Helpers to check whether NX protection is enabled 3922 int nx_exception_filter(_EXCEPTION_POINTERS *pex) { 3923 if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 3924 pex->ExceptionRecord->NumberParameters > 0 && 3925 pex->ExceptionRecord->ExceptionInformation[0] == 3926 EXCEPTION_INFO_EXEC_VIOLATION) { 3927 return EXCEPTION_EXECUTE_HANDLER; 3928 } 3929 return EXCEPTION_CONTINUE_SEARCH; 3930 } 3931 3932 void nx_check_protection() { 3933 // If NX is enabled we'll get an exception calling into code on the stack 3934 char code[] = { (char)0xC3 }; // ret 3935 void *code_ptr = (void *)code; 3936 __try { 3937 __asm call code_ptr 3938 } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) { 3939 tty->print_raw_cr("NX protection detected."); 3940 } 3941 } 3942 #endif // _WIN64 3943 #endif // PRODUCT 3944 3945 // this is called _before_ the global arguments 
// have been parsed
void os::init(void) {
  _initial_pid = _getpid();

  init_random(1234567);

  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // For better scalability on MP systems (must be called after initialize_system_info)
#ifndef PRODUCT
  if (is_MP()) {
    NoYieldsInMicrolock = true;
  }
#endif
  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
    os::win32::is_windows_2003());

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  // Duplicate the pseudo handle so main_thread is a real handle usable from
  // any thread.
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();
}

// To install functions for atexit processing
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

static jint initSock();

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // Allocate a single page and mark it as readable for safepoint polling
  address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
  guarantee( polling_page != NULL, "Reserve Failed for polling page");

  address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
  guarantee( return_page != NULL, "Commit Failed for polling page");

  os::set_polling_page( polling_page );

#ifndef PRODUCT
  if( Verbose && PrintMiscellaneous )
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    // Reserve+commit a writable page used for memory-serialization instead
    // of issuing membars.
    address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
    guarantee( mem_serialize_page != NULL, "Reserve Failed for memory serialize page");

    return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
    guarantee( return_page != NULL, "Commit Failed for memory serialize page");

    os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
    if(Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  // Setup Windows Exceptions

  // for debugging float code generation bugs
  if (ForceFloatExceptions) {
#ifndef _WIN64
    static long fp_control_word = 0;
    __asm { fstcw fp_control_word }
    // see Intel PPro Manual, Vol. 2, p 7-16
    const long precision = 0x20;
    const long underflow = 0x10;
    const long overflow  = 0x08;
    const long zero_div  = 0x04;
    const long denorm    = 0x02;
    const long invalid   = 0x01;
    fp_control_word |= invalid;
    __asm { fldcw fp_control_word }
#endif
  }

  // If stack_commit_size is 0, windows will reserve the default size,
  // but only commit a small portion of it.
  size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
  size_t default_reserve_size = os::win32::default_stack_size();
  size_t actual_reserve_size = stack_commit_size;
  if (stack_commit_size < default_reserve_size) {
    // If stack_commit_size == 0, we want this too
    actual_reserve_size = default_reserve_size;
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size.  Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  size_t min_stack_allowed =
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
            2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
  if (actual_reserve_size < min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, "
                  "Specify at least %dk",
                  min_stack_allowed / K);
    return JNI_ERR;
  }

  JavaThread::set_stack_size_at_create(stack_commit_size);

  // Calculate theoretical max. size of Threads to guard against artificial
  // out-of-memory situations, where all available address-space has been
  // reserved by thread stacks.
  assert(actual_reserve_size != 0, "Must have a stack");

  // Calculate the thread limit when we should start doing Virtual Memory
  // banging. Currently when the threads will have used all but 200Mb of space.
  //
  // TODO: consider performing a similar calculation for commit size instead
  // as reserve size, since on a 64-bit platform we'll run into that more
  // often than running out of virtual memory space.  We can use the
  // lower value of the two calculations as the os_thread_limit.
  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);

  // at exit methods are called in the reverse order of their registration.
  // there is no limit to the number of functions registered. atexit does
  // not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

#ifndef _WIN64
  // Print something if NX is enabled (win32 on AMD64)
  NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
#endif

  // initialize thread priority policy
  prio_init();

  if (UseNUMA && !ForceNUMA) {
    UseNUMA = false; // We don't fully support this yet
  }

  if (UseNUMAInterleaving) {
    // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
    bool success = numa_interleaving_init();
    if (!success) UseNUMAInterleaving = false;
  }

  if (initSock() != JNI_OK) {
    return JNI_ERR;
  }

  return JNI_OK;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  DWORD old_status;
  if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status) )
    fatal("Could not disable polling page");
};

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  DWORD old_status;
  if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status) )
    fatal("Could not enable polling page");
};


// stat() wrapper: converts the path to native form, then (optionally)
// normalizes st_mtime so it is independent of the system timezone.
int os::stat(const char *path, struct stat *sbuf) {
  char pathbuf[MAX_PATH];
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  int ret = ::stat(pathbuf, sbuf);
  if (sbuf != NULL && UseUTCFileTimestamp) {
    // Fix for 6539723.
    // st_mtime returned from stat() is dependent on
    // the system timezone and so can return different values for the
    // same file if/when daylight savings time changes.  This adjustment
    // makes sure the same timestamp is returned regardless of the TZ.
    //
    // See:
    // http://msdn.microsoft.com/library/
    //   default.asp?url=/library/en-us/sysinfo/base/
    //   time_zone_information_str.asp
    // and
    // http://msdn.microsoft.com/library/default.asp?url=
    //   /library/en-us/sysinfo/base/settimezoneinformation.asp
    //
    // NOTE: there is an insidious bug here:  If the timezone is changed
    // after the call to stat() but before 'GetTimeZoneInformation()', then
    // the adjustment we do here will be wrong and we'll return the wrong
    // value (which will likely end up creating an invalid class data
    // archive).  Absent a better API for this, or some time zone locking
    // mechanism, we'll have to live with this risk.
    TIME_ZONE_INFORMATION tz;
    DWORD tzid = GetTimeZoneInformation(&tz);
    int daylightBias =
      (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias;
    // Biases are in minutes; st_mtime is in seconds.
    sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
  }
  return ret;
}


// Combine a FILETIME's two 32-bit halves into a single jlong (100-ns units).
#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))


// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.

// current_thread_cpu_time() is not optimized for Windows yet
jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
}

jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns.
  return os::thread_cpu_time(thread, true /* user+sys */);
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
}

// Returns the CPU time consumed by the thread in nanoseconds (FILETIME's
// 100-ns units * 100), or -1 if GetThreadTimes fails.  On non-NT systems
// falls back to a wall-clock based estimate from timeGetTime().
jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
  // This code is a copy from classic VM -> hpi::sysThreadCPUTime
  // If this function changes, os::is_thread_cpu_time_supported() should too
  if (os::win32::is_nt()) {
    FILETIME CreationTime;
    FILETIME ExitTime;
    FILETIME KernelTime;
    FILETIME UserTime;

    if ( GetThreadTimes(thread->osthread()->thread_handle(),
                        &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
      return -1;
    else
      if (user_sys_cpu_time) {
        return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
      } else {
        return FT2INT64(UserTime) * 100;
      }
  } else {
    return (jlong) timeGetTime() * 1000000;
  }
}

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;     // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;      // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;       // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;     // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;      // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
}

// Probe whether GetThreadTimes works on this OS; must stay in sync with
// os::thread_cpu_time above.
bool os::is_thread_cpu_time_supported() {
  // see os::thread_cpu_time
  if (os::win32::is_nt()) {
    FILETIME CreationTime;
    FILETIME ExitTime;
    FILETIME KernelTime;
    FILETIME UserTime;

    if ( GetThreadTimes(GetCurrentThread(),
                        &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
      return false;
    else
      return true;
  } else {
    return false;
  }
}

// Windows doesn't provide a loadavg primitive so this is stubbed out for now.
// It does have primitives (PDH API) to get CPU usage and run queue length.
// "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
// If we wanted to implement loadavg on Windows, we have a few options:
//
// a) Query CPU usage and run queue length and "fake" an answer by
//    returning the CPU usage if it's under 100%, and the run queue
//    length otherwise.  It turns out that querying is pretty slow
//    on Windows, on the order of 200 microseconds on a fast machine.
//    Note that on the Windows the CPU usage value is the % usage
//    since the last time the API was called (and the first call
//    returns 100%), so we'd have to deal with that as well.
//
// b) Sample the "fake" answer using a sampling thread and store
//    the answer in a global variable.  The call to loadavg would
//    just return the value of the global, avoiding the slow query.
//
// c) Sample a better answer using exponential decay to smooth the
//    value.  This is basically the algorithm used by UNIX kernels.
//
// Note that sampling thread starvation could affect both (b) and (c).
// Not implemented on Windows (see the discussion above); always returns -1.
int os::loadavg(double loadavg[], int nelem) {
  return -1;
}


// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
bool os::dont_yield() {
  return DontYieldALot;
}

// This method is a slightly reworked copy of JDK's sysOpen
// from src/windows/hpi/src/sys_api_md.c

// open() wrapper: converts the path to native form and forces binary,
// non-inheritable mode on the descriptor.
int os::open(const char *path, int oflag, int mode) {
  char pathbuf[MAX_PATH];

  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
}

FILE* os::open(int fd, const char* mode) {
  return ::_fdopen(fd, mode);
}

// Is a (classpath) directory empty?
// NOTE(review): the path is passed to FindFirstFile as-is (no "\*" wildcard
// appended) — verify callers pass a search pattern, not a bare directory.
bool os::dir_is_empty(const char* path) {
  WIN32_FIND_DATA fd;
  HANDLE f = FindFirstFile(path, &fd);
  if (f == INVALID_HANDLE_VALUE) {
    return true;
  }
  FindClose(f);
  return false;
}

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
  if (!rewrite_existing) {
    oflags |= _O_EXCL;  // fail instead of truncating an existing file
  }
  return ::open(path, oflags, _S_IREAD | _S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}


jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::_lseeki64(fd, offset, whence);
}

// This method is a slightly reworked copy of JDK's sysNativePath
// from src/windows/hpi/src/path_md.c

/* Convert a pathname to native format.  On win32, this involves forcing all
   separators to be '\\' rather than '/' (both are legal inputs, but Win95
   sometimes rejects '/') and removing redundant separators.  The input path is
   assumed to have been converted into the character encoding used by the local
   system.  Because this might be a double-byte encoding, care is taken to
   treat double-byte lead characters correctly.

   This procedure modifies the given path in place, as the result is never
   longer than the original.  There is no error return; this operation always
   succeeds. */
char * os::native_path(char *path) {
  char *src = path, *dst = path, *end = path;
  char *colon = NULL;  /* If a drive specifier is found, this will
                          point to the colon following the drive
                          letter */

  /* Assumption: '/', '\\', ':', and drive letters are never lead bytes */
  assert(((!::IsDBCSLeadByte('/'))
    && (!::IsDBCSLeadByte('\\'))
    && (!::IsDBCSLeadByte(':'))),
    "Illegal lead byte");

  /* Check for leading separators */
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    /* Remove leading separators if followed by drive specifier.  This
       hack is necessary to support file URLs containing drive
       specifiers (e.g., "file://c:/path").  As a side effect,
       "/c:/path" can be used as an alternative to "c:/path". */
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      /* UNC pathname: Retain first separator; leave src pointed at
         second separator so that further separators will be collapsed
         into the second separator.  The result will be a pathname
         beginning with "\\\\" followed (most likely) by a host name. */
      src = dst = path + 1;
      path[0] = '\\';  /* Force first separator to '\\' */
    }
  }

  end = dst;

  /* Remove redundant separators from remainder of path, forcing all
     separators to be '\\' rather than '/'. Also, single byte space
     characters are removed from the end of the path because those
     are not legal ending characters on this operating system.
  */
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      while (isfilesep(*src)) src++;
      if (*src == '\0') {
        /* Check for trailing separator */
        end = dst;
        if (colon == dst - 2) break;  /* "z:\\" */
        if (dst == path + 1) break;   /* "\\" */
        if (dst == path + 2 && isfilesep(path[0])) {
          /* "\\\\" is not collapsed to "\\" because "\\\\" marks the
             beginning of a UNC pathname.  Even though it is not, by
             itself, a valid UNC pathname, we leave it as is in order
             to be consistent with the path canonicalizer as well
             as the win32 APIs, which treat this case as an invalid
             UNC pathname rather than as an alias for the root
             directory of the current drive. */
          break;
        }
        end = --dst;  /* Path does not denote a root directory, so
                         remove trailing separator */
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) {  /* Copy a double-byte character */
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else {  /* Copy a single-byte character */
        char c = *src++;
        *dst++ = c;
        /* Space is not a legal ending character */
        if (c != ' ') end = dst;
      }
    }
  }

  *end = '\0';

  /* For "z:", add "."
     to work around a bug in the C runtime library */
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}

// This code is a copy of JDK's sysSetLength
// from src/windows/hpi/src/sys_api_md.c

// Truncate (or extend) the file underlying fd to exactly `length` bytes.
// Returns 0 on success, -1 on failure.
int os::ftruncate(int fd, jlong length) {
  HANDLE h = (HANDLE)::_get_osfhandle(fd);
  long high = (long)(length >> 32);
  DWORD ret;

  if (h == (HANDLE)(-1)) {
    return -1;
  }

  // Position the file pointer at `length`, then cut/extend the file there.
  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
    return -1;
  }

  if (::SetEndOfFile(h) == FALSE) {
    return -1;
  }

  return 0;
}


// This code is a copy of JDK's sysSync
// from src/windows/hpi/src/sys_api_md.c
// except for the legacy workaround for a bug in Win 98

// Flush OS buffers for fd to disk.  ERROR_ACCESS_DENIED is tolerated
// (FlushFileBuffers fails that way for read-only handles).
int os::fsync(int fd) {
  HANDLE handle = (HANDLE)::_get_osfhandle(fd);

  if ( (!::FlushFileBuffers(handle)) &&
       (GetLastError() != ERROR_ACCESS_DENIED) ) {
    /* from winerror.h */
    return -1;
  }
  return 0;
}

static int nonSeekAvailable(int, long *);
static int stdinAvailable(int, long *);

#define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
#define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)

// This code is a copy of JDK's sysAvailable
// from src/windows/hpi/src/sys_api_md.c

// Store in *bytes the number of bytes readable without blocking on fd.
// For character devices / pipes this peeks the pipe (or console); for
// regular files it is computed by seeking.  Returns TRUE/FALSE.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      int ret;
      long lpbytes;
      if (fd == 0) {
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
      return FALSE;
    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
      return FALSE;
    }
    *bytes = end - cur;
    return TRUE;
  } else {
    return FALSE;
  }
}

// This code is a copy of JDK's nonSeekAvailable
// from src/windows/hpi/src/sys_api_md.c

static int nonSeekAvailable(int fd, long *pbytes) {
  /* This is used for available on non-seekable devices
   * (like both named and anonymous pipes, such as pipes
   * connected to an exec'd process).
   * Standard Input is a special case.
   *
   */
  HANDLE han;

  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
    return FALSE;
  }

  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
    /* PeekNamedPipe fails when at EOF.  In that case we
     * simply make *pbytes = 0 which is consistent with the
     * behavior we get on Solaris when an fd is at EOF.
     * The only alternative is to raise an Exception,
     * which isn't really warranted.
     */
    if (::GetLastError() != ERROR_BROKEN_PIPE) {
      return FALSE;
    }
    *pbytes = 0;
  }
  return TRUE;
}

#define MAX_INPUT_EVENTS 2000

// This code is a copy of JDK's stdinAvailable
// from src/windows/hpi/src/sys_api_md.c

// Estimate the number of bytes available on the console's stdin by peeking
// pending key-down events; falls back to nonSeekAvailable when stdin is a
// pipe (GetNumberOfConsoleInputEvents fails for non-console handles).
static int stdinAvailable(int fd, long *pbytes) {
  HANDLE han;
  DWORD numEventsRead = 0;  /* Number of events read from buffer */
  DWORD numEvents = 0;      /* Number of events in buffer */
  DWORD i = 0;              /* Loop index */
  DWORD curLength = 0;      /* Position marker */
  DWORD actualLength = 0;   /* Number of bytes readable */
  BOOL error = FALSE;       /* Error holder */
  INPUT_RECORD *lpBuffer;   /* Pointer to records of input events */

  if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
    return FALSE;
  }

  /* Construct an array of input records in the console buffer */
  error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
  if (error == 0) {
    return nonSeekAvailable(fd, pbytes);
  }

  /* lpBuffer must fit into 64K or else PeekConsoleInput fails */
  if (numEvents > MAX_INPUT_EVENTS) {
    numEvents = MAX_INPUT_EVENTS;
  }

  lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
  if (lpBuffer == NULL) {
    return FALSE;
  }

  error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
  if (error == 0) {
    os::free(lpBuffer, mtInternal);
    return FALSE;
  }

  /* Examine input records for the number of bytes available */
  for(i=0; i<numEvents; i++) {
    if (lpBuffer[i].EventType == KEY_EVENT) {

      KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
                                      &(lpBuffer[i].Event);
      if (keyRecord->bKeyDown == TRUE) {
        CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
        curLength++;
        /* Only bytes up to (and including) the last CR are readable
           without blocking on a line-buffered console. */
        if (*keyPressed == '\r') {
          actualLength = curLength;
        }
      }
    }
  }

  if(lpBuffer !=
NULL) {
    os::free(lpBuffer, mtInternal);
  }

  *pbytes = (long) actualLength;
  return TRUE;
}

// Map a block of memory.
// Maps [file_offset, file_offset + bytes) of file_name into memory at the
// requested address. When allow_exec is set, the data is read into committed
// anonymous memory instead of being file-mapped (see comment in the body).
// Returns the base address of the mapping, or NULL on failure.
// Note: 'fd' is unused on Windows; the file is reopened by name.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
  HANDLE hFile;
  char* base;

  hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
                     OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
  if (hFile == NULL) {
    if (PrintMiscellaneous && Verbose) {
      DWORD err = GetLastError();
      tty->print_cr("CreateFile() failed: GetLastError->%ld.", err);
    }
    return NULL;
  }

  if (allow_exec) {
    // CreateFileMapping/MapViewOfFileEx can't map executable memory
    // unless it comes from a PE image (which the shared archive is not.)
    // Even VirtualProtect refuses to give execute access to mapped memory
    // that was not previously executable.
    //
    // Instead, stick the executable region in anonymous memory. Yuck.
    // Penalty is that ~4 pages will not be shareable - in the future
    // we might consider DLLizing the shared archive with a proper PE
    // header so that mapping executable + sharing is possible.

    base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
                                PAGE_READWRITE);
    if (base == NULL) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err);
      }
      CloseHandle(hFile);
      return NULL;
    }

    DWORD bytes_read;
    OVERLAPPED overlapped;
    overlapped.Offset = (DWORD)file_offset;
    overlapped.OffsetHigh = 0;
    overlapped.hEvent = NULL;
    // ReadFile guarantees that if the return value is true, the requested
    // number of bytes were read before returning.
    bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
    if (!res) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("ReadFile() failed: GetLastError->%ld.", err);
      }
      release_memory(base, bytes);
      CloseHandle(hFile);
      return NULL;
    }
  } else {
    HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
                                    NULL /*file_name*/);
    if (hMap == NULL) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err);
      }
      CloseHandle(hFile);
      return NULL;
    }

    DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
    base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
                                  (DWORD)bytes, addr);
    if (base == NULL) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err);
      }
      CloseHandle(hMap);
      CloseHandle(hFile);
      return NULL;
    }

    // The view remains valid after the mapping-object handle is closed,
    // so hMap can be released immediately.
    if (CloseHandle(hMap) == 0) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err);
      }
      CloseHandle(hFile);
      return base;
    }
  }

  if (allow_exec) {
    DWORD old_protect;
    DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
    bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;

    if (!res) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err);
      }
      // Don't consider this a hard error, on IA32 even if the
      // VirtualProtect fails, we should still be able to execute
      CloseHandle(hFile);
      return base;
    }
  }

  if (CloseHandle(hFile) == 0) {
    if (PrintMiscellaneous && Verbose) {
      DWORD err = GetLastError();
      tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err);
    }
    return base;
  }

  return base;
}


// Remap a block of memory.
// Windows cannot remap an existing view in place, so this is implemented as
// unmap followed by map (see the race-window caveat in the body).
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // This OS does not allow existing memory maps to be remapped so we
  // have to unmap the memory before we remap it.
  if (!os::unmap_memory(addr, bytes)) {
    return NULL;
  }

  // There is a very small theoretical window between the unmap_memory()
  // call above and the map_memory() call below where a thread in native
  // code may be able to access an address that is no longer mapped.

  return os::map_memory(fd, file_name, file_offset, addr, bytes,
                        read_only, allow_exec);
}


// Unmap a block of memory.
// Returns true=success, otherwise false.

// Unmap a previously mapped view. Returns true on success, false otherwise.
bool os::pd_unmap_memory(char* addr, size_t bytes) {
  BOOL result = UnmapViewOfFile(addr);
  if (result == 0) {
    if (PrintMiscellaneous && Verbose) {
      DWORD err = GetLastError();
      tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err);
    }
    return false;
  }
  return true;
}

// Block the VM: create (or truncate) a pause file and spin until some
// external party deletes it. Lets a developer attach a debugger before
// the VM proceeds. The file name comes from -XX:PauseAtStartupFile, or
// defaults to ./vm.paused.<pid>.
void os::pause() {
  char filename[MAX_PATH];
  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
    jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
  } else {
    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
  }

  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
  if (fd != -1) {
    struct stat buf;
    ::close(fd);
    // Poll every 100ms until the pause file is removed.
    while (::stat(filename, &buf) == 0) {
      Sleep(100);
    }
  } else {
    jio_fprintf(stderr,
      "Could not open pause file '%s', continuing immediately.\n", filename);
  }
}

os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() {
  assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread");
}

/*
 * See the caveats for this class in os_windows.hpp
 * Protects the callback call so that raised OS EXCEPTIONS causes a jump back
 * into this method and returns false. If no OS EXCEPTION was raised, returns
 * true.
 * The callback is supposed to provide the method that should be protected.
 */
bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
  assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
  assert(!WatcherThread::watcher_thread()->has_crash_protection(),
      "crash_protection already set?");

  bool success = true;
  // Structured Exception Handling: any OS exception raised inside cb.call()
  // unwinds to the __except filter instead of crashing the VM.
  __try {
    WatcherThread::watcher_thread()->set_crash_protection(this);
    cb.call();
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // only for protection, nothing to do
    success = false;
  }
  WatcherThread::watcher_thread()->set_crash_protection(NULL);
  return success;
}

// An Event wraps a win32 "CreateEvent" kernel handle.
//
// We have a number of choices regarding "CreateEvent" win32 handle leakage:
//
// 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle
//    field, and call CloseHandle() on the win32 event handle.  Unpark() would
//    need to be modified to tolerate finding a NULL (invalid) win32 event handle.
//    In addition, an unpark() operation might fetch the handle field, but the
//    event could recycle between the fetch and the SetEvent() operation.
//    SetEvent() would either fail because the handle was invalid, or inadvertently work,
//    as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
//    on an stale but recycled handle would be harmless, but in practice this might
//    confuse other non-Sun code, so it's not a viable approach.
//
// 2: Once a win32 event handle is associated with an Event, it remains associated
//    with the Event.  The event handle is never closed.  This could be construed
//    as handle leakage, but only up to the maximum # of threads that have been extant
//    at any one time.  This shouldn't be an issue, as windows platforms typically
//    permit a process to have hundreds of thousands of open handles.
//
// 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
//    and release unused handles.
//
// 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
//    It's not clear, however, that we wouldn't be trading one type of leak for another.
//
// 5. Use an RCU-like mechanism (Read-Copy Update).
//    Or perhaps something similar to Maged Michael's "Hazard pointers".
//
// We use (2).
//
// TODO-FIXME:
// 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
// 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
//    to recover from (or at least detect) the dreaded Windows 841176 bug.
// 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
//    into a single win32 CreateEvent() handle.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block
//
// _Event serves as a restricted-range semaphore :
//   -1 : thread is blocked
//    0 : neutral  - thread is running or ready
//    1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be
// with explicit "PARKED" and "SIGNALED" bits.

// Timed park: block the owning thread for up to Millis milliseconds, or
// until unpark()ed. Returns OS_OK if a permit was consumed (or unpark raced
// with the timeout), OS_TIMEOUT if the wait timed out.
int os::PlatformEvent::park (jlong Millis) {
  guarantee (_ParkHandle != NULL , "Invariant") ;
  guarantee (Millis > 0          , "Invariant") ;
  int v ;

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.

  // Atomically decrement _Event: 1->0 consumes a pending permit,
  // 0->-1 means this thread must block.
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee ((v == 0) || (v == 1), "invariant") ;
  if (v != 0) return OS_OK ;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000 ;
  DWORD rv = WAIT_TIMEOUT ;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis ;     // set prd = MAX (Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT ;
    }
    rv = ::WaitForSingleObject (_ParkHandle, prd) ;
    assert (rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed") ;
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd ;
    }
  }
  v = _Event ;
  _Event = 0 ;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence() ;
  // If we encounter a nearly simultanous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT ;
}

// Untimed park: block until unpark()ed (or a permit is already pending).
void os::PlatformEvent::park () {
  guarantee (_ParkHandle != NULL, "Invariant") ;
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  int v ;
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee ((v == 0) || (v == 1), "invariant") ;
  if (v != 0) return ;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject (_ParkHandle, INFINITE) ;
    assert (rv == WAIT_OBJECT_0, "WaitForSingleObject failed") ;
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case can
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0 ;
  OrderAccess::fence() ;
  guarantee (_Event >= 0, "invariant") ;
}

void os::PlatformEvent::unpark() {
  guarantee (_ParkHandle != NULL, "Invariant") ;

  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  //          That is, we can safely transition _Event from -1 to either
  //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
  //          unpark() calls.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // Old value >= 0 means no thread was blocked; only signal the kernel
  // event when a parked (-1) thread must be woken.
  if (Atomic::xchg(1, &_Event) >= 0) return;

  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

/*
 * The Windows implementation of Park is very straightforward: Basic
 * operations on Win32 Events turn out to have the right semantics to
 * use them directly. We opportunistically resuse the event inherited
 * from Monitor.
 */


// j.u.c.LockSupport park: time < 0 means don't wait; time == 0 with
// !isAbsolute means wait forever; isAbsolute means 'time' is a deadline
// in millis; otherwise 'time' is a relative wait in nanos.
void Parker::park(bool isAbsolute, jlong time) {
  guarantee (_ParkEvent != NULL, "invariant") ;
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  }
  else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  }
  else if  (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) // already elapsed
      return;
  }
  else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0)   // Wait for the minimal time unit if zero
      time = 1;
  }

  JavaThread* thread = (JavaThread*)(Thread::current());
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
    WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    ResetEvent(_ParkEvent);
    return;
  }
  else {
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jt->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent,  time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (jt->handle_special_suspend_equivalent_condition()) {
      jt->java_suspend_self();
    }
  }
}

void Parker::unpark() {
  guarantee (_ParkEvent != NULL, "invariant") ;
  SetEvent(_ParkEvent);
}

// Run the
// specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't create a new process).
// Note: use_vfork_if_available is a POSIX-oriented hint and is unused here.
int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
  STARTUPINFO si;
  PROCESS_INFORMATION pi;

  memset(&si, 0, sizeof(si));
  si.cb = sizeof(si);
  memset(&pi, 0, sizeof(pi));
  BOOL rslt = CreateProcess(NULL,   // executable name - use command line
                            cmd,    // command line
                            NULL,   // process security attribute
                            NULL,   // thread security attribute
                            TRUE,   // inherits system handles
                            0,      // no creation flags
                            NULL,   // use parent's environment block
                            NULL,   // use parent's starting directory
                            &si,    // (in) startup information
                            &pi);   // (out) process information

  if (rslt) {
    // Wait until child process exits.
    WaitForSingleObject(pi.hProcess, INFINITE);

    DWORD exit_code;
    GetExitCodeProcess(pi.hProcess, &exit_code);

    // Close process and thread handles.
    CloseHandle(pi.hProcess);
    CloseHandle(pi.hThread);

    return (int)exit_code;
  } else {
    return -1;
  }
}

//--------------------------------------------------------------------------------------------------
// Non-product code

static int mallocDebugIntervalCounter = 0;
static int mallocDebugCounter = 0;
// Walk and validate the C heap (-XX:MallocVerifyStart/-XX:MallocVerifyInterval
// gating, or unconditionally when 'force' is set). Calls fatal() on corruption.
bool os::check_heap(bool force) {
  if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
  if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
    // Note: HeapValidate executes two hardware breakpoints when it finds something
    // wrong; at these points, eax contains the address of the offending block (I think).
    // To get to the explicit error message(s) below, just continue twice.
    HANDLE heap = GetProcessHeap();
    { HeapLock(heap);
    PROCESS_HEAP_ENTRY phe;
    phe.lpData = NULL;
    while (HeapWalk(heap, &phe) != 0) {
      if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
          !HeapValidate(heap, 0, phe.lpData)) {
        // NOTE(review): "%#x" prints a pointer via an int conversion -
        // presumably truncates lpData on 64-bit Windows; verify/consider
        // PTR_FORMAT.
        tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
        tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData);
        fatal("corrupted C heap");
      }
    }
    DWORD err = GetLastError();
    if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
      fatal(err_msg("heap walk aborted with error %d", err));
    }
    HeapUnlock(heap);
    }
    mallocDebugIntervalCounter = 0;
  }
  return true;
}


bool os::find(address addr, outputStream* st) {
  // Nothing yet
  return false;
}

// Vectored/SEH filter used for the memory-serialize page: an access
// violation on that page is expected and execution simply continues.
LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
  DWORD exception_code = e->ExceptionRecord->ExceptionCode;

  if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
    JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow();
    PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (os::is_memory_serialize_page(thread, addr))
      return EXCEPTION_CONTINUE_EXECUTION;
  }

  return EXCEPTION_CONTINUE_SEARCH;
}

// We don't build a headless jre for Windows
bool os::is_headless_jre() { return false; }

// Initialize Winsock 2.2. Returns JNI_OK on success, JNI_ERR otherwise.
static jint initSock() {
  WSADATA wsadata;

  if (!os::WinSock2Dll::WinSock2Available()) {
    jio_fprintf(stderr, "Could not load Winsock (error: %d)\n",
      ::GetLastError());
    return JNI_ERR;
  }

  if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
    jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
      ::GetLastError());
    return JNI_ERR;
  }
  return JNI_OK;
}

struct hostent* os::get_host_by_name(char* name) {
  return (struct hostent*)os::WinSock2Dll::gethostbyname(name);
}

// Thin wrappers over the Winsock socket API follow; they exist so shared
// runtime code can call a uniform os:: interface on every platform.

int os::socket_close(int fd) {
  return ::closesocket(fd);
}

int os::socket_available(int fd, jint *pbytes) {
  int ret = ::ioctlsocket(fd, FIONREAD, (u_long*)pbytes);
  return (ret < 0) ? 0 : 1;
}

int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}

int os::listen(int fd, int count) {
  return ::listen(fd, count);
}

int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  return ::connect(fd, him, len);
}

int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
  return ::accept(fd, him, len);
}

int os::sendto(int fd, char* buf, size_t len, uint flags,
               struct sockaddr* to, socklen_t tolen) {

  return ::sendto(fd, buf, (int)len, flags, to, tolen);
}

int os::recvfrom(int fd, char *buf, size_t nBytes, uint flags,
                 sockaddr* from, socklen_t* fromlen) {

  return ::recvfrom(fd, buf, (int)nBytes, flags, from, fromlen);
}

int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  return ::recv(fd, buf, (int)nBytes, flags);
}

int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

// Wait up to 'timeout' milliseconds for fd to become readable.
// Returns the ::select() result (>0 readable, 0 timed out, <0 error).
int os::timeout(int fd, long timeout) {
  fd_set tbl;
  struct timeval t;

  t.tv_sec  = timeout / 1000;
  t.tv_usec = (timeout % 1000) * 1000;

  // Build the single-entry fd_set directly (Winsock fd_set layout).
  tbl.fd_count    = 1;
  tbl.fd_array[0] = fd;

  return ::select(1, &tbl, 0, 0, &t);
}

int os::get_host_name(char* name, int namelen) {
  return ::gethostname(name, namelen);
}

int os::socket_shutdown(int fd, int howto) {
  return ::shutdown(fd, howto);
}

int os::bind(int fd, struct sockaddr* him, socklen_t len) {
  return ::bind(fd, him, len);
}

int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
  return ::getsockname(fd, him, len);
}

int os::get_sock_opt(int fd, int level, int optname,
                     char* optval, socklen_t* optlen) {
  return ::getsockopt(fd, level, optname, optval, optlen);
}

int os::set_sock_opt(int fd, int level, int optname,
                     const char* optval, socklen_t optlen) {
  return ::setsockopt(fd, level, optname, optval, optlen);
}

// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(IA32)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif

// returns true if thread could be suspended,
// false otherwise
static bool do_suspend(HANDLE* h) {
  if (h != NULL) {
    // SuspendThread returns the previous suspend count, or (DWORD)-1 on failure.
    if (SuspendThread(*h) != ~0) {
      return true;
    }
  }
  return false;
}

// resume the thread
// calling resume on an active thread is a no-op
static void do_resume(HANDLE* h) {
  if (h != NULL) {
    ResumeThread(*h);
  }
}

// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value.
void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

//
// Thread sampling implementation
//
// Suspend the target thread, capture its CPU context, hand it to the
// sampler via do_task(), then resume the thread and close the handle.
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}


// Kernel32 API
// Function-pointer types for Kernel32 entry points that may be absent on
// older Windows versions; they are resolved lazily via GetProcAddress.
typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG);
typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG);
typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG);

GetLargePageMinimum_Fn      os::Kernel32Dll::_GetLargePageMinimum = NULL;
VirtualAllocExNuma_Fn       os::Kernel32Dll::_VirtualAllocExNuma = NULL;
GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL;


BOOL os::Kernel32Dll::initialized = FALSE;
SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
  assert(initialized && _GetLargePageMinimum != NULL,
    "GetLargePageMinimumAvailable() not yet called");
  return _GetLargePageMinimum();
}

BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetLargePageMinimum != NULL;
}

BOOL os::Kernel32Dll::NumaCallsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _VirtualAllocExNuma != NULL;
}

LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr, SIZE_T bytes, DWORD flags, DWORD prot, DWORD node) {
  assert(initialized && _VirtualAllocExNuma != NULL,
    "NUMACallsAvailable() not yet called");

  return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
}

BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
  assert(initialized && _GetNumaHighestNodeNumber != NULL,
    "NUMACallsAvailable() not yet called");

  return _GetNumaHighestNodeNumber(ptr_highest_node_number);
}

BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node, PULONGLONG proc_mask) {
  assert(initialized && _GetNumaNodeProcessorMask != NULL,
    "NUMACallsAvailable() not yet called");

  return _GetNumaNodeProcessorMask(node, proc_mask);
}

USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
  ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) {
  if (!initialized) {
    initialize();
  }

  // Gracefully degrade to an empty backtrace when the entry point is absent.
  if (_RtlCaptureStackBackTrace != NULL) {
    return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
      BackTrace, BackTraceHash);
  } else {
    return 0;
  }
}

// Resolve the Kernel32 entry points needed on all supported Windows versions.
void os::Kernel32Dll::initializeCommon() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");
    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
    _VirtualAllocExNuma =
(VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
    _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
    _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
    _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace");
    initialized = TRUE;
  }
}



#ifndef JDK6_OR_EARLIER

// On JDK7+ the minimum supported Windows version exports these APIs
// directly, so the wrappers below call them statically.

void os::Kernel32Dll::initialize() {
  initializeCommon();
}


// Kernel32 API
inline BOOL os::Kernel32Dll::SwitchToThread() {
  return ::SwitchToThread();
}

inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  return true;
}

// Help tools
inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
  return true;
}

inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
  return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32First(hSnapshot, lpme);
}

inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32Next(hSnapshot, lpme);
}

inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  ::GetNativeSystemInfo(lpSystemInfo);
}

// PSAPI API
inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
  return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
  return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
  return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

inline BOOL os::PSApiDll::PSApiAvailable() {
  return true;
}


// WinSock2 API
inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
  return ::WSAStartup(wVersionRequested, lpWSAData);
}

inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  return ::gethostbyname(name);
}

inline BOOL os::WinSock2Dll::WinSock2Available() {
  return true;
}

// Advapi API
inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
   BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
   PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
  return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
    BufferLength, PreviousState, ReturnLength);
}

inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
  PHANDLE TokenHandle) {
  return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}

inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
  return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}

inline BOOL os::Advapi32Dll::AdvapiAvailable() {
  return true;
}

void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//   sym_name: Symbol in library we are looking for
//   lib_name: Name of library to look in, NULL for shared libs.
//   is_absolute_path == true if lib_name is absolute path to agent
//                       such as "C:/a/b/L.dll"
//                    == false if only the base name of the library is passed in
//                       such as "L"
// Returns a newly C-heap-allocated string (caller frees), or NULL on failure.
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // +2: one for the '_' separator, one for the trailing NUL.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    // __stdcall decoration: keep any trailing "@XX" after the library name.
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}

#else
// JDK6-and-earlier build: these Kernel32 APIs may be missing on the oldest
// supported Windows versions, so they are resolved dynamically.
// Kernel32 API
typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD,DWORD);
typedef BOOL (WINAPI* Module32First_Fn)(HANDLE,LPMODULEENTRY32);
typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE,LPMODULEENTRY32);
typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);

SwitchToThread_Fn           os::Kernel32Dll::_SwitchToThread = NULL;
CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
Module32First_Fn            os::Kernel32Dll::_Module32First = NULL;
Module32Next_Fn             os::Kernel32Dll::_Module32Next = NULL;
GetNativeSystemInfo_Fn      os::Kernel32Dll::_GetNativeSystemInfo = NULL;

void os::Kernel32Dll::initialize() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");

    _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
    _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
      ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
    _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First");
    _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next");
    _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo");
    initializeCommon();  // resolve the functions that always need resolving

    initialized = TRUE;
  }
}

BOOL os::Kernel32Dll::SwitchToThread() {
  assert(initialized && _SwitchToThread != NULL,
    "SwitchToThreadAvailable() not yet called");
  return _SwitchToThread();
}


BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  if (!initialized) {
    initialize();
  }
  return _SwitchToThread != NULL;
}

// Help tools
BOOL os::Kernel32Dll::HelpToolsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _CreateToolhelp32Snapshot != NULL &&
         _Module32First != NULL &&
         _Module32Next != NULL;
}

HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
  assert(initialized && _CreateToolhelp32Snapshot != NULL,
    "HelpToolsAvailable() not yet called");

  return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  assert(initialized && _Module32First != NULL,
    "HelpToolsAvailable() not yet called");

  return _Module32First(hSnapshot, lpme);
}

// NOTE(review): this definition is marked 'inline' unlike its siblings in
// this #else branch -- looks unintentional; confirm before changing.
inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  assert(initialized && _Module32Next != NULL,
    "HelpToolsAvailable() not yet called");

  return _Module32Next(hSnapshot, lpme);
}


BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetNativeSystemInfo != NULL;
}

void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  assert(initialized && _GetNativeSystemInfo != NULL,
    "GetNativeSystemInfoAvailable() not yet called");

  _GetNativeSystemInfo(lpSystemInfo);
}

// PSAPI API

// Function-pointer types for PSAPI.DLL, loaded on demand.
typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD);
typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);;
typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD);

EnumProcessModules_Fn   os::PSApiDll::_EnumProcessModules = NULL;
GetModuleFileNameEx_Fn  os::PSApiDll::_GetModuleFileNameEx = NULL;
GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL;
BOOL                    os::PSApiDll::initialized = FALSE;

void os::PSApiDll::initialize() {
  if (!initialized) {
    HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0);
    if (handle != NULL) {
      _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle,
        "EnumProcessModules");
      _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle,
        "GetModuleFileNameExA");
      _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle,
        "GetModuleInformation");
    }
    initialized = TRUE;
  }
}



BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
  assert(initialized && _EnumProcessModules != NULL,
    "PSApiAvailable() not yet called");
  return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
  assert(initialized && _GetModuleFileNameEx != NULL,
    "PSApiAvailable() not yet called");
  return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
  assert(initialized && _GetModuleInformation != NULL,
    "PSApiAvailable() not yet called");
  return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

BOOL os::PSApiDll::PSApiAvailable() {
  if (!initialized) {
    initialize();
  }
  return _EnumProcessModules != NULL &&
         _GetModuleFileNameEx != NULL &&
         _GetModuleInformation != NULL;
}


// WinSock2 API
typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA);
typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...);

WSAStartup_Fn    os::WinSock2Dll::_WSAStartup = NULL;
gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL;
BOOL             os::WinSock2Dll::initialized = FALSE;

void os::WinSock2Dll::initialize() {
  if (!initialized) {
    HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0);
    if (handle != NULL) {
      _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup");
      _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname");
    }
    initialized = TRUE;
  }
}


BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
  assert(initialized && _WSAStartup != NULL,
    "WinSock2Available() not yet called");
  return _WSAStartup(wVersionRequested, lpWSAData);
}

struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  assert(initialized && _gethostbyname != NULL,
    "WinSock2Available() not yet called");
  return _gethostbyname(name);
}

BOOL os::WinSock2Dll::WinSock2Available() {
  if (!initialized) {
    initialize();
  }
  return _WSAStartup != NULL &&
         _gethostbyname != NULL;
}

// Advapi32 function-pointer types (definitions continue below).
typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD);
typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE);
typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, LPCTSTR, PLUID);
5746 AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL; 5747 OpenProcessToken_Fn os::Advapi32Dll::_OpenProcessToken = NULL; 5748 LookupPrivilegeValue_Fn os::Advapi32Dll::_LookupPrivilegeValue = NULL; 5749 BOOL os::Advapi32Dll::initialized = FALSE; 5750 5751 void os::Advapi32Dll::initialize() { 5752 if (!initialized) { 5753 HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0); 5754 if (handle != NULL) { 5755 _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle, 5756 "AdjustTokenPrivileges"); 5757 _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle, 5758 "OpenProcessToken"); 5759 _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle, 5760 "LookupPrivilegeValueA"); 5761 } 5762 initialized = TRUE; 5763 } 5764 } 5765 5766 BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle, 5767 BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength, 5768 PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) { 5769 assert(initialized && _AdjustTokenPrivileges != NULL, 5770 "AdvapiAvailable() not yet called"); 5771 return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState, 5772 BufferLength, PreviousState, ReturnLength); 5773 } 5774 5775 BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess, 5776 PHANDLE TokenHandle) { 5777 assert(initialized && _OpenProcessToken != NULL, 5778 "AdvapiAvailable() not yet called"); 5779 return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle); 5780 } 5781 5782 BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) { 5783 assert(initialized && _LookupPrivilegeValue != NULL, 5784 "AdvapiAvailable() not yet called"); 5785 return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid); 5786 } 5787 5788 BOOL os::Advapi32Dll::AdvapiAvailable() { 5789 if (!initialized) { 5790 initialize(); 5791 } 5792 return _AdjustTokenPrivileges != NULL && 5793 
_OpenProcessToken != NULL && 5794 _LookupPrivilegeValue != NULL; 5795 } 5796 5797 #endif 5798 5799 #ifndef PRODUCT 5800 5801 // test the code path in reserve_memory_special() that tries to allocate memory in a single 5802 // contiguous memory block at a particular address. 5803 // The test first tries to find a good approximate address to allocate at by using the same 5804 // method to allocate some memory at any address. The test then tries to allocate memory in 5805 // the vicinity (not directly after it to avoid possible by-chance use of that location) 5806 // This is of course only some dodgy assumption, there is no guarantee that the vicinity of 5807 // the previously allocated memory is available for allocation. The only actual failure 5808 // that is reported is when the test tries to allocate at a particular location but gets a 5809 // different valid one. A NULL return value at this point is not considered an error but may 5810 // be legitimate. 5811 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages. 5812 void TestReserveMemorySpecial_test() { 5813 if (!UseLargePages) { 5814 if (VerboseInternalVMTests) { 5815 gclog_or_tty->print("Skipping test because large pages are disabled"); 5816 } 5817 return; 5818 } 5819 // save current value of globals 5820 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; 5821 bool old_use_numa_interleaving = UseNUMAInterleaving; 5822 5823 // set globals to make sure we hit the correct code path 5824 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; 5825 5826 // do an allocation at an address selected by the OS to get a good one. 5827 const size_t large_allocation_size = os::large_page_size() * 4; 5828 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); 5829 if (result == NULL) { 5830 if (VerboseInternalVMTests) { 5831 gclog_or_tty->print("Failed to allocate control block with size " SIZE_FORMAT ". 
Skipping remainder of test.", 5832 large_allocation_size); 5833 } 5834 } else { 5835 os::release_memory_special(result, large_allocation_size); 5836 5837 // allocate another page within the recently allocated memory area which seems to be a good location. At least 5838 // we managed to get it once. 5839 const size_t expected_allocation_size = os::large_page_size(); 5840 char* expected_location = result + os::large_page_size(); 5841 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); 5842 if (actual_location == NULL) { 5843 if (VerboseInternalVMTests) { 5844 gclog_or_tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.", 5845 expected_location, large_allocation_size); 5846 } 5847 } else { 5848 // release memory 5849 os::release_memory_special(actual_location, expected_allocation_size); 5850 // only now check, after releasing any memory to avoid any leaks. 5851 assert(actual_location == expected_location, 5852 err_msg("Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead", 5853 expected_location, expected_allocation_size, actual_location)); 5854 } 5855 } 5856 5857 // restore globals 5858 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; 5859 UseNUMAInterleaving = old_use_numa_interleaving; 5860 } 5861 #endif // PRODUCT 5862