1 /* 2 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // Must be at least Windows 2000 or XP to use IsDebuggerPresent 26 #define _WIN32_WINNT 0x500 27 28 // no precompiled headers 29 #include "classfile/classLoader.hpp" 30 #include "classfile/systemDictionary.hpp" 31 #include "classfile/vmSymbols.hpp" 32 #include "code/icBuffer.hpp" 33 #include "code/vtableStubs.hpp" 34 #include "compiler/compileBroker.hpp" 35 #include "compiler/disassembler.hpp" 36 #include "interpreter/interpreter.hpp" 37 #include "jvm_windows.h" 38 #include "memory/allocation.inline.hpp" 39 #include "memory/filemap.hpp" 40 #include "mutex_windows.inline.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "os_share_windows.hpp" 43 #include "prims/jniFastGetField.hpp" 44 #include "prims/jvm.h" 45 #include "prims/jvm_misc.hpp" 46 #include "runtime/arguments.hpp" 47 #include "runtime/extendedPC.hpp" 48 #include "runtime/globals.hpp" 49 #include "runtime/interfaceSupport.hpp" 50 #include "runtime/java.hpp" 51 #include "runtime/javaCalls.hpp" 52 #include "runtime/mutexLocker.hpp" 53 #include "runtime/objectMonitor.hpp" 54 #include "runtime/orderAccess.inline.hpp" 55 #include "runtime/osThread.hpp" 56 #include "runtime/perfMemory.hpp" 57 #include "runtime/sharedRuntime.hpp" 58 #include "runtime/statSampler.hpp" 59 #include "runtime/stubRoutines.hpp" 60 #include "runtime/thread.inline.hpp" 61 #include "runtime/threadCritical.hpp" 62 #include "runtime/timer.hpp" 63 #include "services/attachListener.hpp" 64 #include "services/memTracker.hpp" 65 #include "services/runtimeService.hpp" 66 #include "utilities/decoder.hpp" 67 #include "utilities/defaultStream.hpp" 68 #include "utilities/events.hpp" 69 #include "utilities/growableArray.hpp" 70 #include "utilities/vmError.hpp" 71 72 #ifdef _DEBUG 73 #include <crtdbg.h> 74 #endif 75 76 77 #include <windows.h> 78 #include <sys/types.h> 79 #include <sys/stat.h> 80 #include <sys/timeb.h> 81 #include <objidl.h> 82 #include <shlobj.h> 83 84 #include <malloc.h> 85 #include <signal.h> 86 #include <direct.h> 
87 #include <errno.h> 88 #include <fcntl.h> 89 #include <io.h> 90 #include <process.h> // For _beginthreadex(), _endthreadex() 91 #include <imagehlp.h> // For os::dll_address_to_function_name 92 /* for enumerating dll libraries */ 93 #include <vdmdbg.h> 94 95 // for timer info max values which include all bits 96 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) 97 98 // For DLL loading/load error detection 99 // Values of PE COFF 100 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c 101 #define IMAGE_FILE_SIGNATURE_LENGTH 4 102 103 static HANDLE main_process; 104 static HANDLE main_thread; 105 static int main_thread_id; 106 107 static FILETIME process_creation_time; 108 static FILETIME process_exit_time; 109 static FILETIME process_user_time; 110 static FILETIME process_kernel_time; 111 112 #ifdef _M_IA64 113 #define __CPU__ ia64 114 #else 115 #ifdef _M_AMD64 116 #define __CPU__ amd64 117 #else 118 #define __CPU__ i486 119 #endif 120 #endif 121 122 // save DLL module handle, used by GetModuleFileName 123 124 HINSTANCE vm_lib_handle; 125 126 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) { 127 switch (reason) { 128 case DLL_PROCESS_ATTACH: 129 vm_lib_handle = hinst; 130 if(ForceTimeHighResolution) 131 timeBeginPeriod(1L); 132 break; 133 case DLL_PROCESS_DETACH: 134 if(ForceTimeHighResolution) 135 timeEndPeriod(1L); 136 137 break; 138 default: 139 break; 140 } 141 return true; 142 } 143 144 static inline double fileTimeAsDouble(FILETIME* time) { 145 const double high = (double) ((unsigned int) ~0); 146 const double split = 10000000.0; 147 double result = (time->dwLowDateTime / split) + 148 time->dwHighDateTime * (high/split); 149 return result; 150 } 151 152 // Implementation of os 153 154 bool os::getenv(const char* name, char* buffer, int len) { 155 int result = GetEnvironmentVariable(name, buffer, len); 156 return result > 0 && result < len; 157 } 158 159 bool os::unsetenv(const char* name) { 160 assert(name != NULL, "Null pointer"); 161 return 
(SetEnvironmentVariable(name, NULL) == TRUE); 162 } 163 164 // No setuid programs under Windows. 165 bool os::have_special_privileges() { 166 return false; 167 } 168 169 170 // This method is a periodic task to check for misbehaving JNI applications 171 // under CheckJNI, we can add any periodic checks here. 172 // For Windows at the moment does nothing 173 void os::run_periodic_checks() { 174 return; 175 } 176 177 #ifndef _WIN64 178 // previous UnhandledExceptionFilter, if there is one 179 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL; 180 181 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo); 182 #endif 183 void os::init_system_properties_values() { 184 /* sysclasspath, java_home, dll_dir */ 185 { 186 char *home_path; 187 char *dll_path; 188 char *pslash; 189 char *bin = "\\bin"; 190 char home_dir[MAX_PATH]; 191 192 if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) { 193 os::jvm_path(home_dir, sizeof(home_dir)); 194 // Found the full path to jvm.dll. 195 // Now cut the path to <java_home>/jre if we can. 
196 *(strrchr(home_dir, '\\')) = '\0'; /* get rid of \jvm.dll */ 197 pslash = strrchr(home_dir, '\\'); 198 if (pslash != NULL) { 199 *pslash = '\0'; /* get rid of \{client|server} */ 200 pslash = strrchr(home_dir, '\\'); 201 if (pslash != NULL) 202 *pslash = '\0'; /* get rid of \bin */ 203 } 204 } 205 206 home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal); 207 if (home_path == NULL) 208 return; 209 strcpy(home_path, home_dir); 210 Arguments::set_java_home(home_path); 211 212 dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal); 213 if (dll_path == NULL) 214 return; 215 strcpy(dll_path, home_dir); 216 strcat(dll_path, bin); 217 Arguments::set_dll_dir(dll_path); 218 219 if (!set_boot_path('\\', ';')) 220 return; 221 } 222 223 /* library_path */ 224 #define EXT_DIR "\\lib\\ext" 225 #define BIN_DIR "\\bin" 226 #define PACKAGE_DIR "\\Sun\\Java" 227 { 228 /* Win32 library search order (See the documentation for LoadLibrary): 229 * 230 * 1. The directory from which application is loaded. 231 * 2. The system wide Java Extensions directory (Java only) 232 * 3. System directory (GetSystemDirectory) 233 * 4. Windows directory (GetWindowsDirectory) 234 * 5. The PATH environment variable 235 * 6. The current directory 236 */ 237 238 char *library_path; 239 char tmp[MAX_PATH]; 240 char *path_str = ::getenv("PATH"); 241 242 library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) + 243 sizeof(BIN_DIR) + (path_str ? 
strlen(path_str) : 0) + 10, mtInternal); 244 245 library_path[0] = '\0'; 246 247 GetModuleFileName(NULL, tmp, sizeof(tmp)); 248 *(strrchr(tmp, '\\')) = '\0'; 249 strcat(library_path, tmp); 250 251 GetWindowsDirectory(tmp, sizeof(tmp)); 252 strcat(library_path, ";"); 253 strcat(library_path, tmp); 254 strcat(library_path, PACKAGE_DIR BIN_DIR); 255 256 GetSystemDirectory(tmp, sizeof(tmp)); 257 strcat(library_path, ";"); 258 strcat(library_path, tmp); 259 260 GetWindowsDirectory(tmp, sizeof(tmp)); 261 strcat(library_path, ";"); 262 strcat(library_path, tmp); 263 264 if (path_str) { 265 strcat(library_path, ";"); 266 strcat(library_path, path_str); 267 } 268 269 strcat(library_path, ";."); 270 271 Arguments::set_library_path(library_path); 272 FREE_C_HEAP_ARRAY(char, library_path, mtInternal); 273 } 274 275 /* Default extensions directory */ 276 { 277 char path[MAX_PATH]; 278 char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1]; 279 GetWindowsDirectory(path, MAX_PATH); 280 sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR, 281 path, PACKAGE_DIR, EXT_DIR); 282 Arguments::set_ext_dirs(buf); 283 } 284 #undef EXT_DIR 285 #undef BIN_DIR 286 #undef PACKAGE_DIR 287 288 /* Default endorsed standards directory. 
*/ 289 { 290 #define ENDORSED_DIR "\\lib\\endorsed" 291 size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR); 292 char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal); 293 sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR); 294 Arguments::set_endorsed_dirs(buf); 295 #undef ENDORSED_DIR 296 } 297 298 #ifndef _WIN64 299 // set our UnhandledExceptionFilter and save any previous one 300 prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception); 301 #endif 302 303 // Done 304 return; 305 } 306 307 void os::breakpoint() { 308 DebugBreak(); 309 } 310 311 // Invoked from the BREAKPOINT Macro 312 extern "C" void breakpoint() { 313 os::breakpoint(); 314 } 315 316 /* 317 * RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP. 318 * So far, this method is only used by Native Memory Tracking, which is 319 * only supported on Windows XP or later. 320 */ 321 322 int os::get_native_stack(address* stack, int frames, int toSkip) { 323 #ifdef _NMT_NOINLINE_ 324 toSkip ++; 325 #endif 326 int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames, 327 (PVOID*)stack, NULL); 328 for (int index = captured; index < frames; index ++) { 329 stack[index] = NULL; 330 } 331 return captured; 332 } 333 334 335 // os::current_stack_base() 336 // 337 // Returns the base of the stack, which is the stack's 338 // starting address. This function must be called 339 // while running on the stack of the thread being queried. 340 341 address os::current_stack_base() { 342 MEMORY_BASIC_INFORMATION minfo; 343 address stack_bottom; 344 size_t stack_size; 345 346 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 347 stack_bottom = (address)minfo.AllocationBase; 348 stack_size = minfo.RegionSize; 349 350 // Add up the sizes of all the regions with the same 351 // AllocationBase. 
  // Walk upward through all regions sharing the same AllocationBase as the
  // initial query; their combined size is the reserved stack size.
  while( 1 )
  {
    VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
    if ( stack_bottom == (address)minfo.AllocationBase )
      stack_size += minfo.RegionSize;
    else
      break;
  }

#ifdef _M_IA64
  // IA64 has memory and register stacks
  //
  // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
  // at thread creation (1MB backing store growing upwards, 1MB memory stack
  // growing downwards, 2MB summed up)
  //
  // ...
  // ------- top of stack (high address) -----
  // |
  // |      1MB
  // |      Backing Store (Register Stack)
  // |
  // |         / \
  // |          |
  // |          |
  // |          |
  // ------------------------ stack base -----
  // |      1MB
  // |      Memory Stack
  // |
  // |          |
  // |          |
  // |          |
  // |         \ /
  // |
  // ----- bottom of stack (low address) -----
  // ...

  // Only the lower (memory-stack) half belongs to the conventional stack.
  stack_size = stack_size / 2;
#endif
  // Base = lowest address + total size, i.e. the high end the stack grows
  // down from.
  return stack_bottom + stack_size;
}

// Size of the current thread's stack: distance from the stack base down to
// the AllocationBase of the region containing this frame's local `minfo`.
size_t os::current_stack_size() {
  size_t sz;
  MEMORY_BASIC_INFORMATION minfo;
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
  return sz;
}

// Reentrant-style localtime: copies the result of localtime() into the
// caller-supplied buffer `res`. Returns res on success, NULL on failure.
// NOTE(review): localtime() itself uses static storage and is not
// thread-safe in general — presumably callers serialize access; confirm.
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  const struct tm* time_struct_ptr = localtime(clock);
  if (time_struct_ptr != NULL) {
    *res = *time_struct_ptr;
    return res;
  }
  return NULL;
}

LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);

// Thread start routine for all new Java threads
static unsigned __stdcall java_start(Thread* thread) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
421 static int counter = 0; 422 int pid = os::current_process_id(); 423 _alloca(((pid ^ counter++) & 7) * 128); 424 425 OSThread* osthr = thread->osthread(); 426 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 427 428 if (UseNUMA) { 429 int lgrp_id = os::numa_get_group_id(); 430 if (lgrp_id != -1) { 431 thread->set_lgrp_id(lgrp_id); 432 } 433 } 434 435 436 // Install a win32 structured exception handler around every thread created 437 // by VM, so VM can genrate error dump when an exception occurred in non- 438 // Java thread (e.g. VM thread). 439 __try { 440 thread->run(); 441 } __except(topLevelExceptionFilter( 442 (_EXCEPTION_POINTERS*)_exception_info())) { 443 // Nothing to do. 444 } 445 446 // One less thread is executing 447 // When the VMThread gets here, the main thread may have already exited 448 // which frees the CodeHeap containing the Atomic::add code 449 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) { 450 Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count); 451 } 452 453 return 0; 454 } 455 456 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) { 457 // Allocate the OSThread object 458 OSThread* osthread = new OSThread(NULL, NULL); 459 if (osthread == NULL) return NULL; 460 461 // Initialize support for Java interrupts 462 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 463 if (interrupt_event == NULL) { 464 delete osthread; 465 return NULL; 466 } 467 osthread->set_interrupt_event(interrupt_event); 468 469 // Store info on the Win32 thread into the OSThread 470 osthread->set_thread_handle(thread_handle); 471 osthread->set_thread_id(thread_id); 472 473 if (UseNUMA) { 474 int lgrp_id = os::numa_get_group_id(); 475 if (lgrp_id != -1) { 476 thread->set_lgrp_id(lgrp_id); 477 } 478 } 479 480 // Initial thread state is INITIALIZED, not SUSPENDED 481 osthread->set_state(INITIALIZED); 482 483 return osthread; 484 } 485 486 487 bool 
os::create_attached_thread(JavaThread* thread) { 488 #ifdef ASSERT 489 thread->verify_not_published(); 490 #endif 491 HANDLE thread_h; 492 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(), 493 &thread_h, THREAD_ALL_ACCESS, false, 0)) { 494 fatal("DuplicateHandle failed\n"); 495 } 496 OSThread* osthread = create_os_thread(thread, thread_h, 497 (int)current_thread_id()); 498 if (osthread == NULL) { 499 return false; 500 } 501 502 // Initial thread state is RUNNABLE 503 osthread->set_state(RUNNABLE); 504 505 thread->set_osthread(osthread); 506 return true; 507 } 508 509 bool os::create_main_thread(JavaThread* thread) { 510 #ifdef ASSERT 511 thread->verify_not_published(); 512 #endif 513 if (_starting_thread == NULL) { 514 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 515 if (_starting_thread == NULL) { 516 return false; 517 } 518 } 519 520 // The primordial thread is runnable from the start) 521 _starting_thread->set_state(RUNNABLE); 522 523 thread->set_osthread(_starting_thread); 524 return true; 525 } 526 527 // Allocate and initialize a new OSThread 528 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) { 529 unsigned thread_id; 530 531 // Allocate the OSThread object 532 OSThread* osthread = new OSThread(NULL, NULL); 533 if (osthread == NULL) { 534 return false; 535 } 536 537 // Initialize support for Java interrupts 538 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 539 if (interrupt_event == NULL) { 540 delete osthread; 541 return NULL; 542 } 543 osthread->set_interrupt_event(interrupt_event); 544 osthread->set_interrupted(false); 545 546 thread->set_osthread(osthread); 547 548 if (stack_size == 0) { 549 switch (thr_type) { 550 case os::java_thread: 551 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 552 if (JavaThread::stack_size_at_create() > 0) 553 stack_size = JavaThread::stack_size_at_create(); 554 break; 555 case 
os::compiler_thread: 556 if (CompilerThreadStackSize > 0) { 557 stack_size = (size_t)(CompilerThreadStackSize * K); 558 break; 559 } // else fall through: 560 // use VMThreadStackSize if CompilerThreadStackSize is not defined 561 case os::vm_thread: 562 case os::pgc_thread: 563 case os::cgc_thread: 564 case os::watcher_thread: 565 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 566 break; 567 } 568 } 569 570 // Create the Win32 thread 571 // 572 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() 573 // does not specify stack size. Instead, it specifies the size of 574 // initially committed space. The stack size is determined by 575 // PE header in the executable. If the committed "stack_size" is larger 576 // than default value in the PE header, the stack is rounded up to the 577 // nearest multiple of 1MB. For example if the launcher has default 578 // stack size of 320k, specifying any size less than 320k does not 579 // affect the actual stack size at all, it only affects the initial 580 // commitment. On the other hand, specifying 'stack_size' larger than 581 // default value may cause significant increase in memory usage, because 582 // not only the stack space will be rounded up to MB, but also the 583 // entire space is committed upfront. 584 // 585 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' 586 // for CreateThread() that can treat 'stack_size' as stack size. However we 587 // are not supposed to call CreateThread() directly according to MSDN 588 // document because JVM uses C runtime library. The good news is that the 589 // flag appears to work with _beginthredex() as well. 
590 591 #ifndef STACK_SIZE_PARAM_IS_A_RESERVATION 592 #define STACK_SIZE_PARAM_IS_A_RESERVATION (0x10000) 593 #endif 594 595 HANDLE thread_handle = 596 (HANDLE)_beginthreadex(NULL, 597 (unsigned)stack_size, 598 (unsigned (__stdcall *)(void*)) java_start, 599 thread, 600 CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 601 &thread_id); 602 if (thread_handle == NULL) { 603 // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again 604 // without the flag. 605 thread_handle = 606 (HANDLE)_beginthreadex(NULL, 607 (unsigned)stack_size, 608 (unsigned (__stdcall *)(void*)) java_start, 609 thread, 610 CREATE_SUSPENDED, 611 &thread_id); 612 } 613 if (thread_handle == NULL) { 614 // Need to clean up stuff we've allocated so far 615 CloseHandle(osthread->interrupt_event()); 616 thread->set_osthread(NULL); 617 delete osthread; 618 return NULL; 619 } 620 621 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count); 622 623 // Store info on the Win32 thread into the OSThread 624 osthread->set_thread_handle(thread_handle); 625 osthread->set_thread_id(thread_id); 626 627 // Initial thread state is INITIALIZED, not SUSPENDED 628 osthread->set_state(INITIALIZED); 629 630 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 631 return true; 632 } 633 634 635 // Free Win32 resources related to the OSThread 636 void os::free_thread(OSThread* osthread) { 637 assert(osthread != NULL, "osthread not set"); 638 CloseHandle(osthread->thread_handle()); 639 CloseHandle(osthread->interrupt_event()); 640 delete osthread; 641 } 642 643 644 static int has_performance_count = 0; 645 static jlong first_filetime; 646 static jlong initial_performance_count; 647 static jlong performance_frequency; 648 649 650 jlong as_long(LARGE_INTEGER x) { 651 jlong result = 0; // initialization to avoid warning 652 set_high(&result, x.HighPart); 653 set_low(&result, x.LowPart); 654 return result; 655 } 656 657 658 jlong os::elapsed_counter() { 659 
LARGE_INTEGER count; 660 if (has_performance_count) { 661 QueryPerformanceCounter(&count); 662 return as_long(count) - initial_performance_count; 663 } else { 664 FILETIME wt; 665 GetSystemTimeAsFileTime(&wt); 666 return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime); 667 } 668 } 669 670 671 jlong os::elapsed_frequency() { 672 if (has_performance_count) { 673 return performance_frequency; 674 } else { 675 // the FILETIME time is the number of 100-nanosecond intervals since January 1,1601. 676 return 10000000; 677 } 678 } 679 680 681 julong os::available_memory() { 682 return win32::available_memory(); 683 } 684 685 julong os::win32::available_memory() { 686 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 687 // value if total memory is larger than 4GB 688 MEMORYSTATUSEX ms; 689 ms.dwLength = sizeof(ms); 690 GlobalMemoryStatusEx(&ms); 691 692 return (julong)ms.ullAvailPhys; 693 } 694 695 julong os::physical_memory() { 696 return win32::physical_memory(); 697 } 698 699 bool os::has_allocatable_memory_limit(julong* limit) { 700 MEMORYSTATUSEX ms; 701 ms.dwLength = sizeof(ms); 702 GlobalMemoryStatusEx(&ms); 703 #ifdef _LP64 704 *limit = (julong)ms.ullAvailVirtual; 705 return true; 706 #else 707 // Limit to 1400m because of the 2gb address space wall 708 *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual); 709 return true; 710 #endif 711 } 712 713 // VC6 lacks DWORD_PTR 714 #if _MSC_VER < 1300 715 typedef UINT_PTR DWORD_PTR; 716 #endif 717 718 int os::active_processor_count() { 719 // User has overridden the number of active processors 720 if (ActiveProcessorCount > 0) { 721 if (PrintActiveCpus) { 722 tty->print_cr("active_processor_count: " 723 "active processor count set by user : %d", 724 ActiveProcessorCount); 725 } 726 return ActiveProcessorCount; 727 } 728 729 DWORD_PTR lpProcessAffinityMask = 0; 730 DWORD_PTR lpSystemAffinityMask = 0; 731 int proc_count = processor_count(); 732 if (proc_count <= 
sizeof(UINT_PTR) * BitsPerByte && 733 GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) { 734 // Nof active processors is number of bits in process affinity mask 735 int bitcount = 0; 736 while (lpProcessAffinityMask != 0) { 737 lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1); 738 bitcount++; 739 } 740 return bitcount; 741 } else { 742 return proc_count; 743 } 744 } 745 746 void os::set_native_thread_name(const char *name) { 747 748 // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx 749 // 750 // Note that unfortunately this only works if the process 751 // is already attached to a debugger; debugger must observe 752 // the exception below to show the correct name. 753 754 const DWORD MS_VC_EXCEPTION = 0x406D1388; 755 struct { 756 DWORD dwType; // must be 0x1000 757 LPCSTR szName; // pointer to name (in user addr space) 758 DWORD dwThreadID; // thread ID (-1=caller thread) 759 DWORD dwFlags; // reserved for future use, must be zero 760 } info; 761 762 info.dwType = 0x1000; 763 info.szName = name; 764 info.dwThreadID = -1; 765 info.dwFlags = 0; 766 767 __try { 768 RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info ); 769 } __except(EXCEPTION_CONTINUE_EXECUTION) {} 770 } 771 772 bool os::distribute_processes(uint length, uint* distribution) { 773 // Not yet implemented. 774 return false; 775 } 776 777 bool os::bind_to_processor(uint processor_id) { 778 // Not yet implemented. 
779 return false; 780 } 781 782 static void initialize_performance_counter() { 783 LARGE_INTEGER count; 784 if (QueryPerformanceFrequency(&count)) { 785 has_performance_count = 1; 786 performance_frequency = as_long(count); 787 QueryPerformanceCounter(&count); 788 initial_performance_count = as_long(count); 789 } else { 790 has_performance_count = 0; 791 FILETIME wt; 792 GetSystemTimeAsFileTime(&wt); 793 first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 794 } 795 } 796 797 798 double os::elapsedTime() { 799 return (double) elapsed_counter() / (double) elapsed_frequency(); 800 } 801 802 803 // Windows format: 804 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601. 805 // Java format: 806 // Java standards require the number of milliseconds since 1/1/1970 807 808 // Constant offset - calculated using offset() 809 static jlong _offset = 116444736000000000; 810 // Fake time counter for reproducible results when debugging 811 static jlong fake_time = 0; 812 813 #ifdef ASSERT 814 // Just to be safe, recalculate the offset in debug mode 815 static jlong _calculated_offset = 0; 816 static int _has_calculated_offset = 0; 817 818 jlong offset() { 819 if (_has_calculated_offset) return _calculated_offset; 820 SYSTEMTIME java_origin; 821 java_origin.wYear = 1970; 822 java_origin.wMonth = 1; 823 java_origin.wDayOfWeek = 0; // ignored 824 java_origin.wDay = 1; 825 java_origin.wHour = 0; 826 java_origin.wMinute = 0; 827 java_origin.wSecond = 0; 828 java_origin.wMilliseconds = 0; 829 FILETIME jot; 830 if (!SystemTimeToFileTime(&java_origin, &jot)) { 831 fatal(err_msg("Error = %d\nWindows error", GetLastError())); 832 } 833 _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime); 834 _has_calculated_offset = 1; 835 assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal"); 836 return _calculated_offset; 837 } 838 #else 839 jlong offset() { 840 return 
  _offset;
}
#endif

// Convert a Windows FILETIME (100ns units since 1601-01-01) to Java time
// (milliseconds since 1970-01-01).
jlong windows_to_java_time(FILETIME wt) {
  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  // 10000 hundred-nanosecond intervals per millisecond
  return (a - offset()) / 10000;
}

// Inverse of windows_to_java_time(): Java milliseconds -> FILETIME.
FILETIME java_to_windows_time(jlong l) {
  jlong a = (l * 10000) + offset();
  FILETIME result;
  result.dwHighDateTime = high(a);
  result.dwLowDateTime = low(a);
  return result;
}

bool os::supports_vtime() { return true; }
bool os::enable_vtime() { return false; }
bool os::vtime_enabled() { return false; }

// Virtual (CPU) time for the current thread in seconds: kernel + user time
// from GetThreadTimes(), falling back to wall-clock elapsedTime() on failure.
double os::elapsedVTime() {
  FILETIME created;
  FILETIME exited;
  FILETIME kernel;
  FILETIME user;
  if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
    // the resolution of windows_to_java_time() should be sufficient (ms)
    return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
  } else {
    return elapsedTime();
  }
}

// Current wall-clock time in milliseconds since the Java epoch. Under
// -XX:+UseFakeTimers a monotonically increasing counter is returned instead,
// for reproducible debugging runs.
jlong os::javaTimeMillis() {
  if (UseFakeTimers) {
    return fake_time++;
  } else {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    return windows_to_java_time(wt);
  }
}

// Monotonic nanosecond timer based on QueryPerformanceCounter; degrades to
// millisecond resolution when no performance counter is available.
jlong os::javaTimeNanos() {
  if (!has_performance_count) {
    return javaTimeMillis() * NANOSECS_PER_MILLISEC; // the best we can do.
887 } else { 888 LARGE_INTEGER current_count; 889 QueryPerformanceCounter(¤t_count); 890 double current = as_long(current_count); 891 double freq = performance_frequency; 892 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC); 893 return time; 894 } 895 } 896 897 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 898 if (!has_performance_count) { 899 // javaTimeMillis() doesn't have much percision, 900 // but it is not going to wrap -- so all 64 bits 901 info_ptr->max_value = ALL_64_BITS; 902 903 // this is a wall clock timer, so may skip 904 info_ptr->may_skip_backward = true; 905 info_ptr->may_skip_forward = true; 906 } else { 907 jlong freq = performance_frequency; 908 if (freq < NANOSECS_PER_SEC) { 909 // the performance counter is 64 bits and we will 910 // be multiplying it -- so no wrap in 64 bits 911 info_ptr->max_value = ALL_64_BITS; 912 } else if (freq > NANOSECS_PER_SEC) { 913 // use the max value the counter can reach to 914 // determine the max value which could be returned 915 julong max_counter = (julong)ALL_64_BITS; 916 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC)); 917 } else { 918 // the performance counter is 64 bits and we will 919 // be using it directly -- so no wrap in 64 bits 920 info_ptr->max_value = ALL_64_BITS; 921 } 922 923 // using a counter, so no skipping 924 info_ptr->may_skip_backward = false; 925 info_ptr->may_skip_forward = false; 926 } 927 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time 928 } 929 930 char* os::local_time_string(char *buf, size_t buflen) { 931 SYSTEMTIME st; 932 GetLocalTime(&st); 933 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 934 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); 935 return buf; 936 } 937 938 bool os::getTimesSecs(double* process_real_time, 939 double* process_user_time, 940 double* process_system_time) { 941 HANDLE h_process = GetCurrentProcess(); 942 FILETIME create_time, exit_time, kernel_time, user_time; 943 BOOL 
result = GetProcessTimes(h_process, 944 &create_time, 945 &exit_time, 946 &kernel_time, 947 &user_time); 948 if (result != 0) { 949 FILETIME wt; 950 GetSystemTimeAsFileTime(&wt); 951 jlong rtc_millis = windows_to_java_time(wt); 952 jlong user_millis = windows_to_java_time(user_time); 953 jlong system_millis = windows_to_java_time(kernel_time); 954 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS); 955 *process_user_time = ((double) user_millis) / ((double) MILLIUNITS); 956 *process_system_time = ((double) system_millis) / ((double) MILLIUNITS); 957 return true; 958 } else { 959 return false; 960 } 961 } 962 963 void os::shutdown() { 964 965 // allow PerfMemory to attempt cleanup of any persistent resources 966 perfMemory_exit(); 967 968 // flush buffered output, finish log files 969 ostream_abort(); 970 971 // Check for abort hook 972 abort_hook_t abort_hook = Arguments::abort_hook(); 973 if (abort_hook != NULL) { 974 abort_hook(); 975 } 976 } 977 978 979 static BOOL (WINAPI *_MiniDumpWriteDump) ( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION, 980 PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION); 981 982 void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) { 983 HINSTANCE dbghelp; 984 EXCEPTION_POINTERS ep; 985 MINIDUMP_EXCEPTION_INFORMATION mei; 986 MINIDUMP_EXCEPTION_INFORMATION* pmei; 987 988 HANDLE hProcess = GetCurrentProcess(); 989 DWORD processId = GetCurrentProcessId(); 990 HANDLE dumpFile; 991 MINIDUMP_TYPE dumpType; 992 static const char* cwd; 993 994 // Default is to always create dump for debug builds, on product builds only dump on server versions of Windows. 
995 #ifndef ASSERT 996 // If running on a client version of Windows and user has not explicitly enabled dumping 997 if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) { 998 VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false); 999 return; 1000 // If running on a server version of Windows and user has explictly disabled dumping 1001 } else if (os::win32::is_windows_server() && !FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) { 1002 VMError::report_coredump_status("Minidump has been disabled from the command line", false); 1003 return; 1004 } 1005 #else 1006 if (!FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) { 1007 VMError::report_coredump_status("Minidump has been disabled from the command line", false); 1008 return; 1009 } 1010 #endif 1011 1012 dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0); 1013 1014 if (dbghelp == NULL) { 1015 VMError::report_coredump_status("Failed to load dbghelp.dll", false); 1016 return; 1017 } 1018 1019 _MiniDumpWriteDump = CAST_TO_FN_PTR( 1020 BOOL(WINAPI *)( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION, 1021 PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION), 1022 GetProcAddress(dbghelp, "MiniDumpWriteDump")); 1023 1024 if (_MiniDumpWriteDump == NULL) { 1025 VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false); 1026 return; 1027 } 1028 1029 dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData); 1030 1031 // Older versions of dbghelp.h doesn't contain all the dumptypes we want, dbghelp.h with 1032 // API_VERSION_NUMBER 11 or higher contains the ones we want though 1033 #if API_VERSION_NUMBER >= 11 1034 dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | 1035 MiniDumpWithUnloadedModules); 1036 #endif 1037 1038 cwd = get_current_directory(NULL, 0); 1039 jio_snprintf(buffer, 
bufferSize, "%s\\hs_err_pid%u.mdmp",cwd, current_process_id()); 1040 dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); 1041 1042 if (dumpFile == INVALID_HANDLE_VALUE) { 1043 VMError::report_coredump_status("Failed to create file for dumping", false); 1044 return; 1045 } 1046 if (exceptionRecord != NULL && contextRecord != NULL) { 1047 ep.ContextRecord = (PCONTEXT) contextRecord; 1048 ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord; 1049 1050 mei.ThreadId = GetCurrentThreadId(); 1051 mei.ExceptionPointers = &ep; 1052 pmei = &mei; 1053 } else { 1054 pmei = NULL; 1055 } 1056 1057 1058 // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all 1059 // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then. 1060 if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false && 1061 _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) { 1062 DWORD error = GetLastError(); 1063 LPTSTR msgbuf = NULL; 1064 1065 if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | 1066 FORMAT_MESSAGE_FROM_SYSTEM | 1067 FORMAT_MESSAGE_IGNORE_INSERTS, 1068 NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) { 1069 1070 jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf); 1071 LocalFree(msgbuf); 1072 } else { 1073 // Call to FormatMessage failed, just include the result from GetLastError 1074 jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error); 1075 } 1076 VMError::report_coredump_status(buffer, false); 1077 } else { 1078 VMError::report_coredump_status(buffer, true); 1079 } 1080 1081 CloseHandle(dumpFile); 1082 } 1083 1084 1085 1086 void os::abort(bool dump_core) 1087 { 1088 os::shutdown(); 1089 // no core dump on Windows 1090 ::exit(1); 1091 } 1092 1093 // Die immediately, no 
exit hook, no abort hook, no cleanup. 1094 void os::die() { 1095 _exit(-1); 1096 } 1097 1098 // Directory routines copied from src/win32/native/java/io/dirent_md.c 1099 // * dirent_md.c 1.15 00/02/02 1100 // 1101 // The declarations for DIR and struct dirent are in jvm_win32.h. 1102 1103 /* Caller must have already run dirname through JVM_NativePath, which removes 1104 duplicate slashes and converts all instances of '/' into '\\'. */ 1105 1106 DIR * 1107 os::opendir(const char *dirname) 1108 { 1109 assert(dirname != NULL, "just checking"); // hotspot change 1110 DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal); 1111 DWORD fattr; // hotspot change 1112 char alt_dirname[4] = { 0, 0, 0, 0 }; 1113 1114 if (dirp == 0) { 1115 errno = ENOMEM; 1116 return 0; 1117 } 1118 1119 /* 1120 * Win32 accepts "\" in its POSIX stat(), but refuses to treat it 1121 * as a directory in FindFirstFile(). We detect this case here and 1122 * prepend the current drive name. 1123 */ 1124 if (dirname[1] == '\0' && dirname[0] == '\\') { 1125 alt_dirname[0] = _getdrive() + 'A' - 1; 1126 alt_dirname[1] = ':'; 1127 alt_dirname[2] = '\\'; 1128 alt_dirname[3] = '\0'; 1129 dirname = alt_dirname; 1130 } 1131 1132 dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal); 1133 if (dirp->path == 0) { 1134 free(dirp, mtInternal); 1135 errno = ENOMEM; 1136 return 0; 1137 } 1138 strcpy(dirp->path, dirname); 1139 1140 fattr = GetFileAttributes(dirp->path); 1141 if (fattr == 0xffffffff) { 1142 free(dirp->path, mtInternal); 1143 free(dirp, mtInternal); 1144 errno = ENOENT; 1145 return 0; 1146 } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) { 1147 free(dirp->path, mtInternal); 1148 free(dirp, mtInternal); 1149 errno = ENOTDIR; 1150 return 0; 1151 } 1152 1153 /* Append "*.*", or possibly "\\*.*", to path */ 1154 if (dirp->path[1] == ':' 1155 && (dirp->path[2] == '\0' 1156 || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) { 1157 /* No '\\' needed for cases like "Z:" or "Z:\" */ 1158 
strcat(dirp->path, "*.*"); 1159 } else { 1160 strcat(dirp->path, "\\*.*"); 1161 } 1162 1163 dirp->handle = FindFirstFile(dirp->path, &dirp->find_data); 1164 if (dirp->handle == INVALID_HANDLE_VALUE) { 1165 if (GetLastError() != ERROR_FILE_NOT_FOUND) { 1166 free(dirp->path, mtInternal); 1167 free(dirp, mtInternal); 1168 errno = EACCES; 1169 return 0; 1170 } 1171 } 1172 return dirp; 1173 } 1174 1175 struct dirent * 1176 os::readdir(DIR *dirp) 1177 { 1178 assert(dirp != NULL, "just checking"); // hotspot change 1179 if (dirp->handle == INVALID_HANDLE_VALUE) { 1180 return NULL; 1181 } 1182 1183 strcpy(dirp->dirent.d_name, dirp->find_data.cFileName); 1184 1185 if (!FindNextFile(dirp->handle, &dirp->find_data)) { 1186 if (GetLastError() == ERROR_INVALID_HANDLE) { 1187 errno = EBADF; 1188 return NULL; 1189 } 1190 FindClose(dirp->handle); 1191 dirp->handle = INVALID_HANDLE_VALUE; 1192 } 1193 1194 return &dirp->dirent; 1195 } 1196 1197 int 1198 os::closedir(DIR *dirp) 1199 { 1200 assert(dirp != NULL, "just checking"); // hotspot change 1201 if (dirp->handle != INVALID_HANDLE_VALUE) { 1202 if (!FindClose(dirp->handle)) { 1203 errno = EBADF; 1204 return -1; 1205 } 1206 dirp->handle = INVALID_HANDLE_VALUE; 1207 } 1208 free(dirp->path, mtInternal); 1209 free(dirp, mtInternal); 1210 return 0; 1211 } 1212 1213 // This must be hard coded because it's the system's temporary 1214 // directory not the java application's temp directory, ala java.io.tmpdir. 
1215 const char* os::get_temp_directory() { 1216 static char path_buf[MAX_PATH]; 1217 if (GetTempPath(MAX_PATH, path_buf)>0) 1218 return path_buf; 1219 else{ 1220 path_buf[0]='\0'; 1221 return path_buf; 1222 } 1223 } 1224 1225 static bool file_exists(const char* filename) { 1226 if (filename == NULL || strlen(filename) == 0) { 1227 return false; 1228 } 1229 return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES; 1230 } 1231 1232 bool os::dll_build_name(char *buffer, size_t buflen, 1233 const char* pname, const char* fname) { 1234 bool retval = false; 1235 const size_t pnamelen = pname ? strlen(pname) : 0; 1236 const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0; 1237 1238 // Return error on buffer overflow. 1239 if (pnamelen + strlen(fname) + 10 > buflen) { 1240 return retval; 1241 } 1242 1243 if (pnamelen == 0) { 1244 jio_snprintf(buffer, buflen, "%s.dll", fname); 1245 retval = true; 1246 } else if (c == ':' || c == '\\') { 1247 jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname); 1248 retval = true; 1249 } else if (strchr(pname, *os::path_separator()) != NULL) { 1250 int n; 1251 char** pelements = split_path(pname, &n); 1252 if (pelements == NULL) { 1253 return false; 1254 } 1255 for (int i = 0 ; i < n ; i++) { 1256 char* path = pelements[i]; 1257 // Really shouldn't be NULL, but check can't hurt 1258 size_t plen = (path == NULL) ? 
0 : strlen(path); 1259 if (plen == 0) { 1260 continue; // skip the empty path values 1261 } 1262 const char lastchar = path[plen - 1]; 1263 if (lastchar == ':' || lastchar == '\\') { 1264 jio_snprintf(buffer, buflen, "%s%s.dll", path, fname); 1265 } else { 1266 jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname); 1267 } 1268 if (file_exists(buffer)) { 1269 retval = true; 1270 break; 1271 } 1272 } 1273 // release the storage 1274 for (int i = 0 ; i < n ; i++) { 1275 if (pelements[i] != NULL) { 1276 FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal); 1277 } 1278 } 1279 if (pelements != NULL) { 1280 FREE_C_HEAP_ARRAY(char*, pelements, mtInternal); 1281 } 1282 } else { 1283 jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname); 1284 retval = true; 1285 } 1286 return retval; 1287 } 1288 1289 // Needs to be in os specific directory because windows requires another 1290 // header file <direct.h> 1291 const char* os::get_current_directory(char *buf, size_t buflen) { 1292 int n = static_cast<int>(buflen); 1293 if (buflen > INT_MAX) n = INT_MAX; 1294 return _getcwd(buf, n); 1295 } 1296 1297 //----------------------------------------------------------- 1298 // Helper functions for fatal error handler 1299 #ifdef _WIN64 1300 // Helper routine which returns true if address in 1301 // within the NTDLL address space. 
1302 // 1303 static bool _addr_in_ntdll( address addr ) 1304 { 1305 HMODULE hmod; 1306 MODULEINFO minfo; 1307 1308 hmod = GetModuleHandle("NTDLL.DLL"); 1309 if ( hmod == NULL ) return false; 1310 if ( !os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod, 1311 &minfo, sizeof(MODULEINFO)) ) 1312 return false; 1313 1314 if ( (addr >= minfo.lpBaseOfDll) && 1315 (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) 1316 return true; 1317 else 1318 return false; 1319 } 1320 #endif 1321 1322 1323 // Enumerate all modules for a given process ID 1324 // 1325 // Notice that Windows 95/98/Me and Windows NT/2000/XP have 1326 // different API for doing this. We use PSAPI.DLL on NT based 1327 // Windows and ToolHelp on 95/98/Me. 1328 1329 // Callback function that is called by enumerate_modules() on 1330 // every DLL module. 1331 // Input parameters: 1332 // int pid, 1333 // char* module_file_name, 1334 // address module_base_addr, 1335 // unsigned module_size, 1336 // void* param 1337 typedef int (*EnumModulesCallbackFunc)(int, char *, address, unsigned, void *); 1338 1339 // enumerate_modules for Windows NT, using PSAPI 1340 static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void * param) 1341 { 1342 HANDLE hProcess ; 1343 1344 # define MAX_NUM_MODULES 128 1345 HMODULE modules[MAX_NUM_MODULES]; 1346 static char filename[ MAX_PATH ]; 1347 int result = 0; 1348 1349 if (!os::PSApiDll::PSApiAvailable()) { 1350 return 0; 1351 } 1352 1353 hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, 1354 FALSE, pid ) ; 1355 if (hProcess == NULL) return 0; 1356 1357 DWORD size_needed; 1358 if (!os::PSApiDll::EnumProcessModules(hProcess, modules, 1359 sizeof(modules), &size_needed)) { 1360 CloseHandle( hProcess ); 1361 return 0; 1362 } 1363 1364 // number of modules that are currently loaded 1365 int num_modules = size_needed / sizeof(HMODULE); 1366 1367 for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) { 
1368 // Get Full pathname: 1369 if(!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i], 1370 filename, sizeof(filename))) { 1371 filename[0] = '\0'; 1372 } 1373 1374 MODULEINFO modinfo; 1375 if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i], 1376 &modinfo, sizeof(modinfo))) { 1377 modinfo.lpBaseOfDll = NULL; 1378 modinfo.SizeOfImage = 0; 1379 } 1380 1381 // Invoke callback function 1382 result = func(pid, filename, (address)modinfo.lpBaseOfDll, 1383 modinfo.SizeOfImage, param); 1384 if (result) break; 1385 } 1386 1387 CloseHandle( hProcess ) ; 1388 return result; 1389 } 1390 1391 1392 // enumerate_modules for Windows 95/98/ME, using TOOLHELP 1393 static int _enumerate_modules_windows( int pid, EnumModulesCallbackFunc func, void *param) 1394 { 1395 HANDLE hSnapShot ; 1396 static MODULEENTRY32 modentry ; 1397 int result = 0; 1398 1399 if (!os::Kernel32Dll::HelpToolsAvailable()) { 1400 return 0; 1401 } 1402 1403 // Get a handle to a Toolhelp snapshot of the system 1404 hSnapShot = os::Kernel32Dll::CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid ) ; 1405 if( hSnapShot == INVALID_HANDLE_VALUE ) { 1406 return FALSE ; 1407 } 1408 1409 // iterate through all modules 1410 modentry.dwSize = sizeof(MODULEENTRY32) ; 1411 bool not_done = os::Kernel32Dll::Module32First( hSnapShot, &modentry ) != 0; 1412 1413 while( not_done ) { 1414 // invoke the callback 1415 result=func(pid, modentry.szExePath, (address)modentry.modBaseAddr, 1416 modentry.modBaseSize, param); 1417 if (result) break; 1418 1419 modentry.dwSize = sizeof(MODULEENTRY32) ; 1420 not_done = os::Kernel32Dll::Module32Next( hSnapShot, &modentry ) != 0; 1421 } 1422 1423 CloseHandle(hSnapShot); 1424 return result; 1425 } 1426 1427 int enumerate_modules( int pid, EnumModulesCallbackFunc func, void * param ) 1428 { 1429 // Get current process ID if caller doesn't provide it. 
1430 if (!pid) pid = os::current_process_id(); 1431 1432 if (os::win32::is_nt()) return _enumerate_modules_winnt (pid, func, param); 1433 else return _enumerate_modules_windows(pid, func, param); 1434 } 1435 1436 struct _modinfo { 1437 address addr; 1438 char* full_path; // point to a char buffer 1439 int buflen; // size of the buffer 1440 address base_addr; 1441 }; 1442 1443 static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr, 1444 unsigned size, void * param) { 1445 struct _modinfo *pmod = (struct _modinfo *)param; 1446 if (!pmod) return -1; 1447 1448 if (base_addr <= pmod->addr && 1449 base_addr+size > pmod->addr) { 1450 // if a buffer is provided, copy path name to the buffer 1451 if (pmod->full_path) { 1452 jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname); 1453 } 1454 pmod->base_addr = base_addr; 1455 return 1; 1456 } 1457 return 0; 1458 } 1459 1460 bool os::dll_address_to_library_name(address addr, char* buf, 1461 int buflen, int* offset) { 1462 // buf is not optional, but offset is optional 1463 assert(buf != NULL, "sanity check"); 1464 1465 // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always 1466 // return the full path to the DLL file, sometimes it returns path 1467 // to the corresponding PDB file (debug info); sometimes it only 1468 // returns partial path, which makes life painful. 
1469 1470 struct _modinfo mi; 1471 mi.addr = addr; 1472 mi.full_path = buf; 1473 mi.buflen = buflen; 1474 int pid = os::current_process_id(); 1475 if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) { 1476 // buf already contains path name 1477 if (offset) *offset = addr - mi.base_addr; 1478 return true; 1479 } 1480 1481 buf[0] = '\0'; 1482 if (offset) *offset = -1; 1483 return false; 1484 } 1485 1486 bool os::dll_address_to_function_name(address addr, char *buf, 1487 int buflen, int *offset) { 1488 // buf is not optional, but offset is optional 1489 assert(buf != NULL, "sanity check"); 1490 1491 if (Decoder::decode(addr, buf, buflen, offset)) { 1492 return true; 1493 } 1494 if (offset != NULL) *offset = -1; 1495 buf[0] = '\0'; 1496 return false; 1497 } 1498 1499 // save the start and end address of jvm.dll into param[0] and param[1] 1500 static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr, 1501 unsigned size, void * param) { 1502 if (!param) return -1; 1503 1504 if (base_addr <= (address)_locate_jvm_dll && 1505 base_addr+size > (address)_locate_jvm_dll) { 1506 ((address*)param)[0] = base_addr; 1507 ((address*)param)[1] = base_addr + size; 1508 return 1; 1509 } 1510 return 0; 1511 } 1512 1513 address vm_lib_location[2]; // start and end address of jvm.dll 1514 1515 // check if addr is inside jvm.dll 1516 bool os::address_is_in_vm(address addr) { 1517 if (!vm_lib_location[0] || !vm_lib_location[1]) { 1518 int pid = os::current_process_id(); 1519 if (!enumerate_modules(pid, _locate_jvm_dll, (void *)vm_lib_location)) { 1520 assert(false, "Can't find jvm module."); 1521 return false; 1522 } 1523 } 1524 1525 return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]); 1526 } 1527 1528 // print module info; param is outputStream* 1529 static int _print_module(int pid, char* fname, address base, 1530 unsigned size, void* param) { 1531 if (!param) return -1; 1532 1533 outputStream* st = (outputStream*)param; 1534 1535 address end_addr 
= base + size; 1536 st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base, end_addr, fname); 1537 return 0; 1538 } 1539 1540 // Loads .dll/.so and 1541 // in case of error it checks if .dll/.so was built for the 1542 // same architecture as Hotspot is running on 1543 void * os::dll_load(const char *name, char *ebuf, int ebuflen) 1544 { 1545 void * result = LoadLibrary(name); 1546 if (result != NULL) 1547 { 1548 return result; 1549 } 1550 1551 DWORD errcode = GetLastError(); 1552 if (errcode == ERROR_MOD_NOT_FOUND) { 1553 strncpy(ebuf, "Can't find dependent libraries", ebuflen-1); 1554 ebuf[ebuflen-1]='\0'; 1555 return NULL; 1556 } 1557 1558 // Parsing dll below 1559 // If we can read dll-info and find that dll was built 1560 // for an architecture other than Hotspot is running in 1561 // - then print to buffer "DLL was built for a different architecture" 1562 // else call os::lasterror to obtain system error message 1563 1564 // Read system error message into ebuf 1565 // It may or may not be overwritten below (in the for loop and just above) 1566 lasterror(ebuf, (size_t) ebuflen); 1567 ebuf[ebuflen-1]='\0'; 1568 int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0); 1569 if (file_descriptor<0) 1570 { 1571 return NULL; 1572 } 1573 1574 uint32_t signature_offset; 1575 uint16_t lib_arch=0; 1576 bool failed_to_get_lib_arch= 1577 ( 1578 //Go to position 3c in the dll 1579 (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0) 1580 || 1581 // Read loacation of signature 1582 (sizeof(signature_offset)!= 1583 (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset)))) 1584 || 1585 //Go to COFF File Header in dll 1586 //that is located after"signature" (4 bytes long) 1587 (os::seek_to_file_offset(file_descriptor, 1588 signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0) 1589 || 1590 //Read field that contains code of architecture 1591 // that dll was build for 1592 (sizeof(lib_arch)!= 1593 (os::read(file_descriptor, 
(void*)&lib_arch,sizeof(lib_arch)))) 1594 ); 1595 1596 ::close(file_descriptor); 1597 if (failed_to_get_lib_arch) 1598 { 1599 // file i/o error - report os::lasterror(...) msg 1600 return NULL; 1601 } 1602 1603 typedef struct 1604 { 1605 uint16_t arch_code; 1606 char* arch_name; 1607 } arch_t; 1608 1609 static const arch_t arch_array[]={ 1610 {IMAGE_FILE_MACHINE_I386, (char*)"IA 32"}, 1611 {IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"}, 1612 {IMAGE_FILE_MACHINE_IA64, (char*)"IA 64"} 1613 }; 1614 #if (defined _M_IA64) 1615 static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64; 1616 #elif (defined _M_AMD64) 1617 static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64; 1618 #elif (defined _M_IX86) 1619 static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386; 1620 #else 1621 #error Method os::dll_load requires that one of following \ 1622 is defined :_M_IA64,_M_AMD64 or _M_IX86 1623 #endif 1624 1625 1626 // Obtain a string for printf operation 1627 // lib_arch_str shall contain string what platform this .dll was built for 1628 // running_arch_str shall string contain what platform Hotspot was built for 1629 char *running_arch_str=NULL,*lib_arch_str=NULL; 1630 for (unsigned int i=0;i<ARRAY_SIZE(arch_array);i++) 1631 { 1632 if (lib_arch==arch_array[i].arch_code) 1633 lib_arch_str=arch_array[i].arch_name; 1634 if (running_arch==arch_array[i].arch_code) 1635 running_arch_str=arch_array[i].arch_name; 1636 } 1637 1638 assert(running_arch_str, 1639 "Didn't find runing architecture code in arch_array"); 1640 1641 // If the architure is right 1642 // but some other error took place - report os::lasterror(...) 
msg 1643 if (lib_arch == running_arch) 1644 { 1645 return NULL; 1646 } 1647 1648 if (lib_arch_str!=NULL) 1649 { 1650 ::_snprintf(ebuf, ebuflen-1, 1651 "Can't load %s-bit .dll on a %s-bit platform", 1652 lib_arch_str,running_arch_str); 1653 } 1654 else 1655 { 1656 // don't know what architecture this dll was build for 1657 ::_snprintf(ebuf, ebuflen-1, 1658 "Can't load this .dll (machine code=0x%x) on a %s-bit platform", 1659 lib_arch,running_arch_str); 1660 } 1661 1662 return NULL; 1663 } 1664 1665 1666 void os::print_dll_info(outputStream *st) { 1667 int pid = os::current_process_id(); 1668 st->print_cr("Dynamic libraries:"); 1669 enumerate_modules(pid, _print_module, (void *)st); 1670 } 1671 1672 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) { 1673 HANDLE hProcess; 1674 1675 # define MAX_NUM_MODULES 128 1676 HMODULE modules[MAX_NUM_MODULES]; 1677 static char filename[MAX_PATH]; 1678 int result = 0; 1679 1680 int pid = os::current_process_id(); 1681 hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, 1682 FALSE, pid); 1683 if (hProcess == NULL) return 0; 1684 1685 DWORD size_needed; 1686 if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) { 1687 CloseHandle(hProcess); 1688 return 0; 1689 } 1690 1691 // number of modules that are currently loaded 1692 int num_modules = size_needed / sizeof(HMODULE); 1693 1694 for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) { 1695 // Get Full pathname: 1696 if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) { 1697 filename[0] = '\0'; 1698 } 1699 1700 MODULEINFO modinfo; 1701 if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) { 1702 modinfo.lpBaseOfDll = NULL; 1703 modinfo.SizeOfImage = 0; 1704 } 1705 1706 // Invoke callback function 1707 result = callback(filename, (address)modinfo.lpBaseOfDll, 1708 (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param); 1709 if (result) break; 
1710 } 1711 1712 CloseHandle(hProcess); 1713 return result; 1714 } 1715 1716 void os::print_os_info_brief(outputStream* st) { 1717 os::print_os_info(st); 1718 } 1719 1720 void os::print_os_info(outputStream* st) { 1721 st->print("OS:"); 1722 1723 os::win32::print_windows_version(st); 1724 1725 #ifdef _LP64 1726 VM_Version::print_platform_virtualization_info(st); 1727 #endif 1728 } 1729 1730 void os::win32::print_windows_version(outputStream* st) { 1731 OSVERSIONINFOEX osvi; 1732 VS_FIXEDFILEINFO *file_info; 1733 TCHAR kernel32_path[MAX_PATH]; 1734 UINT len, ret; 1735 1736 // Use the GetVersionEx information to see if we're on a server or 1737 // workstation edition of Windows. Starting with Windows 8.1 we can't 1738 // trust the OS version information returned by this API. 1739 ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX)); 1740 osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); 1741 if (!GetVersionEx((OSVERSIONINFO *)&osvi)) { 1742 st->print_cr("Call to GetVersionEx failed"); 1743 return; 1744 } 1745 bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION); 1746 1747 // Get the full path to \Windows\System32\kernel32.dll and use that for 1748 // determining what version of Windows we're running on. 
1749 len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1; 1750 ret = GetSystemDirectory(kernel32_path, len); 1751 if (ret == 0 || ret > len) { 1752 st->print_cr("Call to GetSystemDirectory failed"); 1753 return; 1754 } 1755 strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret); 1756 1757 DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL); 1758 if (version_size == 0) { 1759 st->print_cr("Call to GetFileVersionInfoSize failed"); 1760 return; 1761 } 1762 1763 LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal); 1764 if (version_info == NULL) { 1765 st->print_cr("Failed to allocate version_info"); 1766 return; 1767 } 1768 1769 if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) { 1770 os::free(version_info); 1771 st->print_cr("Call to GetFileVersionInfo failed"); 1772 return; 1773 } 1774 1775 if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) { 1776 os::free(version_info); 1777 st->print_cr("Call to VerQueryValue failed"); 1778 return; 1779 } 1780 1781 int major_version = HIWORD(file_info->dwProductVersionMS); 1782 int minor_version = LOWORD(file_info->dwProductVersionMS); 1783 int build_number = HIWORD(file_info->dwProductVersionLS); 1784 int build_minor = LOWORD(file_info->dwProductVersionLS); 1785 int os_vers = major_version * 1000 + minor_version; 1786 os::free(version_info); 1787 1788 st->print(" Windows "); 1789 switch (os_vers) { 1790 1791 case 6000: 1792 if (is_workstation) { 1793 st->print("Vista"); 1794 } else { 1795 st->print("Server 2008"); 1796 } 1797 break; 1798 1799 case 6001: 1800 if (is_workstation) { 1801 st->print("7"); 1802 } else { 1803 st->print("Server 2008 R2"); 1804 } 1805 break; 1806 1807 case 6002: 1808 if (is_workstation) { 1809 st->print("8"); 1810 } else { 1811 st->print("Server 2012"); 1812 } 1813 break; 1814 1815 case 6003: 1816 if (is_workstation) { 1817 st->print("8.1"); 1818 } else { 1819 st->print("Server 2012 R2"); 1820 } 1821 break; 1822 1823 case 6004: 
1824 if (is_workstation) { 1825 st->print("10"); 1826 } else { 1827 // distinguish Windows Server 2016 and 2019 by build number 1828 // Windows server 2019 GA 10/2018 build number is 17763 1829 if (build_number > 17762) { 1830 st->print("Server 2019"); 1831 } else { 1832 st->print("Server 2016"); 1833 } 1834 } 1835 break; 1836 1837 default: 1838 // Unrecognized windows, print out its major and minor versions 1839 st->print("%d.%d", major_version, minor_version); 1840 break; 1841 } 1842 1843 // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could 1844 // find out whether we are running on 64 bit processor or not 1845 SYSTEM_INFO si; 1846 ZeroMemory(&si, sizeof(SYSTEM_INFO)); 1847 os::Kernel32Dll::GetNativeSystemInfo(&si); 1848 if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) { 1849 st->print(" , 64 bit"); 1850 } 1851 1852 st->print(" Build %d", build_number); 1853 st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor); 1854 st->cr(); 1855 } 1856 1857 void os::pd_print_cpu_info(outputStream* st) { 1858 // Nothing to do for now. 
1859 } 1860 1861 void os::print_memory_info(outputStream* st) { 1862 st->print("Memory:"); 1863 st->print(" %dk page", os::vm_page_size()>>10); 1864 1865 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 1866 // value if total memory is larger than 4GB 1867 MEMORYSTATUSEX ms; 1868 ms.dwLength = sizeof(ms); 1869 GlobalMemoryStatusEx(&ms); 1870 1871 st->print(", physical %uk", os::physical_memory() >> 10); 1872 st->print("(%uk free)", os::available_memory() >> 10); 1873 1874 st->print(", swap %uk", ms.ullTotalPageFile >> 10); 1875 st->print("(%uk free)", ms.ullAvailPageFile >> 10); 1876 st->cr(); 1877 } 1878 1879 void os::print_siginfo(outputStream *st, void *siginfo) { 1880 EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo; 1881 st->print("siginfo:"); 1882 st->print(" ExceptionCode=0x%x", er->ExceptionCode); 1883 1884 if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 1885 er->NumberParameters >= 2) { 1886 switch (er->ExceptionInformation[0]) { 1887 case 0: st->print(", reading address"); break; 1888 case 1: st->print(", writing address"); break; 1889 default: st->print(", ExceptionInformation=" INTPTR_FORMAT, 1890 er->ExceptionInformation[0]); 1891 } 1892 st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]); 1893 } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR && 1894 er->NumberParameters >= 2 && UseSharedSpaces) { 1895 FileMapInfo* mapinfo = FileMapInfo::current_info(); 1896 if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) { 1897 st->print("\n\nError accessing class data sharing archive." 
\ 1898 " Mapped file inaccessible during execution, " \ 1899 " possible disk/network problem."); 1900 } 1901 } else { 1902 int num = er->NumberParameters; 1903 if (num > 0) { 1904 st->print(", ExceptionInformation="); 1905 for (int i = 0; i < num; i++) { 1906 st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]); 1907 } 1908 } 1909 } 1910 st->cr(); 1911 } 1912 1913 1914 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) { 1915 #if _MSC_VER >= 1900 1916 // Starting with Visual Studio 2015, vsnprint is C99 compliant. 1917 int result = ::vsnprintf(buf, len, fmt, args); 1918 // If an encoding error occurred (result < 0) then it's not clear 1919 // whether the buffer is NUL terminated, so ensure it is. 1920 if ((result < 0) && (len > 0)) { 1921 buf[len - 1] = '\0'; 1922 } 1923 return result; 1924 #else 1925 // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use 1926 // _vsnprintf, whose behavior seems to be *mostly* consistent across 1927 // versions. However, when len == 0, avoid _vsnprintf too, and just 1928 // go straight to _vscprintf. The output is going to be truncated in 1929 // that case, except in the unusual case of empty output. More 1930 // importantly, the documentation for various versions of Visual Studio 1931 // are inconsistent about the behavior of _vsnprintf when len == 0, 1932 // including it possibly being an error. 1933 int result = -1; 1934 if (len > 0) { 1935 result = _vsnprintf(buf, len, fmt, args); 1936 // If output (including NUL terminator) is truncated, the buffer 1937 // won't be NUL terminated. Add the trailing NUL specified by C99. 
1938 if ((result < 0) || (result >= (int) len)) { 1939 buf[len - 1] = '\0'; 1940 } 1941 } 1942 if (result < 0) { 1943 result = _vscprintf(fmt, args); 1944 } 1945 return result; 1946 #endif // _MSC_VER dispatch 1947 } 1948 1949 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { 1950 // do nothing 1951 } 1952 1953 static char saved_jvm_path[MAX_PATH] = {0}; 1954 1955 // Find the full path to the current module, jvm.dll 1956 void os::jvm_path(char *buf, jint buflen) { 1957 // Error checking. 1958 if (buflen < MAX_PATH) { 1959 assert(false, "must use a large-enough buffer"); 1960 buf[0] = '\0'; 1961 return; 1962 } 1963 // Lazy resolve the path to current module. 1964 if (saved_jvm_path[0] != 0) { 1965 strcpy(buf, saved_jvm_path); 1966 return; 1967 } 1968 1969 buf[0] = '\0'; 1970 if (Arguments::created_by_gamma_launcher()) { 1971 // Support for the gamma launcher. Check for an 1972 // JAVA_HOME environment variable 1973 // and fix up the path so it looks like 1974 // libjvm.so is installed there (append a fake suffix 1975 // hotspot/libjvm.so). 
    // NOTE(review): this is the tail of os::jvm_path(); its beginning is above
    // this chunk.  Fall back to deriving the jvm.dll path from JAVA_HOME.
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {

      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      // If <JAVA_HOME>\jre\bin\ does not exist, assume a modules image
      // and use <JAVA_HOME>\bin\ instead.
      if (0 != _access(buf, 0)) {
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  // Last resort: ask the OS where the VM library itself was loaded from.
  if(buf[0] == '\0') {
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  strncpy(saved_jvm_path, buf, MAX_PATH);
}


// On 32-bit Windows, stdcall-exported JNI entry points are decorated with a
// leading underscore; nothing is needed on _WIN64.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}


// On 32-bit Windows, stdcall decoration also appends "@<bytes of args>".
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("@%d", args_size * sizeof(int));
#endif
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c

// Formats a text description of the last OS error (GetLastError(), falling
// back to errno) into buf; returns the number of characters written, 0 if
// there was no pending error.
size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
          FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
          NULL,
          errval,
          0,
          buf,
          (DWORD)len,
          NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;  // truncate to fit, leaving room for NUL
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}

// Returns the last OS error as an int: GetLastError() if set, else errno.
int os::get_last_error() {
  DWORD error = GetLastError();
  if (error == 0)
    error = errno;
  return (int)error;
}

// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;

// C-signal-style dispatcher used for signals forwarded to Java
// (sun.misc.Signal); records the signal and re-installs itself.
static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}

void* os::user_handler() {
  return (void*) UserHandler;
}

// Installs handler for signal_number and returns the previous handler.
// SIGBREAK is special-cased (see the kernel-bug note above): it is kept in
// sigbreakHandler and delivered from consoleHandler, not via ::signal.
void* os::signal(int signal_number, void* handler) {
  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
    void (*oldHandler)(int) = sigbreakHandler;
    sigbreakHandler = (void (*)(int)) handler;
    return (void*) oldHandler;
  } else {
    return (void*)::signal(signal_number, (void (*)(int))handler);
  }
}

void os::signal_raise(int signal_number) {
  raise(signal_number);
}

// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events.  We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
  switch(event) {
    case CTRL_C_EVENT:
      if (is_error_reported()) {
        // Ctrl-C is pressed during error reporting, likely because the error
        // handler fails to abort. Let VM die immediately.
        os::die();
      }

      os::signal_raise(SIGINT);
      return TRUE;
      break;
    case CTRL_BREAK_EVENT:
      if (sigbreakHandler != NULL) {
        (*sigbreakHandler)(SIGBREAK);
      }
      return TRUE;
      break;
    case CTRL_LOGOFF_EVENT: {
      // Don't terminate JVM if it is running in a non-interactive session,
      // such as a service process.
      USEROBJECTFLAGS flags;
      HANDLE handle = GetProcessWindowStation();
      if (handle != NULL &&
          GetUserObjectInformation(handle, UOI_FLAGS, &flags,
            sizeof( USEROBJECTFLAGS), NULL)) {
        // If it is a non-interactive session, let next handler to deal
        // with it.
        if ((flags.dwFlags & WSF_VISIBLE) == 0) {
          return FALSE;
        }
      }
    }
    // FALLTHROUGH: an interactive logoff is treated like close/shutdown.
    case CTRL_CLOSE_EVENT:
    case CTRL_SHUTDOWN_EVENT:
      os::signal_raise(SIGTERM);
      return TRUE;
      break;
    default:
      break;
  }
  return FALSE;
}

/*
 * The following code is moved from os.cpp for making this
 * code platform specific, which it is by its very nature.
 */

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd(){
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Semaphore released once per pending signal; waited on by the signal thread.
static HANDLE sig_sem = NULL;

void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.  For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified.  This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case.  See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}

// Records one occurrence of signal_number and wakes the signal thread.
void os::signal_notify(int signal_number) {
  BOOL ret;
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[signal_number]);
    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
    assert(ret != 0, "ReleaseSemaphore() failed");
  }
}

// Scans pending_signals for a signal whose count can be atomically
// decremented; returns its number, or -1 when nothing is pending and
// wait_for_signal is false.  When wait_for_signal is true, blocks on
// sig_sem (cooperating with the external-suspend protocol) until a
// signal arrives.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // CAS-decrement so concurrent callers cannot consume the same signal.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
        assert(ret != 0, "ReleaseSemaphore() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

int os::signal_lookup() {
  return check_pending_signals(false);
}

int os::signal_wait() {
  return check_pending_signals(true);
}

// Implicit OS exception handling

// Redirects execution to 'handler' by rewriting the saved program counter in
// the exception context, after stashing the faulting pc in the thread so the
// handler stub can find it.  Always continues execution (at the handler).
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) {
  JavaThread* thread = JavaThread::current();
  // Save pc in thread
#ifdef _M_IA64
  // Do not blow up if no thread info available.
  if (thread) {
    // Saving PRECISE pc (with slot information) in thread.
    uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
    // Convert precise PC into "Unix" format
    precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
    thread->set_saved_exception_pc((address)precise_pc);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
  // Clear out psr.ri (= Restart Instruction) in order to continue
  // at the beginning of the target bundle.
  exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
  assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
#else
  #ifdef _M_AMD64
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
  #else
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
  #endif
#endif

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}


// Used for PostMortemDump
extern "C" void safepoints();
extern "C" void find(int x);
extern "C" void events();

// According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real world experience shows that occasionnaly
// the execution of an illegal instruction can generate the exception code 0xC000001E. This
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).

#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E

// From "Execution Protection in the Windows Operating System" draft 0.35
// Once a system header becomes available, the "real" define should be
// included or copied here.
#define EXCEPTION_INFO_EXEC_VIOLATION 0x08

// Handle NAT Bit consumption on IA64.
#ifdef _M_IA64
#define EXCEPTION_REG_NAT_CONSUMPTION STATUS_REG_NAT_CONSUMPTION
#endif

// Windows Vista/2008 heap corruption check
#define EXCEPTION_HEAP_CORRUPTION 0xC0000374

// All Visual C++ exceptions thrown from code generated by the Microsoft Visual
// C++ compiler contain this error code. Because this is a compiler-generated
// error, the code is not listed in the Win32 API header files.
2312 // The code is actually a cryptic mnemonic device, with the initial "E" 2313 // standing for "exception" and the final 3 bytes (0x6D7363) representing the 2314 // ASCII values of "msc". 2315 2316 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363 2317 2318 #define def_excpt(val) { #val, (val) } 2319 2320 static const struct { char* name; uint number; } exceptlabels[] = { 2321 def_excpt(EXCEPTION_ACCESS_VIOLATION), 2322 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), 2323 def_excpt(EXCEPTION_BREAKPOINT), 2324 def_excpt(EXCEPTION_SINGLE_STEP), 2325 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), 2326 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND), 2327 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO), 2328 def_excpt(EXCEPTION_FLT_INEXACT_RESULT), 2329 def_excpt(EXCEPTION_FLT_INVALID_OPERATION), 2330 def_excpt(EXCEPTION_FLT_OVERFLOW), 2331 def_excpt(EXCEPTION_FLT_STACK_CHECK), 2332 def_excpt(EXCEPTION_FLT_UNDERFLOW), 2333 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO), 2334 def_excpt(EXCEPTION_INT_OVERFLOW), 2335 def_excpt(EXCEPTION_PRIV_INSTRUCTION), 2336 def_excpt(EXCEPTION_IN_PAGE_ERROR), 2337 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION), 2338 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2), 2339 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION), 2340 def_excpt(EXCEPTION_STACK_OVERFLOW), 2341 def_excpt(EXCEPTION_INVALID_DISPOSITION), 2342 def_excpt(EXCEPTION_GUARD_PAGE), 2343 def_excpt(EXCEPTION_INVALID_HANDLE), 2344 def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION), 2345 def_excpt(EXCEPTION_HEAP_CORRUPTION) 2346 #ifdef _M_IA64 2347 , def_excpt(EXCEPTION_REG_NAT_CONSUMPTION) 2348 #endif 2349 }; 2350 2351 const char* os::exception_name(int exception_code, char *buf, size_t size) { 2352 uint code = static_cast<uint>(exception_code); 2353 for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) { 2354 if (exceptlabels[i].number == code) { 2355 jio_snprintf(buf, size, "%s", exceptlabels[i].name); 2356 return buf; 2357 } 2358 } 2359 2360 return NULL; 2361 } 2362 2363 
//-----------------------------------------------------------------------------
// Recovers from the hardware trap raised by the idiv instruction for
// min_jint / -1 (the only overflowing integer division); patches the result
// registers and resumes just past the idiv.
LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by idiv; should only happen for -MinInt/-1
  // (division by zero is handled explicitly)
#ifdef _M_IA64
  assert(0, "Fix Handle_IDiv_Exception");
#else
  #ifdef _M_AMD64
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Rip;
  assert(pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  assert(ctx->Rax == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
  ctx->Rax = (DWORD64)min_jint;      // result
  ctx->Rdx = (DWORD64)0;             // remainder
  // Continue the execution
  #else
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Eip;
  assert(pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  assert(ctx->Eax == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Eip = (DWORD)pc + 2;          // idiv reg, reg  is 2 bytes
  ctx->Eax = (DWORD)min_jint;        // result
  ctx->Edx = (DWORD)0;               // remainder
  // Continue the execution
  #endif
#endif
  return EXCEPTION_CONTINUE_EXECUTION;
}

#ifndef  _WIN64
//-----------------------------------------------------------------------------
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by native method modifying control word
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;

  switch (exception_code) {
    case EXCEPTION_FLT_DENORMAL_OPERAND:
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
    case EXCEPTION_FLT_INEXACT_RESULT:
    case EXCEPTION_FLT_INVALID_OPERATION:
    case EXCEPTION_FLT_OVERFLOW:
    case EXCEPTION_FLT_STACK_CHECK:
    case EXCEPTION_FLT_UNDERFLOW:
      // Declaration after the final case label is legal C++ since no later
      // label can jump over it.
      jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
      if (fp_control_word != ctx->FloatSave.ControlWord) {
        // Restore FPCW and mask out FLT exceptions
        ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
        // Mask out pending FLT exceptions
        ctx->FloatSave.StatusWord &=  0xffffff00;
        return EXCEPTION_CONTINUE_EXECUTION;
      }
  }

  if (prev_uef_handler != NULL) {
    // We didn't handle this exception so pass it to the previous
    // UnhandledExceptionFilter.
    return (prev_uef_handler)(exceptionInfo);
  }

  return EXCEPTION_CONTINUE_SEARCH;
}
#else //_WIN64
/*
  On Windows, the mxcsr control bits are non-volatile across calls
  See also CR 6192333
  If EXCEPTION_FLT_* happened after some native method modified
  mxcsr - it is not a jvm fault.
  However should we decide to restore of mxcsr after a faulty
  native method we can uncomment following code
      jint MxCsr = INITIAL_MXCSR;
        // we can't use StubRoutines::addr_mxcsr_std()
        // because in Win64 mxcsr is not saved there
      if (MxCsr != ctx->MxCsr) {
        ctx->MxCsr = MxCsr;
        return EXCEPTION_CONTINUE_EXECUTION;
      }

*/
#endif // _WIN64


// Hands the fault to VMError for hs_err reporting; normally does not return.
static inline void report_error(Thread* t, DWORD exception_code,
                                address addr, void* siginfo, void* context) {
  VMError err(t, exception_code, addr, siginfo, context);
  err.report_and_die();

  // If UseOsErrorReporting, this will return here and save the error file
  // somewhere where we can find it in the minidump.
}

//-----------------------------------------------------------------------------
// The VM's top-level structured exception filter.  Classifies hardware
// faults (access violations, stack overflow, integer overflow, FP state,
// illegal instructions) and either redirects execution to a runtime stub
// via Handle_Exception, resumes, or reports a fatal error.
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#ifdef _M_IA64
  // On Itanium, we need the "precise pc", which has the slot number coded
  // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
  address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
  // Convert the pc to "Unix format", which has the slot number coded
  // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
  // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
  // information is saved in the Unix format.
  address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
#else
  #ifdef _M_AMD64
  address pc = (address) exceptionInfo->ContextRecord->Rip;
  #else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
  #endif
#endif
  Thread* t = ThreadLocalStorage::get_thread_slow();          // slow & steady

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      bool instr_spans_page_boundary =
        (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                         (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start =
            (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          if (PrintMiscellaneous && Verbose) {
            char buf[256];
            jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                         "at " INTPTR_FORMAT
                         ", unguarding " INTPTR_FORMAT ": %s", addr,
                         page_start, (res ? "success" : strerror(errno)));
            tty->print_raw_cr(buf);
          }

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here.  Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM.  Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop.  This condition is probably even more unlikely
          // than the first.
          //
          // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are unnecessary complication:
          // this handler is a best-effort safety net, not a complete solution.
          // It is disabled by default and should only be used as a workaround
          // in case we missed any no-execute-unsafe VM code.

          last_addr = addr;

          return EXCEPTION_CONTINUE_EXECUTION;
        }
      }

      // Last unguard failed or not unguarding
      tty->print_raw_cr("Execution protection violation");
      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    }
  }
#endif // _WIN64

  // Check to see if we caught the safepoint code in the
  // process of write protecting the memory serialization page.
  // It write enables the page immediately after protecting it
  // so just return.
  if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
    JavaThread* thread = (JavaThread*) t;
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];
    if ( os::is_memory_serialize_page(thread, addr) ) {
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
      VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
  }

  if (t != NULL && t->is_Java_thread()) {
    JavaThread* thread = (JavaThread*) t;
    bool in_java = thread->thread_state() == _thread_in_Java;

    // Handle potential stack overflows up front.
    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
      if (os::uses_stack_guard_pages()) {
#ifdef _M_IA64
        // Use guard page for register stack.
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        // Check for a register stack overflow on Itanium
        if (thread->addr_inside_register_stack_red_zone(addr)) {
          // Fatal red zone violation happens if the Java program
          // catches a StackOverflow error and does so much processing
          // that it runs beyond the unprotected yellow guard zone. As
          // a result, we are out of here.
          fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
        } else if(thread->addr_inside_register_stack(addr)) {
          // Disable the yellow zone which sets the state that
          // we've got a stack overflow problem.
          if (thread->stack_yellow_zone_enabled()) {
            thread->disable_stack_yellow_zone();
          }
          // Give us some room to process the exception.
          thread->disable_register_stack_guard();
          // Tracing with +Verbose.
          if (Verbose) {
            tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
            tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
            tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
            tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
                          thread->register_stack_base(),
                          thread->register_stack_base() + thread->stack_size());
          }

          // Reguard the permanent register stack red zone just to be sure.
          // We saw Windows silently disabling this without telling us.
          thread->enable_register_stack_red_zone();

          return Handle_Exception(exceptionInfo,
            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
#endif
        if (thread->stack_yellow_zone_enabled()) {
          // Yellow zone violation.  The o/s has unprotected the first yellow
          // zone page for us.  Note:  must call disable_stack_yellow_zone to
          // update the enabled status, even if the zone contains only one page.
          thread->disable_stack_yellow_zone();
          // If not in java code, return and hope for the best.
          return in_java ? Handle_Exception(exceptionInfo,
            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
            : EXCEPTION_CONTINUE_EXECUTION;
        } else {
          // Fatal red zone violation.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
          report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                       exceptionInfo->ContextRecord);
          return EXCEPTION_CONTINUE_SEARCH;
        }
      } else if (in_java) {
        // JVM-managed guard pages cannot be used on win95/98.  The o/s provides
        // a one-time-only guard page, which it has released to us.  The next
        // stack overflow on this thread will result in an ACCESS_VIOLATION.
        return Handle_Exception(exceptionInfo,
          SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
      } else {
        // Can only return and hope for the best.  Further stack growth will
        // result in an ACCESS_VIOLATION.
        return EXCEPTION_CONTINUE_EXECUTION;
      }
    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
      // Either stack overflow or null pointer exception.
      if (in_java) {
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        address stack_end = thread->stack_base() - thread->stack_size();
        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
          // Stack overflow.
          assert(!os::uses_stack_guard_pages(),
            "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        //
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        //
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          if (os::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
        {
#ifdef _WIN64
          //
          // If it's a legal stack address map the entire region in
          //
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) {
            addr = (address)((uintptr_t)addr &
                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
            os::commit_memory((char *)addr, thread->stack_base() - addr,
                              !ExecMem);
            return EXCEPTION_CONTINUE_EXECUTION;
          }
          else
#endif
          {
            // Null pointer exception.
#ifdef _M_IA64
            // Process implicit null checks in compiled code. Note: Implicit null checks
            // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
            if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
              CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
              // Handle implicit null check in UEP method entry
              if (cb && (cb->is_frame_complete_at(pc) ||
                         (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
                if (Verbose) {
                  intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
                  tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
                  tty->print_cr("      to addr " INTPTR_FORMAT, addr);
                  tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
                                *(bundle_start + 1), *bundle_start);
                }
                return Handle_Exception(exceptionInfo,
                  SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
              }
            }

            // Implicit null checks were processed above.  Hence, we should not reach
            // here in the usual case => die!
            if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;

#else // !IA64

            // Windows 98 reports faulting addresses incorrectly
            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
                !os::win32::is_nt()) {
              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
            }
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;
#endif
          }
        }
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
        address addr = JNI_FastGetField::find_slowcase_pc(pc);
        if (addr != (address)-1) {
          return Handle_Exception(exceptionInfo, addr);
        }
      }
#endif

      // Stack overflow or null pointer exception in native code.
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#if defined _M_IA64
    else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
              exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
      M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);

      // Compiled method patched to be non entrant? Following conditions must apply:
      // 1. must be first instruction in bundle
      // 2. must be a break instruction with appropriate code
      if((((uint64_t) pc & 0x0F) == 0) &&
         (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
        return Handle_Exception(exceptionInfo,
          (address)SharedRuntime::get_handle_wrong_method_stub());
      }
    } // /EXCEPTION_ILLEGAL_INSTRUCTION
#endif


    if (in_java) {
      switch (exception_code) {
        case EXCEPTION_INT_DIVIDE_BY_ZERO:
          return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

        case EXCEPTION_INT_OVERFLOW:
          return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }
#ifndef _WIN64
    if (((thread->thread_state() == _thread_in_Java) ||
         (thread->thread_state() == _thread_in_native)) &&
        exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION)
    {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
#endif //_WIN64
  }

  if (exception_code != EXCEPTION_BREAKPOINT) {
    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                 exceptionInfo->ContextRecord);
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

#ifndef _WIN64
// Special care for fast JNI accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
// the heap gets shrunk before the field access.
// Need to install our own structured exception handler since native code may
// install its own.
2818 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { 2819 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2820 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2821 address pc = (address) exceptionInfo->ContextRecord->Eip; 2822 address addr = JNI_FastGetField::find_slowcase_pc(pc); 2823 if (addr != (address)-1) { 2824 return Handle_Exception(exceptionInfo, addr); 2825 } 2826 } 2827 return EXCEPTION_CONTINUE_SEARCH; 2828 } 2829 2830 #define DEFINE_FAST_GETFIELD(Return,Fieldname,Result) \ 2831 Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) { \ 2832 __try { \ 2833 return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, obj, fieldID); \ 2834 } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { \ 2835 } \ 2836 return 0; \ 2837 } 2838 2839 DEFINE_FAST_GETFIELD(jboolean, bool, Boolean) 2840 DEFINE_FAST_GETFIELD(jbyte, byte, Byte) 2841 DEFINE_FAST_GETFIELD(jchar, char, Char) 2842 DEFINE_FAST_GETFIELD(jshort, short, Short) 2843 DEFINE_FAST_GETFIELD(jint, int, Int) 2844 DEFINE_FAST_GETFIELD(jlong, long, Long) 2845 DEFINE_FAST_GETFIELD(jfloat, float, Float) 2846 DEFINE_FAST_GETFIELD(jdouble, double, Double) 2847 2848 address os::win32::fast_jni_accessor_wrapper(BasicType type) { 2849 switch (type) { 2850 case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper; 2851 case T_BYTE: return (address)jni_fast_GetByteField_wrapper; 2852 case T_CHAR: return (address)jni_fast_GetCharField_wrapper; 2853 case T_SHORT: return (address)jni_fast_GetShortField_wrapper; 2854 case T_INT: return (address)jni_fast_GetIntField_wrapper; 2855 case T_LONG: return (address)jni_fast_GetLongField_wrapper; 2856 case T_FLOAT: return (address)jni_fast_GetFloatField_wrapper; 2857 case T_DOUBLE: return (address)jni_fast_GetDoubleField_wrapper; 2858 default: ShouldNotReachHere(); 2859 } 2860 return (address)-1; 2861 } 2862 #endif 2863 2864 void 
os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) { 2865 // Install a win32 structured exception handler around the test 2866 // function call so the VM can generate an error dump if needed. 2867 __try { 2868 (*funcPtr)(); 2869 } __except(topLevelExceptionFilter( 2870 (_EXCEPTION_POINTERS*)_exception_info())) { 2871 // Nothing to do. 2872 } 2873 } 2874 2875 // Virtual Memory 2876 2877 int os::vm_page_size() { return os::win32::vm_page_size(); } 2878 int os::vm_allocation_granularity() { 2879 return os::win32::vm_allocation_granularity(); 2880 } 2881 2882 // Windows large page support is available on Windows 2003. In order to use 2883 // large page memory, the administrator must first assign additional privilege 2884 // to the user: 2885 // + select Control Panel -> Administrative Tools -> Local Security Policy 2886 // + select Local Policies -> User Rights Assignment 2887 // + double click "Lock pages in memory", add users and/or groups 2888 // + reboot 2889 // Note the above steps are needed for administrator as well, as administrators 2890 // by default do not have the privilege to lock pages in memory. 2891 // 2892 // Note about Windows 2003: although the API supports committing large page 2893 // memory on a page-by-page basis and VirtualAlloc() returns success under this 2894 // scenario, I found through experiment it only uses large page if the entire 2895 // memory region is reserved and committed in a single VirtualAlloc() call. 2896 // This makes Windows large page support more or less like Solaris ISM, in 2897 // that the entire heap must be committed upfront. This probably will change 2898 // in the future, if so the code below needs to be revisited. 
2899 2900 #ifndef MEM_LARGE_PAGES 2901 #define MEM_LARGE_PAGES 0x20000000 2902 #endif 2903 2904 static HANDLE _hProcess; 2905 static HANDLE _hToken; 2906 2907 // Container for NUMA node list info 2908 class NUMANodeListHolder { 2909 private: 2910 int *_numa_used_node_list; // allocated below 2911 int _numa_used_node_count; 2912 2913 void free_node_list() { 2914 if (_numa_used_node_list != NULL) { 2915 FREE_C_HEAP_ARRAY(int, _numa_used_node_list, mtInternal); 2916 } 2917 } 2918 2919 public: 2920 NUMANodeListHolder() { 2921 _numa_used_node_count = 0; 2922 _numa_used_node_list = NULL; 2923 // do rest of initialization in build routine (after function pointers are set up) 2924 } 2925 2926 ~NUMANodeListHolder() { 2927 free_node_list(); 2928 } 2929 2930 bool build() { 2931 DWORD_PTR proc_aff_mask; 2932 DWORD_PTR sys_aff_mask; 2933 if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false; 2934 ULONG highest_node_number; 2935 if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false; 2936 free_node_list(); 2937 _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal); 2938 for (unsigned int i = 0; i <= highest_node_number; i++) { 2939 ULONGLONG proc_mask_numa_node; 2940 if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false; 2941 if ((proc_aff_mask & proc_mask_numa_node)!=0) { 2942 _numa_used_node_list[_numa_used_node_count++] = i; 2943 } 2944 } 2945 return (_numa_used_node_count > 1); 2946 } 2947 2948 int get_count() {return _numa_used_node_count;} 2949 int get_node_list_entry(int n) { 2950 // for indexes out of range, returns -1 2951 return (n < _numa_used_node_count ? 
_numa_used_node_list[n] : -1); 2952 } 2953 2954 } numa_node_list_holder; 2955 2956 2957 2958 static size_t _large_page_size = 0; 2959 2960 static bool resolve_functions_for_large_page_init() { 2961 return os::Kernel32Dll::GetLargePageMinimumAvailable() && 2962 os::Advapi32Dll::AdvapiAvailable(); 2963 } 2964 2965 static bool request_lock_memory_privilege() { 2966 _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, 2967 os::current_process_id()); 2968 2969 LUID luid; 2970 if (_hProcess != NULL && 2971 os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) && 2972 os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) { 2973 2974 TOKEN_PRIVILEGES tp; 2975 tp.PrivilegeCount = 1; 2976 tp.Privileges[0].Luid = luid; 2977 tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; 2978 2979 // AdjustTokenPrivileges() may return TRUE even when it couldn't change the 2980 // privilege. Check GetLastError() too. See MSDN document. 2981 if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) && 2982 (GetLastError() == ERROR_SUCCESS)) { 2983 return true; 2984 } 2985 } 2986 2987 return false; 2988 } 2989 2990 static void cleanup_after_large_page_init() { 2991 if (_hProcess) CloseHandle(_hProcess); 2992 _hProcess = NULL; 2993 if (_hToken) CloseHandle(_hToken); 2994 _hToken = NULL; 2995 } 2996 2997 static bool numa_interleaving_init() { 2998 bool success = false; 2999 bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving); 3000 3001 // print a warning if UseNUMAInterleaving flag is specified on command line 3002 bool warn_on_failure = use_numa_interleaving_specified; 3003 # define WARN(msg) if (warn_on_failure) { warning(msg); } 3004 3005 // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages) 3006 size_t min_interleave_granularity = UseLargePages ? 
_large_page_size : os::vm_allocation_granularity(); 3007 NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity); 3008 3009 if (os::Kernel32Dll::NumaCallsAvailable()) { 3010 if (numa_node_list_holder.build()) { 3011 if (PrintMiscellaneous && Verbose) { 3012 tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count()); 3013 for (int i = 0; i < numa_node_list_holder.get_count(); i++) { 3014 tty->print("%d ", numa_node_list_holder.get_node_list_entry(i)); 3015 } 3016 tty->print("\n"); 3017 } 3018 success = true; 3019 } else { 3020 WARN("Process does not cover multiple NUMA nodes."); 3021 } 3022 } else { 3023 WARN("NUMA Interleaving is not supported by the operating system."); 3024 } 3025 if (!success) { 3026 if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag."); 3027 } 3028 return success; 3029 #undef WARN 3030 } 3031 3032 // this routine is used whenever we need to reserve a contiguous VA range 3033 // but we need to make separate VirtualAlloc calls for each piece of the range 3034 // Reasons for doing this: 3035 // * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise) 3036 // * UseNUMAInterleaving requires a separate node for each piece 3037 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, DWORD prot, 3038 bool should_inject_error=false) { 3039 char * p_buf; 3040 // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size 3041 size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity(); 3042 size_t chunk_size = UseNUMAInterleaving ? 
NUMAInterleaveGranularity : page_size; 3043 3044 // first reserve enough address space in advance since we want to be 3045 // able to break a single contiguous virtual address range into multiple 3046 // large page commits but WS2003 does not allow reserving large page space 3047 // so we just use 4K pages for reserve, this gives us a legal contiguous 3048 // address space. then we will deallocate that reservation, and re alloc 3049 // using large pages 3050 const size_t size_of_reserve = bytes + chunk_size; 3051 if (bytes > size_of_reserve) { 3052 // Overflowed. 3053 return NULL; 3054 } 3055 p_buf = (char *) VirtualAlloc(addr, 3056 size_of_reserve, // size of Reserve 3057 MEM_RESERVE, 3058 PAGE_READWRITE); 3059 // If reservation failed, return NULL 3060 if (p_buf == NULL) return NULL; 3061 MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC); 3062 os::release_memory(p_buf, bytes + chunk_size); 3063 3064 // we still need to round up to a page boundary (in case we are using large pages) 3065 // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size) 3066 // instead we handle this in the bytes_to_rq computation below 3067 p_buf = (char *) align_size_up((size_t)p_buf, page_size); 3068 3069 // now go through and allocate one chunk at a time until all bytes are 3070 // allocated 3071 size_t bytes_remaining = bytes; 3072 // An overflow of align_size_up() would have been caught above 3073 // in the calculation of size_of_reserve. 
3074 char * next_alloc_addr = p_buf; 3075 HANDLE hProc = GetCurrentProcess(); 3076 3077 #ifdef ASSERT 3078 // Variable for the failure injection 3079 long ran_num = os::random(); 3080 size_t fail_after = ran_num % bytes; 3081 #endif 3082 3083 int count=0; 3084 while (bytes_remaining) { 3085 // select bytes_to_rq to get to the next chunk_size boundary 3086 3087 size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size)); 3088 // Note allocate and commit 3089 char * p_new; 3090 3091 #ifdef ASSERT 3092 bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after); 3093 #else 3094 const bool inject_error_now = false; 3095 #endif 3096 3097 if (inject_error_now) { 3098 p_new = NULL; 3099 } else { 3100 if (!UseNUMAInterleaving) { 3101 p_new = (char *) VirtualAlloc(next_alloc_addr, 3102 bytes_to_rq, 3103 flags, 3104 prot); 3105 } else { 3106 // get the next node to use from the used_node_list 3107 assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected"); 3108 DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count()); 3109 p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc, 3110 next_alloc_addr, 3111 bytes_to_rq, 3112 flags, 3113 prot, 3114 node); 3115 } 3116 } 3117 3118 if (p_new == NULL) { 3119 // Free any allocated pages 3120 if (next_alloc_addr > p_buf) { 3121 // Some memory was committed so release it. 3122 size_t bytes_to_release = bytes - bytes_remaining; 3123 // NMT has yet to record any individual blocks, so it 3124 // need to create a dummy 'reserve' record to match 3125 // the release. 
3126 MemTracker::record_virtual_memory_reserve((address)p_buf, 3127 bytes_to_release, CALLER_PC); 3128 os::release_memory(p_buf, bytes_to_release); 3129 } 3130 #ifdef ASSERT 3131 if (should_inject_error) { 3132 if (TracePageSizes && Verbose) { 3133 tty->print_cr("Reserving pages individually failed."); 3134 } 3135 } 3136 #endif 3137 return NULL; 3138 } 3139 3140 bytes_remaining -= bytes_to_rq; 3141 next_alloc_addr += bytes_to_rq; 3142 count++; 3143 } 3144 // Although the memory is allocated individually, it is returned as one. 3145 // NMT records it as one block. 3146 if ((flags & MEM_COMMIT) != 0) { 3147 MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC); 3148 } else { 3149 MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC); 3150 } 3151 3152 // made it this far, success 3153 return p_buf; 3154 } 3155 3156 3157 3158 void os::large_page_init() { 3159 if (!UseLargePages) return; 3160 3161 // print a warning if any large page related flag is specified on command line 3162 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) || 3163 !FLAG_IS_DEFAULT(LargePageSizeInBytes); 3164 bool success = false; 3165 3166 # define WARN(msg) if (warn_on_failure) { warning(msg); } 3167 if (resolve_functions_for_large_page_init()) { 3168 if (request_lock_memory_privilege()) { 3169 size_t s = os::Kernel32Dll::GetLargePageMinimum(); 3170 if (s) { 3171 #if defined(IA32) || defined(AMD64) 3172 if (s > 4*M || LargePageSizeInBytes > 4*M) { 3173 WARN("JVM cannot use large pages bigger than 4mb."); 3174 } else { 3175 #endif 3176 if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) { 3177 _large_page_size = LargePageSizeInBytes; 3178 } else { 3179 _large_page_size = s; 3180 } 3181 success = true; 3182 #if defined(IA32) || defined(AMD64) 3183 } 3184 #endif 3185 } else { 3186 WARN("Large page is not supported by the processor."); 3187 } 3188 } else { 3189 WARN("JVM cannot use large page memory because it does not have enough 
privilege to lock pages in memory."); 3190 } 3191 } else { 3192 WARN("Large page is not supported by the operating system."); 3193 } 3194 #undef WARN 3195 3196 const size_t default_page_size = (size_t) vm_page_size(); 3197 if (success && _large_page_size > default_page_size) { 3198 _page_sizes[0] = _large_page_size; 3199 _page_sizes[1] = default_page_size; 3200 _page_sizes[2] = 0; 3201 } 3202 3203 cleanup_after_large_page_init(); 3204 UseLargePages = success; 3205 } 3206 3207 // On win32, one cannot release just a part of reserved memory, it's an 3208 // all or nothing deal. When we split a reservation, we must break the 3209 // reservation into two reservations. 3210 void os::pd_split_reserved_memory(char *base, size_t size, size_t split, 3211 bool realloc) { 3212 if (size > 0) { 3213 release_memory(base, size); 3214 if (realloc) { 3215 reserve_memory(split, base); 3216 } 3217 if (size != split) { 3218 reserve_memory(size - split, base + split); 3219 } 3220 } 3221 } 3222 3223 // Multiple threads can race in this code but it's not possible to unmap small sections of 3224 // virtual space to get requested alignment, like posix-like os's. 3225 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe. 
// Reserves 'size' bytes aligned to 'alignment' by over-reserving, releasing,
// and re-reserving at the aligned address. The release/re-reserve window can
// race with other threads, hence the retry loop (see comment above).
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);

    os::release_memory(extra_base, extra_size);

    // Attempt to claim the aligned address; another thread may have taken it,
    // in which case this returns NULL and we retry.
    aligned_base = os::reserve_memory(size, aligned_base);

  } while (aligned_base == NULL);

  return aligned_base;
}

// Reserves (but does not commit) 'bytes' of address space, optionally at
// 'addr'. With NUMA interleaving (and small pages) the reservation is made
// chunk-by-chunk across nodes via allocate_pages_individually().
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if( Verbose && PrintMiscellaneous ) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if( Verbose && PrintMiscellaneous ) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available.
  return reserve_memory(bytes, requested_addr);
}

size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

bool os::can_execute_large_page_memory() {
  return true;
}

// Reserves and commits 'bytes' of large-page memory, either in one
// VirtualAlloc call or (for individual allocation / NUMA interleaving)
// chunk-by-chunk. Returns NULL to signal fallback to small pages.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) {
  assert(UseLargePages, "only for large pages");

  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
    return NULL; // Fallback to small pages.
  }

  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;

  // with large pages, there are two cases where we need to use Individual Allocation
  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages individually.");
    }
    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
    if (p_buf == NULL) {
      // give an appropriate warning message
      if (UseNUMAInterleaving) {
        warning("NUMA large page allocation failed, UseLargePages flag ignored");
      }
      if (UseLargePagesIndividualAllocation) {
        warning("Individually allocated large pages failed, "
                "use -XX:-UseLargePagesIndividualAllocation to turn off");
      }
      return NULL;
    }

    return p_buf;

  } else {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages in a single large chunk.");
    }
    // normal policy just allocate it all at once
    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
    if (res != NULL) {
      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
    }

    return res;
  }
}

bool os::release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  return release_memory(base, bytes);
}

void os::print_statistics() {
}

// Emits a diagnostic warning with the OS error text when a commit fails.
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}

// Commits previously reserved pages; with NUMA interleaving the commit may
// have to be split per VirtualAlloc region (determined via VirtualQuery).
// Execute permission is applied with VirtualProtect after the commit.
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

// Commit-or-die variant: exits the VM with an OOM error if the commit fails.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  }
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
  return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
}

bool os::pd_release_memory(char* addr, size_t bytes) {
  // MEM_RELEASE requires size 0 and releases the entire reservation at addr.
  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}

// Set protections specified
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
    case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
    case MEM_PROT_READ: p = PAGE_READONLY; break;
    case MEM_PROT_RW:   p = PAGE_READWRITE; break;
    case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
    default:
      ShouldNotReachHere();
  }

  DWORD old_status;

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed) {
    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
                          "cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard page
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
  //
  // Pages in the region become guard pages. Any attempt to access a guard page
  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
  // the guard page status. Guard pages thus act as a one-time access alarm.
  return VirtualProtect(addr, bytes, p, &old_status) != 0;
}

bool os::guard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}

bool os::unguard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}

// NUMA-related hooks that are no-ops (or trivial) on Windows.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
bool os::numa_topology_changed() { return false; }
size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id() { return 0; }
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // Provide an answer for UMA systems
    ids[0] = 0;
    return 1;
  } else {
    // check for size bigger than actual groups_num
    size = MIN2(size, numa_get_groups_num());
    for (int i = 0; i < (int)size; i++) {
      ids[i] = numa_node_list_holder.get_node_list_entry(i);
    }
    return size;
  }
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}

#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL

// Resumes a thread created suspended; the assert catches ResumeThread failure.
void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}

// RAII helper: raises the multimedia timer resolution to 1ms for the lifetime
// of the object when the requested sleep is not a multiple of the default
// ~10ms resolution, and restores it on destruction.
class HighResolutionInterval : public CHeapObj<mtThread> {
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
 private:
  jlong resolution;
 public:
  HighResolutionInterval(jlong ms) {
    resolution = ms % 10L;
    if (resolution != 0) {
      MMRESULT result = timeBeginPeriod(1L);
    }
  }
  ~HighResolutionInterval() {
    if (resolution != 0) {
      MMRESULT result = timeEndPeriod(1L);
    }
    resolution = 0L;
  }
};

// Sleeps for 'ms' milliseconds. Interruptable sleeps (Java threads only) wait
// on the thread's interrupt event and return OS_INTRPT when it fires;
// non-interruptable sleeps call Sleep() directly. Sleeps longer than MAXDWORD
// are broken into MAXDWORD-sized legs by the recursive loop at the top.
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  while(ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT)
      return res;
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    HighResolutionInterval *phri=NULL;
    if(!ForceTimeHighResolution)
      phri = new HighResolutionInterval( ms );
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}

//
// Short sleep, direct OS call.
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    Sleep(100000);  // ... 100 seconds at a time
  }
}

typedef BOOL (WINAPI * STTSignature)(void) ;

// Yields the processor via SwitchToThread() when available, otherwise
// Sleep(0). Only the SwitchToThread() path reports whether a switch occurred.
os::YieldResult os::NakedYield() {
  // Use either SwitchToThread() or Sleep(0)
  // Consider passing back the return value from SwitchToThread().
  if (os::Kernel32Dll::SwitchToThreadAvailable()) {
    return SwitchToThread() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY ;
  } else {
    Sleep(0);
  }
  return os::YIELD_UNKNOWN ;
}

void os::yield() {  os::NakedYield(); }

void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  Sleep(1);
}

// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.

// Default mapping from Java priorities (0..CriticalPriority) to Win32
// thread priorities; adjacent Java priorities share a level because Win32
// exposes fewer distinct values.
int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};

// Alternative mapping used when -XX:ThreadPriorityPolicy=1: spreads the
// upper Java priorities over stronger Win32 levels (up to TIME_CRITICAL).
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};

// Installs the priority table selected by the flags; called once at startup.
static int prio_init() {
  // If ThreadPriorityPolicy is 1, switch tables
  if (ThreadPriorityPolicy == 1) {
    int i;
    for (i = 0; i < CriticalPriority + 1; i++) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  if (UseCriticalJavaThreadPriority) {
    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority] ;
  }
  return 0;
}

OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) return OS_OK;
  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
  return ret ? OS_OK : OS_ERR;
}

OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) {
  if ( !UseThreadPriorities ) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

// Marks the thread interrupted, publishes the flag, then signals the
// interrupt event and unparks the thread so any wait path observes it.
void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread())
    ((JavaThread*)thread)->parker()->unpark();

  ParkEvent * ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

}


// Reports (and optionally clears) the thread's interrupted state.
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. By checking thread interrupt event to see
  // if the thread gets real interrupt thus prevent spurious wakeup.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}

// Get's a pc (hint) for a running thread. Currently used only for profiling.
ExtendedPC os::get_thread_pc(Thread* thread) {
  CONTEXT context;
  context.ContextFlags = CONTEXT_CONTROL;
  HANDLE handle = thread->osthread()->thread_handle();
#ifdef _M_IA64
  assert(0, "Fix get_thread_pc");
  return ExtendedPC(NULL);
#else
  if (GetThreadContext(handle, &context)) {
#ifdef _M_AMD64
    return ExtendedPC((address) context.Rip);
#else
    return ExtendedPC((address) context.Eip);
#endif
  } else {
    return ExtendedPC(NULL);
  }
#endif
}

// GetCurrentThreadId() returns DWORD
intx os::current_thread_id()  { return GetCurrentThreadId(); }

// Cached pid; 0 until initialized elsewhere, in which case _getpid() is used.
static int _initial_pid = 0;

int os::current_process_id()
{
  return (_initial_pid ? _initial_pid : _getpid());
}

// Cached system characteristics, filled in by initialize_system_info().
int    os::win32::_vm_page_size              = 0;
int    os::win32::_vm_allocation_granularity = 0;
int    os::win32::_processor_type            = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int    os::win32::_processor_level           = 0;
julong os::win32::_physical_memory           = 0;
size_t os::win32::_default_stack_size        = 0;

intx          os::win32::_os_thread_limit    = 0;
volatile intx os::win32::_os_thread_count    = 0;

bool   os::win32::_is_nt                     = false;
bool   os::win32::_is_windows_2003           = false;
bool   os::win32::_is_windows_server         = false;

// Queries page size, allocation granularity, processor and memory info, and
// classifies the Windows flavor. (Continues beyond this chunk.)
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size    = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type  = si.dwProcessorType;
  _processor_level = si.wProcessorLevel;
  set_processor_count(si.dwNumberOfProcessors);

  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);

  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatusEx(&ms);
  _physical_memory = ms.ullTotalPhys;

  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch(oi.dwPlatformId) {
    case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
    case VER_PLATFORM_WIN32_NT:
      _is_nt = true;
      {
        int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
        if (os_vers == 5002) {
          _is_windows_2003 = true;
        }
        if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
            oi.wProductType == VER_NT_SERVER) {
          _is_windows_server = true;
        }
      }
      break;
    default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
assert((_default_stack_size & (_vm_page_size - 1)) == 0, 3886 "stack size not a multiple of page size"); 3887 3888 initialize_performance_counter(); 3889 3890 // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is 3891 // known to deadlock the system, if the VM issues to thread operations with 3892 // a too high frequency, e.g., such as changing the priorities. 3893 // The 6000 seems to work well - no deadlocks has been notices on the test 3894 // programs that we have seen experience this problem. 3895 if (!os::win32::is_nt()) { 3896 StarvationMonitorInterval = 6000; 3897 } 3898 } 3899 3900 3901 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, int ebuflen) { 3902 char path[MAX_PATH]; 3903 DWORD size; 3904 DWORD pathLen = (DWORD)sizeof(path); 3905 HINSTANCE result = NULL; 3906 3907 // only allow library name without path component 3908 assert(strchr(name, '\\') == NULL, "path not allowed"); 3909 assert(strchr(name, ':') == NULL, "path not allowed"); 3910 if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) { 3911 jio_snprintf(ebuf, ebuflen, 3912 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name); 3913 return NULL; 3914 } 3915 3916 // search system directory 3917 if ((size = GetSystemDirectory(path, pathLen)) > 0) { 3918 strcat(path, "\\"); 3919 strcat(path, name); 3920 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3921 return result; 3922 } 3923 } 3924 3925 // try Windows directory 3926 if ((size = GetWindowsDirectory(path, pathLen)) > 0) { 3927 strcat(path, "\\"); 3928 strcat(path, name); 3929 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3930 return result; 3931 } 3932 } 3933 3934 jio_snprintf(ebuf, ebuflen, 3935 "os::win32::load_windows_dll() cannot load %s from system directories.", name); 3936 return NULL; 3937 } 3938 3939 void os::win32::setmode_streams() { 3940 _setmode(_fileno(stdin), _O_BINARY); 3941 _setmode(_fileno(stdout), 
_O_BINARY); 3942 _setmode(_fileno(stderr), _O_BINARY); 3943 } 3944 3945 3946 bool os::is_debugger_attached() { 3947 return IsDebuggerPresent() ? true : false; 3948 } 3949 3950 3951 void os::wait_for_keypress_at_exit(void) { 3952 if (PauseAtExit) { 3953 fprintf(stderr, "Press any key to continue...\n"); 3954 fgetc(stdin); 3955 } 3956 } 3957 3958 3959 int os::message_box(const char* title, const char* message) { 3960 int result = MessageBox(NULL, message, title, 3961 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY); 3962 return result == IDYES; 3963 } 3964 3965 int os::allocate_thread_local_storage() { 3966 return TlsAlloc(); 3967 } 3968 3969 3970 void os::free_thread_local_storage(int index) { 3971 TlsFree(index); 3972 } 3973 3974 3975 void os::thread_local_storage_at_put(int index, void* value) { 3976 TlsSetValue(index, value); 3977 assert(thread_local_storage_at(index) == value, "Just checking"); 3978 } 3979 3980 3981 void* os::thread_local_storage_at(int index) { 3982 return TlsGetValue(index); 3983 } 3984 3985 3986 #ifndef PRODUCT 3987 #ifndef _WIN64 3988 // Helpers to check whether NX protection is enabled 3989 int nx_exception_filter(_EXCEPTION_POINTERS *pex) { 3990 if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 3991 pex->ExceptionRecord->NumberParameters > 0 && 3992 pex->ExceptionRecord->ExceptionInformation[0] == 3993 EXCEPTION_INFO_EXEC_VIOLATION) { 3994 return EXCEPTION_EXECUTE_HANDLER; 3995 } 3996 return EXCEPTION_CONTINUE_SEARCH; 3997 } 3998 3999 void nx_check_protection() { 4000 // If NX is enabled we'll get an exception calling into code on the stack 4001 char code[] = { (char)0xC3 }; // ret 4002 void *code_ptr = (void *)code; 4003 __try { 4004 __asm call code_ptr 4005 } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) { 4006 tty->print_raw_cr("NX protection detected."); 4007 } 4008 } 4009 #endif // _WIN64 4010 #endif // PRODUCT 4011 4012 // this is called _before_ the global arguments 
have been parsed 4013 void os::init(void) { 4014 _initial_pid = _getpid(); 4015 4016 init_random(1234567); 4017 4018 win32::initialize_system_info(); 4019 win32::setmode_streams(); 4020 init_page_sizes((size_t) win32::vm_page_size()); 4021 4022 // For better scalability on MP systems (must be called after initialize_system_info) 4023 #ifndef PRODUCT 4024 if (is_MP()) { 4025 NoYieldsInMicrolock = true; 4026 } 4027 #endif 4028 // This may be overridden later when argument processing is done. 4029 FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, 4030 os::win32::is_windows_2003()); 4031 4032 // Initialize main_process and main_thread 4033 main_process = GetCurrentProcess(); // Remember main_process is a pseudo handle 4034 if (!DuplicateHandle(main_process, GetCurrentThread(), main_process, 4035 &main_thread, THREAD_ALL_ACCESS, false, 0)) { 4036 fatal("DuplicateHandle failed\n"); 4037 } 4038 main_thread_id = (int) GetCurrentThreadId(); 4039 } 4040 4041 // To install functions for atexit processing 4042 extern "C" { 4043 static void perfMemory_exit_helper() { 4044 perfMemory_exit(); 4045 } 4046 } 4047 4048 static jint initSock(); 4049 4050 // this is called _after_ the global arguments have been parsed 4051 jint os::init_2(void) { 4052 // Allocate a single page and mark it as readable for safepoint polling 4053 address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY); 4054 guarantee( polling_page != NULL, "Reserve Failed for polling page"); 4055 4056 address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY); 4057 guarantee( return_page != NULL, "Commit Failed for polling page"); 4058 4059 os::set_polling_page( polling_page ); 4060 4061 #ifndef PRODUCT 4062 if( Verbose && PrintMiscellaneous ) 4063 tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page); 4064 #endif 4065 4066 if (!UseMembar) { 4067 address mem_serialize_page = 
(address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE); 4068 guarantee( mem_serialize_page != NULL, "Reserve Failed for memory serialize page"); 4069 4070 return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE); 4071 guarantee( return_page != NULL, "Commit Failed for memory serialize page"); 4072 4073 os::set_memory_serialize_page( mem_serialize_page ); 4074 4075 #ifndef PRODUCT 4076 if(Verbose && PrintMiscellaneous) 4077 tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page); 4078 #endif 4079 } 4080 4081 // Setup Windows Exceptions 4082 4083 // for debugging float code generation bugs 4084 if (ForceFloatExceptions) { 4085 #ifndef _WIN64 4086 static long fp_control_word = 0; 4087 __asm { fstcw fp_control_word } 4088 // see Intel PPro Manual, Vol. 2, p 7-16 4089 const long precision = 0x20; 4090 const long underflow = 0x10; 4091 const long overflow = 0x08; 4092 const long zero_div = 0x04; 4093 const long denorm = 0x02; 4094 const long invalid = 0x01; 4095 fp_control_word |= invalid; 4096 __asm { fldcw fp_control_word } 4097 #endif 4098 } 4099 4100 // If stack_commit_size is 0, windows will reserve the default size, 4101 // but only commit a small portion of it. 4102 size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size()); 4103 size_t default_reserve_size = os::win32::default_stack_size(); 4104 size_t actual_reserve_size = stack_commit_size; 4105 if (stack_commit_size < default_reserve_size) { 4106 // If stack_commit_size == 0, we want this too 4107 actual_reserve_size = default_reserve_size; 4108 } 4109 4110 // Check minimum allowable stack size for thread creation and to initialize 4111 // the java system classes, including StackOverflowError - depends on page 4112 // size. Add a page for compiler2 recursion in main thread. 
4113 // Add in 2*BytesPerWord times page size to account for VM stack during 4114 // class initialization depending on 32 or 64 bit VM. 4115 size_t min_stack_allowed = 4116 (size_t)(StackYellowPages+StackRedPages+StackShadowPages+ 4117 2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size(); 4118 if (actual_reserve_size < min_stack_allowed) { 4119 tty->print_cr("\nThe stack size specified is too small, " 4120 "Specify at least %dk", 4121 min_stack_allowed / K); 4122 return JNI_ERR; 4123 } 4124 4125 JavaThread::set_stack_size_at_create(stack_commit_size); 4126 4127 // Calculate theoretical max. size of Threads to guard gainst artifical 4128 // out-of-memory situations, where all available address-space has been 4129 // reserved by thread stacks. 4130 assert(actual_reserve_size != 0, "Must have a stack"); 4131 4132 // Calculate the thread limit when we should start doing Virtual Memory 4133 // banging. Currently when the threads will have used all but 200Mb of space. 4134 // 4135 // TODO: consider performing a similar calculation for commit size instead 4136 // as reserve size, since on a 64-bit platform we'll run into that more 4137 // often than running out of virtual memory space. We can use the 4138 // lower value of the two calculations as the os_thread_limit. 4139 size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K); 4140 win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size); 4141 4142 // at exit methods are called in the reverse order of their registration. 4143 // there is no limit to the number of functions registered. atexit does 4144 // not set errno. 4145 4146 if (PerfAllowAtExitRegistration) { 4147 // only register atexit functions if PerfAllowAtExitRegistration is set. 4148 // atexit functions can be delayed until process exit time, which 4149 // can be problematic for embedded VM situations. Embedded VMs should 4150 // call DestroyJavaVM() to assure that VM resources are released. 
4151 4152 // note: perfMemory_exit_helper atexit function may be removed in 4153 // the future if the appropriate cleanup code can be added to the 4154 // VM_Exit VMOperation's doit method. 4155 if (atexit(perfMemory_exit_helper) != 0) { 4156 warning("os::init_2 atexit(perfMemory_exit_helper) failed"); 4157 } 4158 } 4159 4160 #ifndef _WIN64 4161 // Print something if NX is enabled (win32 on AMD64) 4162 NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection()); 4163 #endif 4164 4165 // initialize thread priority policy 4166 prio_init(); 4167 4168 if (UseNUMA && !ForceNUMA) { 4169 UseNUMA = false; // We don't fully support this yet 4170 } 4171 4172 if (UseNUMAInterleaving) { 4173 // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag 4174 bool success = numa_interleaving_init(); 4175 if (!success) UseNUMAInterleaving = false; 4176 } 4177 4178 if (initSock() != JNI_OK) { 4179 return JNI_ERR; 4180 } 4181 4182 return JNI_OK; 4183 } 4184 4185 // Mark the polling page as unreadable 4186 void os::make_polling_page_unreadable(void) { 4187 DWORD old_status; 4188 if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status) ) 4189 fatal("Could not disable polling page"); 4190 }; 4191 4192 // Mark the polling page as readable 4193 void os::make_polling_page_readable(void) { 4194 DWORD old_status; 4195 if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status) ) 4196 fatal("Could not enable polling page"); 4197 }; 4198 4199 4200 int os::stat(const char *path, struct stat *sbuf) { 4201 char pathbuf[MAX_PATH]; 4202 if (strlen(path) > MAX_PATH - 1) { 4203 errno = ENAMETOOLONG; 4204 return -1; 4205 } 4206 os::native_path(strcpy(pathbuf, path)); 4207 int ret = ::stat(pathbuf, sbuf); 4208 if (sbuf != NULL && UseUTCFileTimestamp) { 4209 // Fix for 6539723. 
st_mtime returned from stat() is dependent on 4210 // the system timezone and so can return different values for the 4211 // same file if/when daylight savings time changes. This adjustment 4212 // makes sure the same timestamp is returned regardless of the TZ. 4213 // 4214 // See: 4215 // http://msdn.microsoft.com/library/ 4216 // default.asp?url=/library/en-us/sysinfo/base/ 4217 // time_zone_information_str.asp 4218 // and 4219 // http://msdn.microsoft.com/library/default.asp?url= 4220 // /library/en-us/sysinfo/base/settimezoneinformation.asp 4221 // 4222 // NOTE: there is a insidious bug here: If the timezone is changed 4223 // after the call to stat() but before 'GetTimeZoneInformation()', then 4224 // the adjustment we do here will be wrong and we'll return the wrong 4225 // value (which will likely end up creating an invalid class data 4226 // archive). Absent a better API for this, or some time zone locking 4227 // mechanism, we'll have to live with this risk. 4228 TIME_ZONE_INFORMATION tz; 4229 DWORD tzid = GetTimeZoneInformation(&tz); 4230 int daylightBias = 4231 (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias; 4232 sbuf->st_mtime += (tz.Bias + daylightBias) * 60; 4233 } 4234 return ret; 4235 } 4236 4237 4238 #define FT2INT64(ft) \ 4239 ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime)) 4240 4241 4242 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) 4243 // are used by JVM M&M and JVMTI to get user+sys or user CPU time 4244 // of a thread. 4245 // 4246 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns 4247 // the fast estimate available on the platform. 
4248 4249 // current_thread_cpu_time() is not optimized for Windows yet 4250 jlong os::current_thread_cpu_time() { 4251 // return user + sys since the cost is the same 4252 return os::thread_cpu_time(Thread::current(), true /* user+sys */); 4253 } 4254 4255 jlong os::thread_cpu_time(Thread* thread) { 4256 // consistent with what current_thread_cpu_time() returns. 4257 return os::thread_cpu_time(thread, true /* user+sys */); 4258 } 4259 4260 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 4261 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 4262 } 4263 4264 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) { 4265 // This code is copy from clasic VM -> hpi::sysThreadCPUTime 4266 // If this function changes, os::is_thread_cpu_time_supported() should too 4267 if (os::win32::is_nt()) { 4268 FILETIME CreationTime; 4269 FILETIME ExitTime; 4270 FILETIME KernelTime; 4271 FILETIME UserTime; 4272 4273 if ( GetThreadTimes(thread->osthread()->thread_handle(), 4274 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0) 4275 return -1; 4276 else 4277 if (user_sys_cpu_time) { 4278 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100; 4279 } else { 4280 return FT2INT64(UserTime) * 100; 4281 } 4282 } else { 4283 return (jlong) timeGetTime() * 1000000; 4284 } 4285 } 4286 4287 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4288 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4289 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4290 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4291 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4292 } 4293 4294 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4295 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4296 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4297 info_ptr->may_skip_forward = false; // GetThreadTimes 
returns absolute time 4298 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4299 } 4300 4301 bool os::is_thread_cpu_time_supported() { 4302 // see os::thread_cpu_time 4303 if (os::win32::is_nt()) { 4304 FILETIME CreationTime; 4305 FILETIME ExitTime; 4306 FILETIME KernelTime; 4307 FILETIME UserTime; 4308 4309 if ( GetThreadTimes(GetCurrentThread(), 4310 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0) 4311 return false; 4312 else 4313 return true; 4314 } else { 4315 return false; 4316 } 4317 } 4318 4319 // Windows does't provide a loadavg primitive so this is stubbed out for now. 4320 // It does have primitives (PDH API) to get CPU usage and run queue length. 4321 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length" 4322 // If we wanted to implement loadavg on Windows, we have a few options: 4323 // 4324 // a) Query CPU usage and run queue length and "fake" an answer by 4325 // returning the CPU usage if it's under 100%, and the run queue 4326 // length otherwise. It turns out that querying is pretty slow 4327 // on Windows, on the order of 200 microseconds on a fast machine. 4328 // Note that on the Windows the CPU usage value is the % usage 4329 // since the last time the API was called (and the first call 4330 // returns 100%), so we'd have to deal with that as well. 4331 // 4332 // b) Sample the "fake" answer using a sampling thread and store 4333 // the answer in a global variable. The call to loadavg would 4334 // just return the value of the global, avoiding the slow query. 4335 // 4336 // c) Sample a better answer using exponential decay to smooth the 4337 // value. This is basically the algorithm used by UNIX kernels. 4338 // 4339 // Note that sampling thread starvation could affect both (b) and (c). 
4340 int os::loadavg(double loadavg[], int nelem) { 4341 return -1; 4342 } 4343 4344 4345 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield() 4346 bool os::dont_yield() { 4347 return DontYieldALot; 4348 } 4349 4350 // This method is a slightly reworked copy of JDK's sysOpen 4351 // from src/windows/hpi/src/sys_api_md.c 4352 4353 int os::open(const char *path, int oflag, int mode) { 4354 char pathbuf[MAX_PATH]; 4355 4356 if (strlen(path) > MAX_PATH - 1) { 4357 errno = ENAMETOOLONG; 4358 return -1; 4359 } 4360 os::native_path(strcpy(pathbuf, path)); 4361 return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); 4362 } 4363 4364 FILE* os::open(int fd, const char* mode) { 4365 return ::_fdopen(fd, mode); 4366 } 4367 4368 // Is a (classpath) directory empty? 4369 bool os::dir_is_empty(const char* path) { 4370 WIN32_FIND_DATA fd; 4371 HANDLE f = FindFirstFile(path, &fd); 4372 if (f == INVALID_HANDLE_VALUE) { 4373 return true; 4374 } 4375 FindClose(f); 4376 return false; 4377 } 4378 4379 // create binary file, rewriting existing file if required 4380 int os::create_binary_file(const char* path, bool rewrite_existing) { 4381 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY; 4382 if (!rewrite_existing) { 4383 oflags |= _O_EXCL; 4384 } 4385 return ::open(path, oflags, _S_IREAD | _S_IWRITE); 4386 } 4387 4388 // return current position of file pointer 4389 jlong os::current_file_offset(int fd) { 4390 return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR); 4391 } 4392 4393 // move file pointer to the specified offset 4394 jlong os::seek_to_file_offset(int fd, jlong offset) { 4395 return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET); 4396 } 4397 4398 4399 jlong os::lseek(int fd, jlong offset, int whence) { 4400 return (jlong) ::_lseeki64(fd, offset, whence); 4401 } 4402 4403 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) { 4404 OVERLAPPED ov; 4405 DWORD nread; 4406 BOOL result; 4407 4408 ZeroMemory(&ov, 
sizeof(ov)); 4409 ov.Offset = (DWORD)offset; 4410 ov.OffsetHigh = (DWORD)(offset >> 32); 4411 4412 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4413 4414 result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov); 4415 4416 return result ? nread : 0; 4417 } 4418 4419 // This method is a slightly reworked copy of JDK's sysNativePath 4420 // from src/windows/hpi/src/path_md.c 4421 4422 /* Convert a pathname to native format. On win32, this involves forcing all 4423 separators to be '\\' rather than '/' (both are legal inputs, but Win95 4424 sometimes rejects '/') and removing redundant separators. The input path is 4425 assumed to have been converted into the character encoding used by the local 4426 system. Because this might be a double-byte encoding, care is taken to 4427 treat double-byte lead characters correctly. 4428 4429 This procedure modifies the given path in place, as the result is never 4430 longer than the original. There is no error return; this operation always 4431 succeeds. */ 4432 char * os::native_path(char *path) { 4433 char *src = path, *dst = path, *end = path; 4434 char *colon = NULL; /* If a drive specifier is found, this will 4435 point to the colon following the drive 4436 letter */ 4437 4438 /* Assumption: '/', '\\', ':', and drive letters are never lead bytes */ 4439 assert(((!::IsDBCSLeadByte('/')) 4440 && (!::IsDBCSLeadByte('\\')) 4441 && (!::IsDBCSLeadByte(':'))), 4442 "Illegal lead byte"); 4443 4444 /* Check for leading separators */ 4445 #define isfilesep(c) ((c) == '/' || (c) == '\\') 4446 while (isfilesep(*src)) { 4447 src++; 4448 } 4449 4450 if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') { 4451 /* Remove leading separators if followed by drive specifier. This 4452 hack is necessary to support file URLs containing drive 4453 specifiers (e.g., "file://c:/path"). As a side effect, 4454 "/c:/path" can be used as an alternative to "c:/path". 
*/ 4455 *dst++ = *src++; 4456 colon = dst; 4457 *dst++ = ':'; 4458 src++; 4459 } else { 4460 src = path; 4461 if (isfilesep(src[0]) && isfilesep(src[1])) { 4462 /* UNC pathname: Retain first separator; leave src pointed at 4463 second separator so that further separators will be collapsed 4464 into the second separator. The result will be a pathname 4465 beginning with "\\\\" followed (most likely) by a host name. */ 4466 src = dst = path + 1; 4467 path[0] = '\\'; /* Force first separator to '\\' */ 4468 } 4469 } 4470 4471 end = dst; 4472 4473 /* Remove redundant separators from remainder of path, forcing all 4474 separators to be '\\' rather than '/'. Also, single byte space 4475 characters are removed from the end of the path because those 4476 are not legal ending characters on this operating system. 4477 */ 4478 while (*src != '\0') { 4479 if (isfilesep(*src)) { 4480 *dst++ = '\\'; src++; 4481 while (isfilesep(*src)) src++; 4482 if (*src == '\0') { 4483 /* Check for trailing separator */ 4484 end = dst; 4485 if (colon == dst - 2) break; /* "z:\\" */ 4486 if (dst == path + 1) break; /* "\\" */ 4487 if (dst == path + 2 && isfilesep(path[0])) { 4488 /* "\\\\" is not collapsed to "\\" because "\\\\" marks the 4489 beginning of a UNC pathname. Even though it is not, by 4490 itself, a valid UNC pathname, we leave it as is in order 4491 to be consistent with the path canonicalizer as well 4492 as the win32 APIs, which treat this case as an invalid 4493 UNC pathname rather than as an alias for the root 4494 directory of the current drive. 
*/ 4495 break; 4496 } 4497 end = --dst; /* Path does not denote a root directory, so 4498 remove trailing separator */ 4499 break; 4500 } 4501 end = dst; 4502 } else { 4503 if (::IsDBCSLeadByte(*src)) { /* Copy a double-byte character */ 4504 *dst++ = *src++; 4505 if (*src) *dst++ = *src++; 4506 end = dst; 4507 } else { /* Copy a single-byte character */ 4508 char c = *src++; 4509 *dst++ = c; 4510 /* Space is not a legal ending character */ 4511 if (c != ' ') end = dst; 4512 } 4513 } 4514 } 4515 4516 *end = '\0'; 4517 4518 /* For "z:", add "." to work around a bug in the C runtime library */ 4519 if (colon == dst - 1) { 4520 path[2] = '.'; 4521 path[3] = '\0'; 4522 } 4523 4524 return path; 4525 } 4526 4527 // This code is a copy of JDK's sysSetLength 4528 // from src/windows/hpi/src/sys_api_md.c 4529 4530 int os::ftruncate(int fd, jlong length) { 4531 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4532 long high = (long)(length >> 32); 4533 DWORD ret; 4534 4535 if (h == (HANDLE)(-1)) { 4536 return -1; 4537 } 4538 4539 ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN); 4540 if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) { 4541 return -1; 4542 } 4543 4544 if (::SetEndOfFile(h) == FALSE) { 4545 return -1; 4546 } 4547 4548 return 0; 4549 } 4550 4551 4552 // This code is a copy of JDK's sysSync 4553 // from src/windows/hpi/src/sys_api_md.c 4554 // except for the legacy workaround for a bug in Win 98 4555 4556 int os::fsync(int fd) { 4557 HANDLE handle = (HANDLE)::_get_osfhandle(fd); 4558 4559 if ( (!::FlushFileBuffers(handle)) && 4560 (GetLastError() != ERROR_ACCESS_DENIED) ) { 4561 /* from winerror.h */ 4562 return -1; 4563 } 4564 return 0; 4565 } 4566 4567 static int nonSeekAvailable(int, long *); 4568 static int stdinAvailable(int, long *); 4569 4570 #define S_ISCHR(mode) (((mode) & _S_IFCHR) == _S_IFCHR) 4571 #define S_ISFIFO(mode) (((mode) & _S_IFIFO) == _S_IFIFO) 4572 4573 // This code is a copy of JDK's sysAvailable 4574 // from 
src/windows/hpi/src/sys_api_md.c 4575 4576 int os::available(int fd, jlong *bytes) { 4577 jlong cur, end; 4578 struct _stati64 stbuf64; 4579 4580 if (::_fstati64(fd, &stbuf64) >= 0) { 4581 int mode = stbuf64.st_mode; 4582 if (S_ISCHR(mode) || S_ISFIFO(mode)) { 4583 int ret; 4584 long lpbytes; 4585 if (fd == 0) { 4586 ret = stdinAvailable(fd, &lpbytes); 4587 } else { 4588 ret = nonSeekAvailable(fd, &lpbytes); 4589 } 4590 (*bytes) = (jlong)(lpbytes); 4591 return ret; 4592 } 4593 if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) { 4594 return FALSE; 4595 } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) { 4596 return FALSE; 4597 } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) { 4598 return FALSE; 4599 } 4600 *bytes = end - cur; 4601 return TRUE; 4602 } else { 4603 return FALSE; 4604 } 4605 } 4606 4607 // This code is a copy of JDK's nonSeekAvailable 4608 // from src/windows/hpi/src/sys_api_md.c 4609 4610 static int nonSeekAvailable(int fd, long *pbytes) { 4611 /* This is used for available on non-seekable devices 4612 * (like both named and anonymous pipes, such as pipes 4613 * connected to an exec'd process). 4614 * Standard Input is a special case. 4615 * 4616 */ 4617 HANDLE han; 4618 4619 if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) { 4620 return FALSE; 4621 } 4622 4623 if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) { 4624 /* PeekNamedPipe fails when at EOF. In that case we 4625 * simply make *pbytes = 0 which is consistent with the 4626 * behavior we get on Solaris when an fd is at EOF. 4627 * The only alternative is to raise an Exception, 4628 * which isn't really warranted. 
   */
    // Tail of nonSeekAvailable() (head is above this chunk): a failed pipe
    // read with ERROR_BROKEN_PIPE means EOF -> report 0 bytes available.
    if (::GetLastError() != ERROR_BROKEN_PIPE) {
      return FALSE;
    }
    *pbytes = 0;
  }
  return TRUE;
}

// Upper bound on console input records we examine per query; keeps the
// PeekConsoleInput buffer under the 64K API limit (see below).
#define MAX_INPUT_EVENTS 2000

// This code is a copy of JDK's stdinAvailable
// from src/windows/hpi/src/sys_api_md.c

// Estimate how many bytes are readable from the console stdin without
// blocking. Counts pending key-down events up to (and including) the last
// carriage return, since line-buffered console input only delivers complete
// lines. Falls back to nonSeekAvailable() when the handle is not a console.
// Returns TRUE on success (*pbytes set), FALSE on failure.
static int stdinAvailable(int fd, long *pbytes) {
  HANDLE han;
  DWORD numEventsRead = 0;  /* Number of events read from buffer */
  DWORD numEvents = 0;      /* Number of events in buffer */
  DWORD i = 0;              /* Loop index */
  DWORD curLength = 0;      /* Position marker */
  DWORD actualLength = 0;   /* Number of bytes readable */
  BOOL error = FALSE;       /* Error holder */
  INPUT_RECORD *lpBuffer;   /* Pointer to records of input events */

  if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
    return FALSE;
  }

  /* Construct an array of input records in the console buffer */
  // Fails (returns 0) when stdin is redirected to a pipe/file; in that case
  // defer to the pipe-based probe.
  error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
  if (error == 0) {
    return nonSeekAvailable(fd, pbytes);
  }

  /* lpBuffer must fit into 64K or else PeekConsoleInput fails */
  if (numEvents > MAX_INPUT_EVENTS) {
    numEvents = MAX_INPUT_EVENTS;
  }

  lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
  if (lpBuffer == NULL) {
    return FALSE;
  }

  error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
  if (error == 0) {
    os::free(lpBuffer, mtInternal);
    return FALSE;
  }

  /* Examine input records for the number of bytes available */
  // NOTE(review): the loop bound is numEvents (the *requested* count), not
  // numEventsRead (the count PeekConsoleInput actually filled in). If the API
  // ever returns fewer records than requested, trailing buffer entries are
  // uninitialized — confirm whether numEventsRead was intended here.
  for(i=0; i<numEvents; i++) {
    if (lpBuffer[i].EventType == KEY_EVENT) {

      KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
                                      &(lpBuffer[i].Event);
      // Only key-down events produce input characters; a '\r' marks the end
      // of a complete (readable) line.
      if (keyRecord->bKeyDown == TRUE) {
        CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
        curLength++;
        if (*keyPressed == '\r') {
          actualLength = curLength;
        }
      }
    }
  }

  // lpBuffer is non-NULL here (checked above); the guard is redundant but
  // harmless.
  if(lpBuffer != NULL) {
    os::free(lpBuffer, mtInternal);
  }

  *pbytes = (long) actualLength;
  return TRUE;
}

// Map a block of memory.
// Maps (a slice of) the file `file_name` at `file_offset` for `bytes` bytes
// at the requested address `addr`. Two strategies:
//  - allow_exec: Windows cannot map executable views of a non-PE file, so we
//    VirtualAlloc anonymous memory at `addr`, ReadFile the content in, and
//    later VirtualProtect it executable. The region is private (not shared).
//  - !allow_exec: a normal copy-on-write CreateFileMapping/MapViewOfFileEx.
// Returns the mapped base address, or NULL on failure. On the partial-failure
// paths where only handle cleanup failed, the (usable) base is still returned.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
  HANDLE hFile;
  char* base;

  hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
                     OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
  // NOTE(review): CreateFile signals failure with INVALID_HANDLE_VALUE, not
  // NULL — this check looks wrong per the Win32 contract; confirm.
  if (hFile == NULL) {
    if (PrintMiscellaneous && Verbose) {
      DWORD err = GetLastError();
      tty->print_cr("CreateFile() failed: GetLastError->%ld.", err);
    }
    return NULL;
  }

  if (allow_exec) {
    // CreateFileMapping/MapViewOfFileEx can't map executable memory
    // unless it comes from a PE image (which the shared archive is not.)
    // Even VirtualProtect refuses to give execute access to mapped memory
    // that was not previously executable.
    //
    // Instead, stick the executable region in anonymous memory. Yuck.
    // Penalty is that ~4 pages will not be shareable - in the future
    // we might consider DLLizing the shared archive with a proper PE
    // header so that mapping executable + sharing is possible.

    base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
                                PAGE_READWRITE);
    if (base == NULL) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err);
      }
      CloseHandle(hFile);
      return NULL;
    }

    DWORD bytes_read;
    OVERLAPPED overlapped;
    overlapped.Offset = (DWORD)file_offset;
    overlapped.OffsetHigh = 0;
    overlapped.hEvent = NULL;
    // ReadFile guarantees that if the return value is true, the requested
    // number of bytes were read before returning.
    bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
    if (!res) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("ReadFile() failed: GetLastError->%ld.", err);
      }
      release_memory(base, bytes);
      CloseHandle(hFile);
      return NULL;
    }
  } else {
    HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
                                    NULL /*file_name*/);
    if (hMap == NULL) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err);
      }
      CloseHandle(hFile);
      return NULL;
    }

    // Copy-on-write unless the caller only needs read access.
    DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
    base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
                                  (DWORD)bytes, addr);
    if (base == NULL) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err);
      }
      CloseHandle(hMap);
      CloseHandle(hFile);
      return NULL;
    }

    // The mapping object can be closed once the view exists; the view keeps
    // the mapping alive.
    if (CloseHandle(hMap) == 0) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err);
      }
      CloseHandle(hFile);
      return base;
    }
  }

  if (allow_exec) {
    // Flip the anonymous region to an executable protection now that the
    // content has been read in.
    DWORD old_protect;
    DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
    bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;

    if (!res) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err);
      }
      // Don't consider this a hard error, on IA32 even if the
      // VirtualProtect fails, we should still be able to execute
      CloseHandle(hFile);
      return base;
    }
  }

  if (CloseHandle(hFile) == 0) {
    if (PrintMiscellaneous && Verbose) {
      DWORD err = GetLastError();
      tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err);
    }
    return base;
  }

  return base;
}


// Remap a block of memory.
// Re-establishes the mapping for a region previously created by
// pd_map_memory(). Returns the new base, or NULL on failure.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // This OS does not allow existing memory maps to be remapped so we
  // have to unmap the memory before we remap it.
  if (!os::unmap_memory(addr, bytes)) {
    return NULL;
  }

  // There is a very small theoretical window between the unmap_memory()
  // call above and the map_memory() call below where a thread in native
  // code may be able to access an address that is no longer mapped.

  return os::map_memory(fd, file_name, file_offset, addr, bytes,
                        read_only, allow_exec);
}


// Unmap a block of memory.
// Returns true=success, otherwise false.
4844 4845 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4846 BOOL result = UnmapViewOfFile(addr); 4847 if (result == 0) { 4848 if (PrintMiscellaneous && Verbose) { 4849 DWORD err = GetLastError(); 4850 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err); 4851 } 4852 return false; 4853 } 4854 return true; 4855 } 4856 4857 void os::pause() { 4858 char filename[MAX_PATH]; 4859 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4860 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4861 } else { 4862 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4863 } 4864 4865 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4866 if (fd != -1) { 4867 struct stat buf; 4868 ::close(fd); 4869 while (::stat(filename, &buf) == 0) { 4870 Sleep(100); 4871 } 4872 } else { 4873 jio_fprintf(stderr, 4874 "Could not open pause file '%s', continuing immediately.\n", filename); 4875 } 4876 } 4877 4878 Thread* os::ThreadCrashProtection::_protected_thread = NULL; 4879 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL; 4880 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0; 4881 4882 os::ThreadCrashProtection::ThreadCrashProtection() { 4883 } 4884 4885 // See the caveats for this class in os_windows.hpp 4886 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4887 // into this method and returns false. If no OS EXCEPTION was raised, returns 4888 // true. 4889 // The callback is supposed to provide the method that should be protected. 
4890 // 4891 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) { 4892 4893 Thread::muxAcquire(&_crash_mux, "CrashProtection"); 4894 4895 _protected_thread = ThreadLocalStorage::thread(); 4896 assert(_protected_thread != NULL, "Cannot crash protect a NULL thread"); 4897 4898 bool success = true; 4899 __try { 4900 _crash_protection = this; 4901 cb.call(); 4902 } __except(EXCEPTION_EXECUTE_HANDLER) { 4903 // only for protection, nothing to do 4904 success = false; 4905 } 4906 _crash_protection = NULL; 4907 _protected_thread = NULL; 4908 Thread::muxRelease(&_crash_mux); 4909 return success; 4910 } 4911 4912 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() { 4913 assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread"); 4914 } 4915 4916 /* 4917 * See the caveats for this class in os_windows.hpp 4918 * Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4919 * into this method and returns false. If no OS EXCEPTION was raised, returns 4920 * true. 4921 * The callback is supposed to provide the method that should be protected. 4922 */ 4923 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) { 4924 assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread"); 4925 assert(!WatcherThread::watcher_thread()->has_crash_protection(), 4926 "crash_protection already set?"); 4927 4928 bool success = true; 4929 __try { 4930 WatcherThread::watcher_thread()->set_crash_protection(this); 4931 cb.call(); 4932 } __except(EXCEPTION_EXECUTE_HANDLER) { 4933 // only for protection, nothing to do 4934 success = false; 4935 } 4936 WatcherThread::watcher_thread()->set_crash_protection(NULL); 4937 return success; 4938 } 4939 4940 // An Event wraps a win32 "CreateEvent" kernel handle. 
//
// We have a number of choices regarding "CreateEvent" win32 handle leakage:
//
// 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle
//    field, and call CloseHandle() on the win32 event handle.  Unpark() would
//    need to be modified to tolerate finding a NULL (invalid) win32 event handle.
//    In addition, an unpark() operation might fetch the handle field, but the
//    event could recycle between the fetch and the SetEvent() operation.
//    SetEvent() would either fail because the handle was invalid, or inadvertently work,
//    as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
//    on an stale but recycled handle would be harmless, but in practice this might
//    confuse other non-Sun code, so it's not a viable approach.
//
// 2: Once a win32 event handle is associated with an Event, it remains associated
//    with the Event.  The event handle is never closed.  This could be construed
//    as handle leakage, but only up to the maximum # of threads that have been extant
//    at any one time.  This shouldn't be an issue, as windows platforms typically
//    permit a process to have hundreds of thousands of open handles.
//
// 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
//    and release unused handles.
//
// 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
//    It's not clear, however, that we wouldn't be trading one type of leak for another.
//
// 5. Use an RCU-like mechanism (Read-Copy Update).
//    Or perhaps something similar to Maged Michael's "Hazard pointers".
//
// We use (2).
//
// TODO-FIXME:
// 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
// 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
//    to recover from (or at least detect) the dreaded Windows 841176 bug.
// 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
//    into a single win32 CreateEvent() handle.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block
//
// _Event serves as a restricted-range semaphore :
//   -1 : thread is blocked
//    0 : neutral  - thread is running or ready
//    1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be
// with explicit "PARKED" and "SIGNALED" bits.

// Timed park: block the owning thread for up to Millis milliseconds, or
// until unpark(). Returns OS_OK if (possibly) awoken by unpark, OS_TIMEOUT
// if the wait timed out. Only the thread that owns this event may call it.
int os::PlatformEvent::park (jlong Millis) {
  guarantee (_ParkHandle != NULL , "Invariant") ;
  guarantee (Millis > 0          , "Invariant") ;
  int v ;

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.

  // Atomically decrement _Event (1 -> 0: consume a pending unpark and pass;
  // 0 -> -1: announce we are about to block).
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee ((v == 0) || (v == 1), "invariant") ;
  if (v != 0) return OS_OK ;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000 ;
  DWORD rv = WAIT_TIMEOUT ;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis ;     // set prd = MAX (Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT ;
    }
    rv = ::WaitForSingleObject (_ParkHandle, prd) ;
    assert (rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed") ;
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd ;
    }
  }
  v = _Event ;
  _Event = 0 ;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence() ;
  // If we encounter a nearly simultanous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT ;
}

// Untimed park: block the owning thread until unpark().
void os::PlatformEvent::park () {
  guarantee (_ParkHandle != NULL, "Invariant") ;
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  int v ;
  // Same decrement protocol as the timed variant above.
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee ((v == 0) || (v == 1), "invariant") ;
  if (v != 0) return ;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  // Loop to absorb spurious wakeups: only exit once unpark() has raised
  // _Event back to >= 0.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject (_ParkHandle, INFINITE) ;
    assert (rv == WAIT_OBJECT_0, "WaitForSingleObject failed") ;
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0 ;
  OrderAccess::fence() ;
  guarantee (_Event >= 0, "invariant") ;
}

// Wake the event's owning thread (or record a pending wakeup if it is not
// currently parked). Safe to call from any thread.
void os::PlatformEvent::unpark() {
  guarantee (_ParkHandle != NULL, "Invariant") ;

  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  //          That is, we can safely transition _Event from -1 to either
  //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
  //          unpark() calls.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // xchg returns the previous value; only signal the kernel event when the
  // owner was actually blocked (previous value < 0).
  if (Atomic::xchg(1, &_Event) >= 0) return;

  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

/*
 * The Windows implementation of Park is very straightforward: Basic
 * operations on Win32 Events turn out to have the right semantics to
 * use them directly. We opportunistically resuse the event inherited
 * from Monitor.
5105 */ 5106 5107 5108 void Parker::park(bool isAbsolute, jlong time) { 5109 guarantee (_ParkEvent != NULL, "invariant") ; 5110 // First, demultiplex/decode time arguments 5111 if (time < 0) { // don't wait 5112 return; 5113 } 5114 else if (time == 0 && !isAbsolute) { 5115 time = INFINITE; 5116 } 5117 else if (isAbsolute) { 5118 time -= os::javaTimeMillis(); // convert to relative time 5119 if (time <= 0) // already elapsed 5120 return; 5121 } 5122 else { // relative 5123 time /= 1000000; // Must coarsen from nanos to millis 5124 if (time == 0) // Wait for the minimal time unit if zero 5125 time = 1; 5126 } 5127 5128 JavaThread* thread = (JavaThread*)(Thread::current()); 5129 assert(thread->is_Java_thread(), "Must be JavaThread"); 5130 JavaThread *jt = (JavaThread *)thread; 5131 5132 // Don't wait if interrupted or already triggered 5133 if (Thread::is_interrupted(thread, false) || 5134 WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) { 5135 ResetEvent(_ParkEvent); 5136 return; 5137 } 5138 else { 5139 ThreadBlockInVM tbivm(jt); 5140 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */); 5141 jt->set_suspend_equivalent(); 5142 5143 WaitForSingleObject(_ParkEvent, time); 5144 ResetEvent(_ParkEvent); 5145 5146 // If externally suspended while waiting, re-suspend 5147 if (jt->handle_special_suspend_equivalent_condition()) { 5148 jt->java_suspend_self(); 5149 } 5150 } 5151 } 5152 5153 void Parker::unpark() { 5154 guarantee (_ParkEvent != NULL, "invariant") ; 5155 SetEvent(_ParkEvent); 5156 } 5157 5158 // Run the specified command in a separate process. Return its exit value, 5159 // or -1 on failure (e.g. can't create a new process). 
// Spawns `cmd` via CreateProcess, waits for it to finish, and returns its
// exit code (-1 if the process could not be created). The
// use_vfork_if_available flag is a POSIX-ism and is unused on Windows.
int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
  STARTUPINFO si;
  PROCESS_INFORMATION pi;

  memset(&si, 0, sizeof(si));
  si.cb = sizeof(si);
  memset(&pi, 0, sizeof(pi));
  BOOL rslt = CreateProcess(NULL,   // executable name - use command line
                            cmd,    // command line
                            NULL,   // process security attribute
                            NULL,   // thread security attribute
                            TRUE,   // inherits system handles
                            0,      // no creation flags
                            NULL,   // use parent's environment block
                            NULL,   // use parent's starting directory
                            &si,    // (in) startup information
                            &pi);   // (out) process information

  if (rslt) {
    // Wait until child process exits.
    WaitForSingleObject(pi.hProcess, INFINITE);

    DWORD exit_code;
    GetExitCodeProcess(pi.hProcess, &exit_code);

    // Close process and thread handles.
    CloseHandle(pi.hProcess);
    CloseHandle(pi.hThread);

    return (int)exit_code;
  } else {
    return -1;
  }
}

//--------------------------------------------------------------------------------------------------
// Non-product code

static int mallocDebugIntervalCounter = 0;
static int mallocDebugCounter = 0;
// Walks the C heap (-XX:+MallocVerify* support) and fatals on corruption.
// Runs the (expensive) walk only every MallocVerifyInterval calls once
// MallocVerifyStart calls have elapsed, unless `force` is set.
bool os::check_heap(bool force) {
  if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
  if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
    // Note: HeapValidate executes two hardware breakpoints when it finds something
    // wrong; at these points, eax contains the address of the offending block (I think).
    // To get to the exlicit error message(s) below, just continue twice.
    HANDLE heap = GetProcessHeap();
    { HeapLock(heap);
      PROCESS_HEAP_ENTRY phe;
      phe.lpData = NULL;
      while (HeapWalk(heap, &phe) != 0) {
        // Validate each in-use block individually.
        if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
            !HeapValidate(heap, 0, phe.lpData)) {
          tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
          // NOTE(review): %#x with a pointer truncates on 64-bit Windows;
          // PTR_FORMAT/%p would print the full address.
          tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData);
          fatal("corrupted C heap");
        }
      }
      DWORD err = GetLastError();
      // ERROR_NO_MORE_ITEMS is the normal end-of-walk indication.
      if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
        fatal(err_msg("heap walk aborted with error %d", err));
      }
      HeapUnlock(heap);
    }
    mallocDebugIntervalCounter = 0;
  }
  return true;
}


// Symbol lookup for an arbitrary address: not implemented on Windows.
bool os::find(address addr, outputStream* st) {
  // Nothing yet
  return false;
}

// Vectored/SEH filter for faults on the memory-serialize page: such an
// access violation is expected (used as a cheap cross-thread barrier) and
// execution simply continues; everything else keeps searching for a handler.
LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
  DWORD exception_code = e->ExceptionRecord->ExceptionCode;

  if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
    JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow();
    PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (os::is_memory_serialize_page(thread, addr))
      return EXCEPTION_CONTINUE_EXECUTION;
  }

  return EXCEPTION_CONTINUE_SEARCH;
}

// We don't build a headless jre for Windows
bool os::is_headless_jre() { return false; }

// One-time Winsock 2.2 startup; returns JNI_OK on success, JNI_ERR if the
// DLL is missing or WSAStartup fails.
static jint initSock() {
  WSADATA wsadata;

  if (!os::WinSock2Dll::WinSock2Available()) {
    jio_fprintf(stderr, "Could not load Winsock (error: %d)\n",
      ::GetLastError());
    return JNI_ERR;
  }

  if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
    jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
      ::GetLastError());
    return JNI_ERR;
  }
  return JNI_OK;
}

// ----------------------------------------------------------------------
// Thin portability wrappers over the Winsock socket API. Each simply
// forwards to the corresponding Winsock call (size_t lengths are narrowed
// to int as the Winsock signatures require).

struct hostent* os::get_host_by_name(char* name) {
  return (struct hostent*)os::WinSock2Dll::gethostbyname(name);
}

int os::socket_close(int fd) {
  return ::closesocket(fd);
}

int os::socket_available(int fd, jint *pbytes) {
  int ret = ::ioctlsocket(fd, FIONREAD, (u_long*)pbytes);
  return (ret < 0) ? 0 : 1;
}

int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}

int os::listen(int fd, int count) {
  return ::listen(fd, count);
}

int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  return ::connect(fd, him, len);
}

int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
  return ::accept(fd, him, len);
}

int os::sendto(int fd, char* buf, size_t len, uint flags,
               struct sockaddr* to, socklen_t tolen) {

  return ::sendto(fd, buf, (int)len, flags, to, tolen);
}

int os::recvfrom(int fd, char *buf, size_t nBytes, uint flags,
                 sockaddr* from, socklen_t* fromlen) {

  return ::recvfrom(fd, buf, (int)nBytes, flags, from, fromlen);
}

int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  return ::recv(fd, buf, (int)nBytes, flags);
}

int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

// Waits up to `timeout` milliseconds for fd to become readable;
// select() semantics for the return value.
int os::timeout(int fd, long timeout) {
  fd_set tbl;
  struct timeval t;

  t.tv_sec  = timeout / 1000;
  t.tv_usec = (timeout % 1000) * 1000;

  // Winsock fd_set is a counted array, not a bitmask; populate it directly.
  tbl.fd_count    = 1;
  tbl.fd_array[0] = fd;

  return ::select(1, &tbl, 0, 0, &t);
}

int os::get_host_name(char* name, int namelen) {
  return ::gethostname(name, namelen);
}

int os::socket_shutdown(int fd, int howto) {
  return ::shutdown(fd, howto);
}

int os::bind(int fd, struct sockaddr* him, socklen_t len) {
  return ::bind(fd, him, len);
}

int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
  return ::getsockname(fd, him, len);
}

int os::get_sock_opt(int fd, int level, int optname,
                     char* optval, socklen_t* optlen) {
  return ::getsockopt(fd, level, optname, optval, optlen);
}

int os::set_sock_opt(int fd, int level, int optname,
                     const char* optval, socklen_t optlen) {
  return ::setsockopt(fd, level, optname, optval, optlen);
}

// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(IA32)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif

// returns true if thread could be suspended,
// false otherwise
// (SuspendThread reports failure with (DWORD)-1, hence the ~0 comparison.)
static bool do_suspend(HANDLE* h) {
  if (h != NULL) {
    if (SuspendThread(*h) != ~0) {
      return true;
    }
  }
  return false;
}

// resume the thread
// calling resume on an active thread is a no-op
static void do_resume(HANDLE* h) {
  if (h != NULL) {
    ResumeThread(*h);
  }
}

// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value.
// Opens a thread handle with the access rights needed for suspend/resume
// and context capture; *h receives NULL on failure (caller checks).
void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

//
// Thread sampling implementation
//
// Suspends the target thread, captures its register context, hands it to
// the subclass's do_task(), then resumes the thread. Silently bails out if
// a usable handle cannot be obtained or the suspend fails.
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    // NOTE(review): GetThreadContext's return value is not checked; on
    // failure ctxt may be stale/uninitialized when passed to do_task().
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}


// Kernel32 API
// Function-pointer types for Kernel32 entry points that may be absent on
// older Windows versions; resolved lazily via GetProcAddress.
typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG);
typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG);
typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG);

GetLargePageMinimum_Fn      os::Kernel32Dll::_GetLargePageMinimum = NULL;
VirtualAllocExNuma_Fn       os::Kernel32Dll::_VirtualAllocExNuma = NULL;
GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL;


BOOL os::Kernel32Dll::initialized = FALSE;
// Each wrapper below asserts that the matching *Available() probe (which
// triggers initialize()) was called first, then forwards to the resolved
// function pointer.
SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
  assert(initialized && _GetLargePageMinimum != NULL,
    "GetLargePageMinimumAvailable() not yet called");
  return _GetLargePageMinimum();
}

BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetLargePageMinimum != NULL;
}

BOOL os::Kernel32Dll::NumaCallsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _VirtualAllocExNuma != NULL;
}

LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr, SIZE_T bytes, DWORD flags, DWORD prot, DWORD node) {
  assert(initialized && _VirtualAllocExNuma != NULL,
    "NUMACallsAvailable() not yet called");

  return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
}

BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
  assert(initialized && _GetNumaHighestNodeNumber != NULL,
    "NUMACallsAvailable() not yet called");

  return _GetNumaHighestNodeNumber(ptr_highest_node_number);
}

BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node, PULONGLONG proc_mask) {
  assert(initialized && _GetNumaNodeProcessorMask != NULL,
    "NUMACallsAvailable() not yet called");

  return _GetNumaNodeProcessorMask(node, proc_mask);
}

// Self-initializing (no prior *Available() call required); returns 0 frames
// captured when the entry point is unavailable.
USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
  ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) {
  if (!initialized) {
    initialize();
  }

  if (_RtlCaptureStackBackTrace != NULL) {
    return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
      BackTrace, BackTraceHash);
  } else {
    return 0;
  }
}

// Resolves the Kernel32 entry points needed on all supported Windows
// versions (shared by both JDK6_OR_EARLIER branches of initialize()).
void os::Kernel32Dll::initializeCommon() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");
    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
    _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
    _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
    _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
    _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace");
    initialized = TRUE;
  }
}



#ifndef JDK6_OR_EARLIER
// Post-JDK6 builds target Windows versions where these APIs always exist,
// so the wrappers link directly instead of going through GetProcAddress.

void os::Kernel32Dll::initialize() {
  initializeCommon();
}


// Kernel32 API
inline BOOL os::Kernel32Dll::SwitchToThread() {
  return ::SwitchToThread();
}

inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  return true;
}

// Help tools
inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
  return true;
}

inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
  return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32First(hSnapshot, lpme);
}

inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32Next(hSnapshot, lpme);
}

inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  ::GetNativeSystemInfo(lpSystemInfo);
}

// PSAPI API
inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
  return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
  return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
  return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

inline BOOL os::PSApiDll::PSApiAvailable() {
  return true;
}


// WinSock2 API
inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
  return ::WSAStartup(wVersionRequested, lpWSAData);
}

inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  return ::gethostbyname(name);
}

inline BOOL os::WinSock2Dll::WinSock2Available() {
  return true;
}

// Advapi API
inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
   BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
   PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
  return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
    BufferLength, PreviousState, ReturnLength);
}

inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
   PHANDLE TokenHandle) {
  return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}

inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
  return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}

inline BOOL os::Advapi32Dll::AdvapiAvailable() {
  return true;
}

// Handle of the running executable, used for statically-linked agent lookup.
void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//                             == false if only the base name of the library is passed in
//                                     such as "L"
// Returns a C-heap allocated string "<sym>_<lib>" (preserving any trailing
// __stdcall "@NN" suffix of sym_name), or just sym_name when lib_name is
// NULL. Returns NULL on allocation failure or if an absolute lib_name is too
// short to carry the platform prefix+suffix. Caller frees the result.
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      // Drop the "lib"-style prefix and the ".dll"-style suffix from the
      // remaining base name.
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // Room for sym_name + '_' + lib base name + terminating NUL.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX: splice the lib name in before the
      // __stdcall byte-count suffix.
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}

#else
// JDK6-and-earlier builds may run on Windows versions lacking these APIs,
// so every entry point is resolved dynamically via GetProcAddress.

// Kernel32 API
typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD,DWORD);
typedef BOOL (WINAPI* Module32First_Fn)(HANDLE,LPMODULEENTRY32);
typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE,LPMODULEENTRY32);
typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);

SwitchToThread_Fn           os::Kernel32Dll::_SwitchToThread = NULL;
CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
Module32First_Fn            os::Kernel32Dll::_Module32First = NULL;
Module32Next_Fn             os::Kernel32Dll::_Module32Next = NULL;
GetNativeSystemInfo_Fn      os::Kernel32Dll::_GetNativeSystemInfo = NULL;

// Resolves the optional Kernel32 entry points, then the common set.
void os::Kernel32Dll::initialize() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");

    _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
    _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
      ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
5693 _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First"); 5694 _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next"); 5695 _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo"); 5696 initializeCommon(); // resolve the functions that always need resolving 5697 5698 initialized = TRUE; 5699 } 5700 } 5701 5702 BOOL os::Kernel32Dll::SwitchToThread() { 5703 assert(initialized && _SwitchToThread != NULL, 5704 "SwitchToThreadAvailable() not yet called"); 5705 return _SwitchToThread(); 5706 } 5707 5708 5709 BOOL os::Kernel32Dll::SwitchToThreadAvailable() { 5710 if (!initialized) { 5711 initialize(); 5712 } 5713 return _SwitchToThread != NULL; 5714 } 5715 5716 // Help tools 5717 BOOL os::Kernel32Dll::HelpToolsAvailable() { 5718 if (!initialized) { 5719 initialize(); 5720 } 5721 return _CreateToolhelp32Snapshot != NULL && 5722 _Module32First != NULL && 5723 _Module32Next != NULL; 5724 } 5725 5726 HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) { 5727 assert(initialized && _CreateToolhelp32Snapshot != NULL, 5728 "HelpToolsAvailable() not yet called"); 5729 5730 return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId); 5731 } 5732 5733 BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) { 5734 assert(initialized && _Module32First != NULL, 5735 "HelpToolsAvailable() not yet called"); 5736 5737 return _Module32First(hSnapshot, lpme); 5738 } 5739 5740 inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) { 5741 assert(initialized && _Module32Next != NULL, 5742 "HelpToolsAvailable() not yet called"); 5743 5744 return _Module32Next(hSnapshot, lpme); 5745 } 5746 5747 5748 BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() { 5749 if (!initialized) { 5750 initialize(); 5751 } 5752 return _GetNativeSystemInfo != NULL; 5753 } 5754 5755 void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) { 5756 
assert(initialized && _GetNativeSystemInfo != NULL, 5757 "GetNativeSystemInfoAvailable() not yet called"); 5758 5759 _GetNativeSystemInfo(lpSystemInfo); 5760 } 5761 5762 // PSAPI API 5763 5764 5765 typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD); 5766 typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);; 5767 typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD); 5768 5769 EnumProcessModules_Fn os::PSApiDll::_EnumProcessModules = NULL; 5770 GetModuleFileNameEx_Fn os::PSApiDll::_GetModuleFileNameEx = NULL; 5771 GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL; 5772 BOOL os::PSApiDll::initialized = FALSE; 5773 5774 void os::PSApiDll::initialize() { 5775 if (!initialized) { 5776 HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0); 5777 if (handle != NULL) { 5778 _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle, 5779 "EnumProcessModules"); 5780 _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle, 5781 "GetModuleFileNameExA"); 5782 _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle, 5783 "GetModuleInformation"); 5784 } 5785 initialized = TRUE; 5786 } 5787 } 5788 5789 5790 5791 BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) { 5792 assert(initialized && _EnumProcessModules != NULL, 5793 "PSApiAvailable() not yet called"); 5794 return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded); 5795 } 5796 5797 DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) { 5798 assert(initialized && _GetModuleFileNameEx != NULL, 5799 "PSApiAvailable() not yet called"); 5800 return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize); 5801 } 5802 5803 BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) { 5804 assert(initialized && 
_GetModuleInformation != NULL, 5805 "PSApiAvailable() not yet called"); 5806 return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb); 5807 } 5808 5809 BOOL os::PSApiDll::PSApiAvailable() { 5810 if (!initialized) { 5811 initialize(); 5812 } 5813 return _EnumProcessModules != NULL && 5814 _GetModuleFileNameEx != NULL && 5815 _GetModuleInformation != NULL; 5816 } 5817 5818 5819 // WinSock2 API 5820 typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA); 5821 typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...); 5822 5823 WSAStartup_Fn os::WinSock2Dll::_WSAStartup = NULL; 5824 gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL; 5825 BOOL os::WinSock2Dll::initialized = FALSE; 5826 5827 void os::WinSock2Dll::initialize() { 5828 if (!initialized) { 5829 HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0); 5830 if (handle != NULL) { 5831 _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup"); 5832 _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname"); 5833 } 5834 initialized = TRUE; 5835 } 5836 } 5837 5838 5839 BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) { 5840 assert(initialized && _WSAStartup != NULL, 5841 "WinSock2Available() not yet called"); 5842 return _WSAStartup(wVersionRequested, lpWSAData); 5843 } 5844 5845 struct hostent* os::WinSock2Dll::gethostbyname(const char *name) { 5846 assert(initialized && _gethostbyname != NULL, 5847 "WinSock2Available() not yet called"); 5848 return _gethostbyname(name); 5849 } 5850 5851 BOOL os::WinSock2Dll::WinSock2Available() { 5852 if (!initialized) { 5853 initialize(); 5854 } 5855 return _WSAStartup != NULL && 5856 _gethostbyname != NULL; 5857 } 5858 5859 typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD); 5860 typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE); 5861 typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, LPCTSTR, PLUID); 5862 
5863 AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL; 5864 OpenProcessToken_Fn os::Advapi32Dll::_OpenProcessToken = NULL; 5865 LookupPrivilegeValue_Fn os::Advapi32Dll::_LookupPrivilegeValue = NULL; 5866 BOOL os::Advapi32Dll::initialized = FALSE; 5867 5868 void os::Advapi32Dll::initialize() { 5869 if (!initialized) { 5870 HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0); 5871 if (handle != NULL) { 5872 _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle, 5873 "AdjustTokenPrivileges"); 5874 _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle, 5875 "OpenProcessToken"); 5876 _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle, 5877 "LookupPrivilegeValueA"); 5878 } 5879 initialized = TRUE; 5880 } 5881 } 5882 5883 BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle, 5884 BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength, 5885 PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) { 5886 assert(initialized && _AdjustTokenPrivileges != NULL, 5887 "AdvapiAvailable() not yet called"); 5888 return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState, 5889 BufferLength, PreviousState, ReturnLength); 5890 } 5891 5892 BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess, 5893 PHANDLE TokenHandle) { 5894 assert(initialized && _OpenProcessToken != NULL, 5895 "AdvapiAvailable() not yet called"); 5896 return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle); 5897 } 5898 5899 BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) { 5900 assert(initialized && _LookupPrivilegeValue != NULL, 5901 "AdvapiAvailable() not yet called"); 5902 return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid); 5903 } 5904 5905 BOOL os::Advapi32Dll::AdvapiAvailable() { 5906 if (!initialized) { 5907 initialize(); 5908 } 5909 return _AdjustTokenPrivileges != NULL && 5910 
_OpenProcessToken != NULL && 5911 _LookupPrivilegeValue != NULL; 5912 } 5913 5914 #endif 5915 5916 #ifndef PRODUCT 5917 5918 // test the code path in reserve_memory_special() that tries to allocate memory in a single 5919 // contiguous memory block at a particular address. 5920 // The test first tries to find a good approximate address to allocate at by using the same 5921 // method to allocate some memory at any address. The test then tries to allocate memory in 5922 // the vicinity (not directly after it to avoid possible by-chance use of that location) 5923 // This is of course only some dodgy assumption, there is no guarantee that the vicinity of 5924 // the previously allocated memory is available for allocation. The only actual failure 5925 // that is reported is when the test tries to allocate at a particular location but gets a 5926 // different valid one. A NULL return value at this point is not considered an error but may 5927 // be legitimate. 5928 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages. 5929 void TestReserveMemorySpecial_test() { 5930 if (!UseLargePages) { 5931 if (VerboseInternalVMTests) { 5932 gclog_or_tty->print("Skipping test because large pages are disabled"); 5933 } 5934 return; 5935 } 5936 // save current value of globals 5937 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; 5938 bool old_use_numa_interleaving = UseNUMAInterleaving; 5939 5940 // set globals to make sure we hit the correct code path 5941 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; 5942 5943 // do an allocation at an address selected by the OS to get a good one. 5944 const size_t large_allocation_size = os::large_page_size() * 4; 5945 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); 5946 if (result == NULL) { 5947 if (VerboseInternalVMTests) { 5948 gclog_or_tty->print("Failed to allocate control block with size " SIZE_FORMAT ". 
Skipping remainder of test.", 5949 large_allocation_size); 5950 } 5951 } else { 5952 os::release_memory_special(result, large_allocation_size); 5953 5954 // allocate another page within the recently allocated memory area which seems to be a good location. At least 5955 // we managed to get it once. 5956 const size_t expected_allocation_size = os::large_page_size(); 5957 char* expected_location = result + os::large_page_size(); 5958 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); 5959 if (actual_location == NULL) { 5960 if (VerboseInternalVMTests) { 5961 gclog_or_tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.", 5962 expected_location, large_allocation_size); 5963 } 5964 } else { 5965 // release memory 5966 os::release_memory_special(actual_location, expected_allocation_size); 5967 // only now check, after releasing any memory to avoid any leaks. 5968 assert(actual_location == expected_location, 5969 err_msg("Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead", 5970 expected_location, expected_allocation_size, actual_location)); 5971 } 5972 } 5973 5974 // restore globals 5975 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; 5976 UseNUMAInterleaving = old_use_numa_interleaving; 5977 } 5978 #endif // PRODUCT 5979