1 /* 2 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // Must be at least Windows 2000 or XP to use IsDebuggerPresent 26 #define _WIN32_WINNT 0x500 27 28 // no precompiled headers 29 #include "classfile/classLoader.hpp" 30 #include "classfile/systemDictionary.hpp" 31 #include "classfile/vmSymbols.hpp" 32 #include "code/icBuffer.hpp" 33 #include "code/vtableStubs.hpp" 34 #include "compiler/compileBroker.hpp" 35 #include "compiler/disassembler.hpp" 36 #include "interpreter/interpreter.hpp" 37 #include "jvm_windows.h" 38 #include "memory/allocation.inline.hpp" 39 #include "memory/filemap.hpp" 40 #include "mutex_windows.inline.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "os_share_windows.hpp" 43 #include "prims/jniFastGetField.hpp" 44 #include "prims/jvm.h" 45 #include "prims/jvm_misc.hpp" 46 #include "runtime/arguments.hpp" 47 #include "runtime/extendedPC.hpp" 48 #include "runtime/globals.hpp" 49 #include "runtime/interfaceSupport.hpp" 50 #include "runtime/java.hpp" 51 #include "runtime/javaCalls.hpp" 52 #include "runtime/mutexLocker.hpp" 53 #include "runtime/objectMonitor.hpp" 54 #include "runtime/orderAccess.inline.hpp" 55 #include "runtime/osThread.hpp" 56 #include "runtime/perfMemory.hpp" 57 #include "runtime/sharedRuntime.hpp" 58 #include "runtime/statSampler.hpp" 59 #include "runtime/stubRoutines.hpp" 60 #include "runtime/thread.inline.hpp" 61 #include "runtime/threadCritical.hpp" 62 #include "runtime/timer.hpp" 63 #include "services/attachListener.hpp" 64 #include "services/memTracker.hpp" 65 #include "services/runtimeService.hpp" 66 #include "utilities/decoder.hpp" 67 #include "utilities/defaultStream.hpp" 68 #include "utilities/events.hpp" 69 #include "utilities/growableArray.hpp" 70 #include "utilities/vmError.hpp" 71 72 #ifdef _DEBUG 73 #include <crtdbg.h> 74 #endif 75 76 77 #include <windows.h> 78 #include <sys/types.h> 79 #include <sys/stat.h> 80 #include <sys/timeb.h> 81 #include <objidl.h> 82 #include <shlobj.h> 83 84 #include <malloc.h> 85 #include <signal.h> 86 #include <direct.h> 
87 #include <errno.h> 88 #include <fcntl.h> 89 #include <io.h> 90 #include <process.h> // For _beginthreadex(), _endthreadex() 91 #include <imagehlp.h> // For os::dll_address_to_function_name 92 /* for enumerating dll libraries */ 93 #include <vdmdbg.h> 94 95 // for timer info max values which include all bits 96 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) 97 98 // For DLL loading/load error detection 99 // Values of PE COFF 100 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c 101 #define IMAGE_FILE_SIGNATURE_LENGTH 4 102 103 static HANDLE main_process; 104 static HANDLE main_thread; 105 static int main_thread_id; 106 107 static FILETIME process_creation_time; 108 static FILETIME process_exit_time; 109 static FILETIME process_user_time; 110 static FILETIME process_kernel_time; 111 112 #ifdef _M_IA64 113 #define __CPU__ ia64 114 #else 115 #ifdef _M_AMD64 116 #define __CPU__ amd64 117 #else 118 #define __CPU__ i486 119 #endif 120 #endif 121 122 // save DLL module handle, used by GetModuleFileName 123 124 HINSTANCE vm_lib_handle; 125 126 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) { 127 switch (reason) { 128 case DLL_PROCESS_ATTACH: 129 vm_lib_handle = hinst; 130 if(ForceTimeHighResolution) 131 timeBeginPeriod(1L); 132 break; 133 case DLL_PROCESS_DETACH: 134 if(ForceTimeHighResolution) 135 timeEndPeriod(1L); 136 137 break; 138 default: 139 break; 140 } 141 return true; 142 } 143 144 static inline double fileTimeAsDouble(FILETIME* time) { 145 const double high = (double) ((unsigned int) ~0); 146 const double split = 10000000.0; 147 double result = (time->dwLowDateTime / split) + 148 time->dwHighDateTime * (high/split); 149 return result; 150 } 151 152 // Implementation of os 153 154 bool os::getenv(const char* name, char* buffer, int len) { 155 int result = GetEnvironmentVariable(name, buffer, len); 156 return result > 0 && result < len; 157 } 158 159 bool os::unsetenv(const char* name) { 160 assert(name != NULL, "Null pointer"); 161 return 
(SetEnvironmentVariable(name, NULL) == TRUE); 162 } 163 164 // No setuid programs under Windows. 165 bool os::have_special_privileges() { 166 return false; 167 } 168 169 170 // This method is a periodic task to check for misbehaving JNI applications 171 // under CheckJNI, we can add any periodic checks here. 172 // For Windows at the moment does nothing 173 void os::run_periodic_checks() { 174 return; 175 } 176 177 #ifndef _WIN64 178 // previous UnhandledExceptionFilter, if there is one 179 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL; 180 181 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo); 182 #endif 183 void os::init_system_properties_values() { 184 /* sysclasspath, java_home, dll_dir */ 185 { 186 char *home_path; 187 char *dll_path; 188 char *pslash; 189 char *bin = "\\bin"; 190 char home_dir[MAX_PATH]; 191 192 if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) { 193 os::jvm_path(home_dir, sizeof(home_dir)); 194 // Found the full path to jvm.dll. 195 // Now cut the path to <java_home>/jre if we can. 
196 *(strrchr(home_dir, '\\')) = '\0'; /* get rid of \jvm.dll */ 197 pslash = strrchr(home_dir, '\\'); 198 if (pslash != NULL) { 199 *pslash = '\0'; /* get rid of \{client|server} */ 200 pslash = strrchr(home_dir, '\\'); 201 if (pslash != NULL) 202 *pslash = '\0'; /* get rid of \bin */ 203 } 204 } 205 206 home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal); 207 if (home_path == NULL) 208 return; 209 strcpy(home_path, home_dir); 210 Arguments::set_java_home(home_path); 211 212 dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal); 213 if (dll_path == NULL) 214 return; 215 strcpy(dll_path, home_dir); 216 strcat(dll_path, bin); 217 Arguments::set_dll_dir(dll_path); 218 219 if (!set_boot_path('\\', ';')) 220 return; 221 } 222 223 /* library_path */ 224 #define EXT_DIR "\\lib\\ext" 225 #define BIN_DIR "\\bin" 226 #define PACKAGE_DIR "\\Sun\\Java" 227 { 228 /* Win32 library search order (See the documentation for LoadLibrary): 229 * 230 * 1. The directory from which application is loaded. 231 * 2. The system wide Java Extensions directory (Java only) 232 * 3. System directory (GetSystemDirectory) 233 * 4. Windows directory (GetWindowsDirectory) 234 * 5. The PATH environment variable 235 * 6. The current directory 236 */ 237 238 char *library_path; 239 char tmp[MAX_PATH]; 240 char *path_str = ::getenv("PATH"); 241 242 library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) + 243 sizeof(BIN_DIR) + (path_str ? 
strlen(path_str) : 0) + 10, mtInternal); 244 245 library_path[0] = '\0'; 246 247 GetModuleFileName(NULL, tmp, sizeof(tmp)); 248 *(strrchr(tmp, '\\')) = '\0'; 249 strcat(library_path, tmp); 250 251 GetWindowsDirectory(tmp, sizeof(tmp)); 252 strcat(library_path, ";"); 253 strcat(library_path, tmp); 254 strcat(library_path, PACKAGE_DIR BIN_DIR); 255 256 GetSystemDirectory(tmp, sizeof(tmp)); 257 strcat(library_path, ";"); 258 strcat(library_path, tmp); 259 260 GetWindowsDirectory(tmp, sizeof(tmp)); 261 strcat(library_path, ";"); 262 strcat(library_path, tmp); 263 264 if (path_str) { 265 strcat(library_path, ";"); 266 strcat(library_path, path_str); 267 } 268 269 strcat(library_path, ";."); 270 271 Arguments::set_library_path(library_path); 272 FREE_C_HEAP_ARRAY(char, library_path, mtInternal); 273 } 274 275 /* Default extensions directory */ 276 { 277 char path[MAX_PATH]; 278 char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1]; 279 GetWindowsDirectory(path, MAX_PATH); 280 sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR, 281 path, PACKAGE_DIR, EXT_DIR); 282 Arguments::set_ext_dirs(buf); 283 } 284 #undef EXT_DIR 285 #undef BIN_DIR 286 #undef PACKAGE_DIR 287 288 /* Default endorsed standards directory. 
*/ 289 { 290 #define ENDORSED_DIR "\\lib\\endorsed" 291 size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR); 292 char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal); 293 sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR); 294 Arguments::set_endorsed_dirs(buf); 295 #undef ENDORSED_DIR 296 } 297 298 #ifndef _WIN64 299 // set our UnhandledExceptionFilter and save any previous one 300 prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception); 301 #endif 302 303 // Done 304 return; 305 } 306 307 void os::breakpoint() { 308 DebugBreak(); 309 } 310 311 // Invoked from the BREAKPOINT Macro 312 extern "C" void breakpoint() { 313 os::breakpoint(); 314 } 315 316 /* 317 * RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP. 318 * So far, this method is only used by Native Memory Tracking, which is 319 * only supported on Windows XP or later. 320 */ 321 322 int os::get_native_stack(address* stack, int frames, int toSkip) { 323 #ifdef _NMT_NOINLINE_ 324 toSkip ++; 325 #endif 326 int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames, 327 (PVOID*)stack, NULL); 328 for (int index = captured; index < frames; index ++) { 329 stack[index] = NULL; 330 } 331 return captured; 332 } 333 334 335 // os::current_stack_base() 336 // 337 // Returns the base of the stack, which is the stack's 338 // starting address. This function must be called 339 // while running on the stack of the thread being queried. 340 341 address os::current_stack_base() { 342 MEMORY_BASIC_INFORMATION minfo; 343 address stack_bottom; 344 size_t stack_size; 345 346 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 347 stack_bottom = (address)minfo.AllocationBase; 348 stack_size = minfo.RegionSize; 349 350 // Add up the sizes of all the regions with the same 351 // AllocationBase. 
352 while( 1 ) 353 { 354 VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo)); 355 if ( stack_bottom == (address)minfo.AllocationBase ) 356 stack_size += minfo.RegionSize; 357 else 358 break; 359 } 360 361 #ifdef _M_IA64 362 // IA64 has memory and register stacks 363 // 364 // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit 365 // at thread creation (1MB backing store growing upwards, 1MB memory stack 366 // growing downwards, 2MB summed up) 367 // 368 // ... 369 // ------- top of stack (high address) ----- 370 // | 371 // | 1MB 372 // | Backing Store (Register Stack) 373 // | 374 // | / \ 375 // | | 376 // | | 377 // | | 378 // ------------------------ stack base ----- 379 // | 1MB 380 // | Memory Stack 381 // | 382 // | | 383 // | | 384 // | | 385 // | \ / 386 // | 387 // ----- bottom of stack (low address) ----- 388 // ... 389 390 stack_size = stack_size / 2; 391 #endif 392 return stack_bottom + stack_size; 393 } 394 395 size_t os::current_stack_size() { 396 size_t sz; 397 MEMORY_BASIC_INFORMATION minfo; 398 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 399 sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase; 400 return sz; 401 } 402 403 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 404 const struct tm* time_struct_ptr = localtime(clock); 405 if (time_struct_ptr != NULL) { 406 *res = *time_struct_ptr; 407 return res; 408 } 409 return NULL; 410 } 411 412 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo); 413 414 // Thread start routine for all new Java threads 415 static unsigned __stdcall java_start(Thread* thread) { 416 // Try to randomize the cache line index of hot stack frames. 417 // This helps when threads of the same stack traces evict each other's 418 // cache lines. The threads can be either from the same JVM instance, or 419 // from different JVM instances. The benefit is especially true for 420 // processors with hyperthreading technology. 
421 static int counter = 0; 422 int pid = os::current_process_id(); 423 _alloca(((pid ^ counter++) & 7) * 128); 424 425 OSThread* osthr = thread->osthread(); 426 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 427 428 if (UseNUMA) { 429 int lgrp_id = os::numa_get_group_id(); 430 if (lgrp_id != -1) { 431 thread->set_lgrp_id(lgrp_id); 432 } 433 } 434 435 436 // Install a win32 structured exception handler around every thread created 437 // by VM, so VM can genrate error dump when an exception occurred in non- 438 // Java thread (e.g. VM thread). 439 __try { 440 thread->run(); 441 } __except(topLevelExceptionFilter( 442 (_EXCEPTION_POINTERS*)_exception_info())) { 443 // Nothing to do. 444 } 445 446 // One less thread is executing 447 // When the VMThread gets here, the main thread may have already exited 448 // which frees the CodeHeap containing the Atomic::add code 449 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) { 450 Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count); 451 } 452 453 return 0; 454 } 455 456 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) { 457 // Allocate the OSThread object 458 OSThread* osthread = new OSThread(NULL, NULL); 459 if (osthread == NULL) return NULL; 460 461 // Initialize support for Java interrupts 462 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 463 if (interrupt_event == NULL) { 464 delete osthread; 465 return NULL; 466 } 467 osthread->set_interrupt_event(interrupt_event); 468 469 // Store info on the Win32 thread into the OSThread 470 osthread->set_thread_handle(thread_handle); 471 osthread->set_thread_id(thread_id); 472 473 if (UseNUMA) { 474 int lgrp_id = os::numa_get_group_id(); 475 if (lgrp_id != -1) { 476 thread->set_lgrp_id(lgrp_id); 477 } 478 } 479 480 // Initial thread state is INITIALIZED, not SUSPENDED 481 osthread->set_state(INITIALIZED); 482 483 return osthread; 484 } 485 486 487 bool 
os::create_attached_thread(JavaThread* thread) { 488 #ifdef ASSERT 489 thread->verify_not_published(); 490 #endif 491 HANDLE thread_h; 492 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(), 493 &thread_h, THREAD_ALL_ACCESS, false, 0)) { 494 fatal("DuplicateHandle failed\n"); 495 } 496 OSThread* osthread = create_os_thread(thread, thread_h, 497 (int)current_thread_id()); 498 if (osthread == NULL) { 499 return false; 500 } 501 502 // Initial thread state is RUNNABLE 503 osthread->set_state(RUNNABLE); 504 505 thread->set_osthread(osthread); 506 return true; 507 } 508 509 bool os::create_main_thread(JavaThread* thread) { 510 #ifdef ASSERT 511 thread->verify_not_published(); 512 #endif 513 if (_starting_thread == NULL) { 514 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 515 if (_starting_thread == NULL) { 516 return false; 517 } 518 } 519 520 // The primordial thread is runnable from the start) 521 _starting_thread->set_state(RUNNABLE); 522 523 thread->set_osthread(_starting_thread); 524 return true; 525 } 526 527 // Allocate and initialize a new OSThread 528 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) { 529 unsigned thread_id; 530 531 // Allocate the OSThread object 532 OSThread* osthread = new OSThread(NULL, NULL); 533 if (osthread == NULL) { 534 return false; 535 } 536 537 // Initialize support for Java interrupts 538 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 539 if (interrupt_event == NULL) { 540 delete osthread; 541 return NULL; 542 } 543 osthread->set_interrupt_event(interrupt_event); 544 osthread->set_interrupted(false); 545 546 thread->set_osthread(osthread); 547 548 if (stack_size == 0) { 549 switch (thr_type) { 550 case os::java_thread: 551 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 552 if (JavaThread::stack_size_at_create() > 0) 553 stack_size = JavaThread::stack_size_at_create(); 554 break; 555 case 
os::compiler_thread: 556 if (CompilerThreadStackSize > 0) { 557 stack_size = (size_t)(CompilerThreadStackSize * K); 558 break; 559 } // else fall through: 560 // use VMThreadStackSize if CompilerThreadStackSize is not defined 561 case os::vm_thread: 562 case os::pgc_thread: 563 case os::cgc_thread: 564 case os::watcher_thread: 565 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 566 break; 567 } 568 } 569 570 // Create the Win32 thread 571 // 572 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() 573 // does not specify stack size. Instead, it specifies the size of 574 // initially committed space. The stack size is determined by 575 // PE header in the executable. If the committed "stack_size" is larger 576 // than default value in the PE header, the stack is rounded up to the 577 // nearest multiple of 1MB. For example if the launcher has default 578 // stack size of 320k, specifying any size less than 320k does not 579 // affect the actual stack size at all, it only affects the initial 580 // commitment. On the other hand, specifying 'stack_size' larger than 581 // default value may cause significant increase in memory usage, because 582 // not only the stack space will be rounded up to MB, but also the 583 // entire space is committed upfront. 584 // 585 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' 586 // for CreateThread() that can treat 'stack_size' as stack size. However we 587 // are not supposed to call CreateThread() directly according to MSDN 588 // document because JVM uses C runtime library. The good news is that the 589 // flag appears to work with _beginthredex() as well. 
590 591 #ifndef STACK_SIZE_PARAM_IS_A_RESERVATION 592 #define STACK_SIZE_PARAM_IS_A_RESERVATION (0x10000) 593 #endif 594 595 HANDLE thread_handle = 596 (HANDLE)_beginthreadex(NULL, 597 (unsigned)stack_size, 598 (unsigned (__stdcall *)(void*)) java_start, 599 thread, 600 CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 601 &thread_id); 602 if (thread_handle == NULL) { 603 // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again 604 // without the flag. 605 thread_handle = 606 (HANDLE)_beginthreadex(NULL, 607 (unsigned)stack_size, 608 (unsigned (__stdcall *)(void*)) java_start, 609 thread, 610 CREATE_SUSPENDED, 611 &thread_id); 612 } 613 if (thread_handle == NULL) { 614 // Need to clean up stuff we've allocated so far 615 CloseHandle(osthread->interrupt_event()); 616 thread->set_osthread(NULL); 617 delete osthread; 618 return NULL; 619 } 620 621 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count); 622 623 // Store info on the Win32 thread into the OSThread 624 osthread->set_thread_handle(thread_handle); 625 osthread->set_thread_id(thread_id); 626 627 // Initial thread state is INITIALIZED, not SUSPENDED 628 osthread->set_state(INITIALIZED); 629 630 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 631 return true; 632 } 633 634 635 // Free Win32 resources related to the OSThread 636 void os::free_thread(OSThread* osthread) { 637 assert(osthread != NULL, "osthread not set"); 638 CloseHandle(osthread->thread_handle()); 639 CloseHandle(osthread->interrupt_event()); 640 delete osthread; 641 } 642 643 644 static int has_performance_count = 0; 645 static jlong first_filetime; 646 static jlong initial_performance_count; 647 static jlong performance_frequency; 648 649 650 jlong as_long(LARGE_INTEGER x) { 651 jlong result = 0; // initialization to avoid warning 652 set_high(&result, x.HighPart); 653 set_low(&result, x.LowPart); 654 return result; 655 } 656 657 658 jlong os::elapsed_counter() { 659 
LARGE_INTEGER count; 660 if (has_performance_count) { 661 QueryPerformanceCounter(&count); 662 return as_long(count) - initial_performance_count; 663 } else { 664 FILETIME wt; 665 GetSystemTimeAsFileTime(&wt); 666 return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime); 667 } 668 } 669 670 671 jlong os::elapsed_frequency() { 672 if (has_performance_count) { 673 return performance_frequency; 674 } else { 675 // the FILETIME time is the number of 100-nanosecond intervals since January 1,1601. 676 return 10000000; 677 } 678 } 679 680 681 julong os::available_memory() { 682 return win32::available_memory(); 683 } 684 685 julong os::win32::available_memory() { 686 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 687 // value if total memory is larger than 4GB 688 MEMORYSTATUSEX ms; 689 ms.dwLength = sizeof(ms); 690 GlobalMemoryStatusEx(&ms); 691 692 return (julong)ms.ullAvailPhys; 693 } 694 695 julong os::physical_memory() { 696 return win32::physical_memory(); 697 } 698 699 bool os::has_allocatable_memory_limit(julong* limit) { 700 MEMORYSTATUSEX ms; 701 ms.dwLength = sizeof(ms); 702 GlobalMemoryStatusEx(&ms); 703 #ifdef _LP64 704 *limit = (julong)ms.ullAvailVirtual; 705 return true; 706 #else 707 // Limit to 1400m because of the 2gb address space wall 708 *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual); 709 return true; 710 #endif 711 } 712 713 // VC6 lacks DWORD_PTR 714 #if _MSC_VER < 1300 715 typedef UINT_PTR DWORD_PTR; 716 #endif 717 718 int os::active_processor_count() { 719 // User has overridden the number of active processors 720 if (ActiveProcessorCount > 0) { 721 if (PrintActiveCpus) { 722 tty->print_cr("active_processor_count: " 723 "active processor count set by user : %d", 724 ActiveProcessorCount); 725 } 726 return ActiveProcessorCount; 727 } 728 729 DWORD_PTR lpProcessAffinityMask = 0; 730 DWORD_PTR lpSystemAffinityMask = 0; 731 int proc_count = processor_count(); 732 if (proc_count <= 
sizeof(UINT_PTR) * BitsPerByte && 733 GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) { 734 // Nof active processors is number of bits in process affinity mask 735 int bitcount = 0; 736 while (lpProcessAffinityMask != 0) { 737 lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1); 738 bitcount++; 739 } 740 return bitcount; 741 } else { 742 return proc_count; 743 } 744 } 745 746 void os::set_native_thread_name(const char *name) { 747 // Not yet implemented. 748 return; 749 } 750 751 bool os::distribute_processes(uint length, uint* distribution) { 752 // Not yet implemented. 753 return false; 754 } 755 756 bool os::bind_to_processor(uint processor_id) { 757 // Not yet implemented. 758 return false; 759 } 760 761 static void initialize_performance_counter() { 762 LARGE_INTEGER count; 763 if (QueryPerformanceFrequency(&count)) { 764 has_performance_count = 1; 765 performance_frequency = as_long(count); 766 QueryPerformanceCounter(&count); 767 initial_performance_count = as_long(count); 768 } else { 769 has_performance_count = 0; 770 FILETIME wt; 771 GetSystemTimeAsFileTime(&wt); 772 first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 773 } 774 } 775 776 777 double os::elapsedTime() { 778 return (double) elapsed_counter() / (double) elapsed_frequency(); 779 } 780 781 782 // Windows format: 783 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601. 
784 // Java format: 785 // Java standards require the number of milliseconds since 1/1/1970 786 787 // Constant offset - calculated using offset() 788 static jlong _offset = 116444736000000000; 789 // Fake time counter for reproducible results when debugging 790 static jlong fake_time = 0; 791 792 #ifdef ASSERT 793 // Just to be safe, recalculate the offset in debug mode 794 static jlong _calculated_offset = 0; 795 static int _has_calculated_offset = 0; 796 797 jlong offset() { 798 if (_has_calculated_offset) return _calculated_offset; 799 SYSTEMTIME java_origin; 800 java_origin.wYear = 1970; 801 java_origin.wMonth = 1; 802 java_origin.wDayOfWeek = 0; // ignored 803 java_origin.wDay = 1; 804 java_origin.wHour = 0; 805 java_origin.wMinute = 0; 806 java_origin.wSecond = 0; 807 java_origin.wMilliseconds = 0; 808 FILETIME jot; 809 if (!SystemTimeToFileTime(&java_origin, &jot)) { 810 fatal(err_msg("Error = %d\nWindows error", GetLastError())); 811 } 812 _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime); 813 _has_calculated_offset = 1; 814 assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal"); 815 return _calculated_offset; 816 } 817 #else 818 jlong offset() { 819 return _offset; 820 } 821 #endif 822 823 jlong windows_to_java_time(FILETIME wt) { 824 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 825 return (a - offset()) / 10000; 826 } 827 828 FILETIME java_to_windows_time(jlong l) { 829 jlong a = (l * 10000) + offset(); 830 FILETIME result; 831 result.dwHighDateTime = high(a); 832 result.dwLowDateTime = low(a); 833 return result; 834 } 835 836 bool os::supports_vtime() { return true; } 837 bool os::enable_vtime() { return false; } 838 bool os::vtime_enabled() { return false; } 839 840 double os::elapsedVTime() { 841 FILETIME created; 842 FILETIME exited; 843 FILETIME kernel; 844 FILETIME user; 845 if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) { 846 // the resolution 
of windows_to_java_time() should be sufficient (ms) 847 return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS; 848 } else { 849 return elapsedTime(); 850 } 851 } 852 853 jlong os::javaTimeMillis() { 854 if (UseFakeTimers) { 855 return fake_time++; 856 } else { 857 FILETIME wt; 858 GetSystemTimeAsFileTime(&wt); 859 return windows_to_java_time(wt); 860 } 861 } 862 863 jlong os::javaTimeNanos() { 864 if (!has_performance_count) { 865 return javaTimeMillis() * NANOSECS_PER_MILLISEC; // the best we can do. 866 } else { 867 LARGE_INTEGER current_count; 868 QueryPerformanceCounter(¤t_count); 869 double current = as_long(current_count); 870 double freq = performance_frequency; 871 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC); 872 return time; 873 } 874 } 875 876 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 877 if (!has_performance_count) { 878 // javaTimeMillis() doesn't have much percision, 879 // but it is not going to wrap -- so all 64 bits 880 info_ptr->max_value = ALL_64_BITS; 881 882 // this is a wall clock timer, so may skip 883 info_ptr->may_skip_backward = true; 884 info_ptr->may_skip_forward = true; 885 } else { 886 jlong freq = performance_frequency; 887 if (freq < NANOSECS_PER_SEC) { 888 // the performance counter is 64 bits and we will 889 // be multiplying it -- so no wrap in 64 bits 890 info_ptr->max_value = ALL_64_BITS; 891 } else if (freq > NANOSECS_PER_SEC) { 892 // use the max value the counter can reach to 893 // determine the max value which could be returned 894 julong max_counter = (julong)ALL_64_BITS; 895 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC)); 896 } else { 897 // the performance counter is 64 bits and we will 898 // be using it directly -- so no wrap in 64 bits 899 info_ptr->max_value = ALL_64_BITS; 900 } 901 902 // using a counter, so no skipping 903 info_ptr->may_skip_backward = false; 904 info_ptr->may_skip_forward = false; 905 } 906 info_ptr->kind = 
JVMTI_TIMER_ELAPSED; // elapsed not CPU time 907 } 908 909 char* os::local_time_string(char *buf, size_t buflen) { 910 SYSTEMTIME st; 911 GetLocalTime(&st); 912 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 913 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); 914 return buf; 915 } 916 917 bool os::getTimesSecs(double* process_real_time, 918 double* process_user_time, 919 double* process_system_time) { 920 HANDLE h_process = GetCurrentProcess(); 921 FILETIME create_time, exit_time, kernel_time, user_time; 922 BOOL result = GetProcessTimes(h_process, 923 &create_time, 924 &exit_time, 925 &kernel_time, 926 &user_time); 927 if (result != 0) { 928 FILETIME wt; 929 GetSystemTimeAsFileTime(&wt); 930 jlong rtc_millis = windows_to_java_time(wt); 931 jlong user_millis = windows_to_java_time(user_time); 932 jlong system_millis = windows_to_java_time(kernel_time); 933 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS); 934 *process_user_time = ((double) user_millis) / ((double) MILLIUNITS); 935 *process_system_time = ((double) system_millis) / ((double) MILLIUNITS); 936 return true; 937 } else { 938 return false; 939 } 940 } 941 942 void os::shutdown() { 943 944 // allow PerfMemory to attempt cleanup of any persistent resources 945 perfMemory_exit(); 946 947 // flush buffered output, finish log files 948 ostream_abort(); 949 950 // Check for abort hook 951 abort_hook_t abort_hook = Arguments::abort_hook(); 952 if (abort_hook != NULL) { 953 abort_hook(); 954 } 955 } 956 957 958 static BOOL (WINAPI *_MiniDumpWriteDump) ( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION, 959 PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION); 960 961 void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) { 962 HINSTANCE dbghelp; 963 EXCEPTION_POINTERS ep; 964 MINIDUMP_EXCEPTION_INFORMATION mei; 965 MINIDUMP_EXCEPTION_INFORMATION* pmei; 966 967 HANDLE hProcess = 
GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  HANDLE dumpFile;
  MINIDUMP_TYPE dumpType;
  static const char* cwd;

// Default is to always create dump for debug builds, on product builds only dump on server versions of Windows.
#ifndef ASSERT
  // If running on a client version of Windows and user has not explicitly enabled dumping
  if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false);
    return;
    // If running on a server version of Windows and user has explictly disabled dumping
  } else if (os::win32::is_windows_server() && !FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#else
  if (!FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#endif

  // MiniDumpWriteDump() is resolved lazily out of DBGHELP.DLL because the DLL
  // (or the export) may be missing on older Windows installations.
  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);

  if (dbghelp == NULL) {
    VMError::report_coredump_status("Failed to load dbghelp.dll", false);
    return;
  }

  _MiniDumpWriteDump = CAST_TO_FN_PTR(
    BOOL(WINAPI *)( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
    PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION),
    GetProcAddress(dbghelp, "MiniDumpWriteDump"));

  if (_MiniDumpWriteDump == NULL) {
    VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false);
    return;
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData);

// Older versions of dbghelp.h doesn't contain all the dumptypes we want, dbghelp.h with
// API_VERSION_NUMBER 11 or higher contains the ones we want though
#if API_VERSION_NUMBER >= 11
  dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo |
    MiniDumpWithUnloadedModules);
#endif

  // Dump file is written to the current working directory as hs_err_pid<pid>.mdmp.
  cwd = get_current_directory(NULL, 0);
  jio_snprintf(buffer, bufferSize, "%s\\hs_err_pid%u.mdmp",cwd, current_process_id());
  dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);

  if (dumpFile == INVALID_HANDLE_VALUE) {
    VMError::report_coredump_status("Failed to create file for dumping", false);
    return;
  }
  // If the caller supplied the faulting exception/context, forward them so the
  // minidump records the crashing thread's state; otherwise dump without them.
  if (exceptionRecord != NULL && contextRecord != NULL) {
    ep.ContextRecord = (PCONTEXT) contextRecord;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }


  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
    DWORD error = GetLastError();
    LPTSTR msgbuf = NULL;

    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) {

      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
      LocalFree(msgbuf);
    } else {
      // Call to FormatMessage failed, just include the result from GetLastError
      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
    }
    VMError::report_coredump_status(buffer, false);
  } else {
    VMError::report_coredump_status(buffer, true);
  }

  CloseHandle(dumpFile);
}



// Terminate the VM after running the shutdown sequence.
// NOTE(review): the dump_core argument is ignored on this platform — the
// comment below documents that no core dump is produced here.
void os::abort(bool dump_core)
{
  os::shutdown();
  // no core dump on Windows
  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  _exit(-1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
// * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

/* Caller must have already run dirname through JVM_NativePath, which removes
   duplicate slashes and converts all instances of '/' into '\\'.
 */

// Open a directory stream over 'dirname' using FindFirstFile/FindNextFile.
// Returns NULL (0) with errno set (ENOMEM/ENOENT/ENOTDIR/EACCES) on failure.
DIR *
os::opendir(const char *dirname)
{
    assert(dirname != NULL, "just checking");      // hotspot change
    DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
    DWORD fattr;                                // hotspot change
    char alt_dirname[4] = { 0, 0, 0, 0 };

    if (dirp == 0) {
        errno = ENOMEM;
        return 0;
    }

    /*
     * Win32 accepts "\" in its POSIX stat(), but refuses to treat it
     * as a directory in FindFirstFile().  We detect this case here and
     * prepend the current drive name.
     */
    if (dirname[1] == '\0' && dirname[0] == '\\') {
        alt_dirname[0] = _getdrive() + 'A' - 1;
        alt_dirname[1] = ':';
        alt_dirname[2] = '\\';
        alt_dirname[3] = '\0';
        dirname = alt_dirname;
    }

    // +5 leaves room for the "\\*.*" wildcard suffix appended below plus NUL.
    dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
    if (dirp->path == 0) {
        free(dirp, mtInternal);
        errno = ENOMEM;
        return 0;
    }
    strcpy(dirp->path, dirname);

    fattr = GetFileAttributes(dirp->path);
    if (fattr == 0xffffffff) {
        free(dirp->path, mtInternal);
        free(dirp, mtInternal);
        errno = ENOENT;
        return 0;
    } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
        free(dirp->path, mtInternal);
        free(dirp, mtInternal);
        errno = ENOTDIR;
        return 0;
    }

    /* Append "*.*", or possibly "\\*.*", to path */
    if (dirp->path[1] == ':'
        && (dirp->path[2] == '\0'
            || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
        /* No '\\' needed for cases like "Z:" or "Z:\" */
        strcat(dirp->path, "*.*");
    } else {
        strcat(dirp->path, "\\*.*");
    }

    // The first entry is fetched eagerly; readdir() hands it out and then
    // advances (a "one entry ahead" pattern).
    dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
    if (dirp->handle == INVALID_HANDLE_VALUE) {
        if (GetLastError() != ERROR_FILE_NOT_FOUND) {
            free(dirp->path, mtInternal);
            free(dirp, mtInternal);
            errno = EACCES;
            return 0;
        }
    }
    return dirp;
}

// Return the next directory entry, or NULL when the stream is exhausted.
// Note: the entry returned is the one pre-fetched by the previous call
// (or by opendir for the first call); FindNextFile advances for next time.
struct dirent *os::readdir(DIR *dirp)
{
    assert(dirp != NULL, "just checking");      // hotspot change
    if (dirp->handle == INVALID_HANDLE_VALUE) {
        return NULL;
    }

    strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

    if (!FindNextFile(dirp->handle, &dirp->find_data)) {
        if (GetLastError() == ERROR_INVALID_HANDLE) {
            errno = EBADF;
            return NULL;
        }
        // End of listing: close the find handle now; the INVALID_HANDLE_VALUE
        // sentinel makes the next readdir() call return NULL.
        FindClose(dirp->handle);
        dirp->handle = INVALID_HANDLE_VALUE;
    }

    return &dirp->dirent;
}

// Close a directory stream and release its storage.
int
os::closedir(DIR *dirp)
{
    assert(dirp != NULL, "just checking");      // hotspot change
    if (dirp->handle != INVALID_HANDLE_VALUE) {
        if (!FindClose(dirp->handle)) {
            errno = EBADF;
            return -1;
        }
        dirp->handle = INVALID_HANDLE_VALUE;
    }
    free(dirp->path, mtInternal);
    free(dirp, mtInternal);
    return 0;
}

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
// Returns a pointer to a function-static buffer (empty string on failure).
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf)>0)
    return path_buf;
  else{
    path_buf[0]='\0';
    return path_buf;
  }
}

// True if 'filename' names an existing file or directory.
static bool file_exists(const char* filename) {
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
}

// Build a platform DLL name ("<pname>\<fname>.dll") into 'buffer'.
// When 'pname' is a path-separator-delimited list, the first element for
// which the resulting file exists wins.  Returns false on overflow or when
// no candidate on the search path exists.
bool os::dll_build_name(char *buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  const size_t pnamelen = pname ? strlen(pname) : 0;
  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    jio_snprintf(buffer, buflen, "%s.dll", fname);
    retval = true;
  } else if (c == ':' || c == '\\') {
    // pname already ends with a drive/path separator; no '\\' inserted.
    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0 ; i < n ; i++) {
      char* path = pelements[i];
      // Really shouldn't be NULL, but check can't hurt
      size_t plen = (path == NULL) ? 0 : strlen(path);
      if (plen == 0) {
        continue; // skip the empty path values
      }
      const char lastchar = path[plen - 1];
      if (lastchar == ':' || lastchar == '\\') {
        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
      } else {
        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
      }
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0 ; i < n ; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
    retval = true;
  }
  return retval;
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
// Clamp buflen to INT_MAX because _getcwd takes an int size.
const char* os::get_current_directory(char *buf, size_t buflen) {
  int n = static_cast<int>(buflen);
  if (buflen > INT_MAX)  n = INT_MAX;
  return _getcwd(buf, n);
}

//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if address in
// within the NTDLL address space.
//
static bool _addr_in_ntdll( address addr )
{
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if ( hmod == NULL ) return false;
  if ( !os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod,
                               &minfo, sizeof(MODULEINFO)) )
    return false;

  if ( (addr >= minfo.lpBaseOfDll) &&
       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
    return true;
  else
    return false;
}
#endif


// Enumerate all modules for a given process ID
//
// Notice that Windows 95/98/Me and Windows NT/2000/XP have
// different API for doing this. We use PSAPI.DLL on NT based
// Windows and ToolHelp on 95/98/Me.

// Callback function that is called by enumerate_modules() on
// every DLL module.
// Input parameters:
//    int       pid,
//    char*     module_file_name,
//    address   module_base_addr,
//    unsigned  module_size,
//    void*     param
typedef int (*EnumModulesCallbackFunc)(int, char *, address, unsigned, void *);

// enumerate_modules for Windows NT, using PSAPI
// Returns the first non-zero value returned by 'func', or 0.
static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void * param)
{
  HANDLE   hProcess ;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  // NOTE(review): 'filename' is a function-static buffer, so this helper is
  // not reentrant; assumed only used from single-threaded error reporting.
  static char filename[ MAX_PATH ];
  int         result = 0;

  if (!os::PSApiDll::PSApiAvailable()) {
    return 0;
  }

  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid ) ;
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
                           sizeof(modules), &size_needed)) {
      CloseHandle( hProcess );
      return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if(!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
                             filename, sizeof(filename))) {
        filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
                               &modinfo, sizeof(modinfo))) {
        modinfo.lpBaseOfDll = NULL;
        modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = func(pid, filename, (address)modinfo.lpBaseOfDll,
                  modinfo.SizeOfImage, param);
    if (result) break;
  }

  CloseHandle( hProcess ) ;
  return result;
}


// enumerate_modules for Windows 95/98/ME, using TOOLHELP
// Returns the first non-zero value returned by 'func', or 0.
static int _enumerate_modules_windows( int pid, EnumModulesCallbackFunc func, void *param)
{
  HANDLE                hSnapShot ;
  static MODULEENTRY32  modentry ;
  int                   result = 0;

  if (!os::Kernel32Dll::HelpToolsAvailable()) {
    return 0;
  }

  // Get a handle to a Toolhelp snapshot of the system
  hSnapShot = os::Kernel32Dll::CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid ) ;
  if( hSnapShot == INVALID_HANDLE_VALUE ) {
      return FALSE ;
  }

  // iterate through all modules
  modentry.dwSize = sizeof(MODULEENTRY32) ;
  bool not_done = os::Kernel32Dll::Module32First( hSnapShot, &modentry ) != 0;

  while( not_done ) {
    // invoke the callback
    result=func(pid, modentry.szExePath, (address)modentry.modBaseAddr,
                modentry.modBaseSize, param);
    if (result) break;

    modentry.dwSize = sizeof(MODULEENTRY32) ;
    not_done = os::Kernel32Dll::Module32Next( hSnapShot, &modentry ) != 0;
  }

  CloseHandle(hSnapShot);
  return result;
}

// Dispatch to the NT (PSAPI) or 9x (ToolHelp) module enumerator.
int enumerate_modules( int pid, EnumModulesCallbackFunc func, void * param )
{
  // Get current process ID if caller doesn't provide it.
  if (!pid) pid = os::current_process_id();

  if (os::win32::is_nt()) return _enumerate_modules_winnt  (pid, func, param);
  else                    return _enumerate_modules_windows(pid, func, param);
}

struct _modinfo {
   address addr;
   char*   full_path;   // point to a char buffer
   int     buflen;      // size of the buffer
   address base_addr;
};

// enumerate_modules() callback: stop (return 1) on the module whose address
// range contains pmod->addr, recording its path and base address.
static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr,
                                  unsigned size, void * param) {
   struct _modinfo *pmod = (struct _modinfo *)param;
   if (!pmod) return -1;

   if (base_addr     <= pmod->addr &&
       base_addr+size > pmod->addr) {
     // if a buffer is provided, copy path name to the buffer
     if (pmod->full_path) {
       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
     }
     pmod->base_addr = base_addr;
     return 1;
   }
   return 0;
}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

// NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
//       return the full path to the DLL file, sometimes it returns path
//       to the corresponding PDB file (debug info); sometimes it only
//       returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr      = addr;
  mi.full_path = buf;
  mi.buflen    = buflen;
  int pid = os::current_process_id();
  if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

// Map a code address to a symbol name via the Decoder; on failure the
// buffer is cleared and *offset (if given) is set to -1.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset)) {
    return true;
  }
  if (offset != NULL)  *offset  = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
// (identified as the module containing this very function's address)
static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr,
                           unsigned size, void * param) {
   if (!param) return -1;

   if (base_addr     <= (address)_locate_jvm_dll &&
       base_addr+size > (address)_locate_jvm_dll) {
         ((address*)param)[0] = base_addr;
         ((address*)param)[1] = base_addr + size;
         return 1;
   }
   return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
// NOTE(review): vm_lib_location is filled in lazily without synchronization;
// assumed benign because the fill is idempotent — confirm if called from
// multiple threads before initialization.
bool os::address_is_in_vm(address addr) {
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    int pid = os::current_process_id();
    if (!enumerate_modules(pid, _locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(int pid, char* fname, address base,
                         unsigned size, void* param) {
   if (!param) return -1;

   outputStream* st = (outputStream*)param;

   address end_addr
 = base + size;
   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base, end_addr, fname);
   return 0;
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
void * os::dll_load(const char *name, char *ebuf, int ebuflen)
{
  void * result = LoadLibrary(name);
  if (result != NULL)
  {
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen-1);
    ebuf[ebuflen-1]='\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen-1]='\0';
  int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0);
  if (file_descriptor<0)
  {
    return NULL;
  }

  // Probe the PE headers by hand: read the signature offset at 0x3c, skip
  // the 4-byte "PE\0\0" signature, then read the COFF machine field.
  uint32_t signature_offset;
  uint16_t lib_arch=0;
  bool failed_to_get_lib_arch=
  (
    //Go to position 3c in the dll
    (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0)
    ||
    // Read loacation of signature
    (sizeof(signature_offset)!=
      (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset))))
    ||
    //Go to COFF File Header in dll
    //that is located after"signature" (4 bytes long)
    (os::seek_to_file_offset(file_descriptor,
      signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0)
    ||
    //Read field that contains code of architecture
    // that dll was build for
    (sizeof(lib_arch)!=
      (os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch))))
  );

  ::close(file_descriptor);
  if (failed_to_get_lib_arch)
  {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  typedef struct
  {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[]={
    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
  };
  #if   (defined _M_IA64)
    static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64;
  #elif (defined _M_AMD64)
    static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64;
  #elif (defined _M_IX86)
    static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386;
  #else
    #error Method os::dll_load requires that one of following \
           is defined :_M_IA64,_M_AMD64 or _M_IX86
  #endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str=NULL,*lib_arch_str=NULL;
  for (unsigned int i=0;i<ARRAY_SIZE(arch_array);i++)
  {
    if (lib_arch==arch_array[i].arch_code)
      lib_arch_str=arch_array[i].arch_name;
    if (running_arch==arch_array[i].arch_code)
      running_arch_str=arch_array[i].arch_name;
  }

  assert(running_arch_str,
    "Didn't find runing architecture code in arch_array");

  // If the architure is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch)
  {
    return NULL;
  }

  if (lib_arch_str!=NULL)
  {
    ::_snprintf(ebuf, ebuflen-1,
      "Can't load %s-bit .dll on a %s-bit platform",
      lib_arch_str,running_arch_str);
  }
  else
  {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen-1,
      "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
      lib_arch,running_arch_str);
  }

  return NULL;
}


// Print the address range and path of every loaded module to 'st'.
void os::print_dll_info(outputStream *st) {
  int pid = os::current_process_id();
  st->print_cr("Dynamic libraries:");
  enumerate_modules(pid, _print_module, (void *)st);
}

// Enumerate modules of the current process via PSAPI, invoking 'callback'
// with (path, base, top, param) for each; stops at first non-zero return.
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  HANDLE   hProcess;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  // NOTE(review): function-static buffer — not reentrant; assumed called from
  // a single (error-reporting) thread at a time.
  static char filename[MAX_PATH];
  int         result = 0;

  int pid = os::current_process_id();
  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = callback(filename, (address)modinfo.lpBaseOfDll,
                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
    if (result) break;
1688 } 1689 1690 CloseHandle(hProcess); 1691 return result; 1692 } 1693 1694 void os::print_os_info_brief(outputStream* st) { 1695 os::print_os_info(st); 1696 } 1697 1698 void os::print_os_info(outputStream* st) { 1699 st->print("OS:"); 1700 1701 os::win32::print_windows_version(st); 1702 } 1703 1704 void os::win32::print_windows_version(outputStream* st) { 1705 OSVERSIONINFOEX osvi; 1706 VS_FIXEDFILEINFO *file_info; 1707 TCHAR kernel32_path[MAX_PATH]; 1708 UINT len, ret; 1709 1710 // Use the GetVersionEx information to see if we're on a server or 1711 // workstation edition of Windows. Starting with Windows 8.1 we can't 1712 // trust the OS version information returned by this API. 1713 ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX)); 1714 osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); 1715 if (!GetVersionEx((OSVERSIONINFO *)&osvi)) { 1716 st->print_cr("Call to GetVersionEx failed"); 1717 return; 1718 } 1719 bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION); 1720 1721 // Get the full path to \Windows\System32\kernel32.dll and use that for 1722 // determining what version of Windows we're running on. 
1723 len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1; 1724 ret = GetSystemDirectory(kernel32_path, len); 1725 if (ret == 0 || ret > len) { 1726 st->print_cr("Call to GetSystemDirectory failed"); 1727 return; 1728 } 1729 strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret); 1730 1731 DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL); 1732 if (version_size == 0) { 1733 st->print_cr("Call to GetFileVersionInfoSize failed"); 1734 return; 1735 } 1736 1737 LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal); 1738 if (version_info == NULL) { 1739 st->print_cr("Failed to allocate version_info"); 1740 return; 1741 } 1742 1743 if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) { 1744 os::free(version_info); 1745 st->print_cr("Call to GetFileVersionInfo failed"); 1746 return; 1747 } 1748 1749 if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) { 1750 os::free(version_info); 1751 st->print_cr("Call to VerQueryValue failed"); 1752 return; 1753 } 1754 1755 int major_version = HIWORD(file_info->dwProductVersionMS); 1756 int minor_version = LOWORD(file_info->dwProductVersionMS); 1757 int build_number = HIWORD(file_info->dwProductVersionLS); 1758 int build_minor = LOWORD(file_info->dwProductVersionLS); 1759 int os_vers = major_version * 1000 + minor_version; 1760 os::free(version_info); 1761 1762 st->print(" Windows "); 1763 switch (os_vers) { 1764 1765 case 6000: 1766 if (is_workstation) { 1767 st->print("Vista"); 1768 } else { 1769 st->print("Server 2008"); 1770 } 1771 break; 1772 1773 case 6001: 1774 if (is_workstation) { 1775 st->print("7"); 1776 } else { 1777 st->print("Server 2008 R2"); 1778 } 1779 break; 1780 1781 case 6002: 1782 if (is_workstation) { 1783 st->print("8"); 1784 } else { 1785 st->print("Server 2012"); 1786 } 1787 break; 1788 1789 case 6003: 1790 if (is_workstation) { 1791 st->print("8.1"); 1792 } else { 1793 st->print("Server 2012 R2"); 1794 } 1795 break; 1796 1797 case 6004: 
1798 if (is_workstation) { 1799 st->print("10"); 1800 } else { 1801 st->print("Server 2016"); 1802 } 1803 break; 1804 1805 default: 1806 // Unrecognized windows, print out its major and minor versions 1807 st->print("%d.%d", major_version, minor_version); 1808 break; 1809 } 1810 1811 // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could 1812 // find out whether we are running on 64 bit processor or not 1813 SYSTEM_INFO si; 1814 ZeroMemory(&si, sizeof(SYSTEM_INFO)); 1815 os::Kernel32Dll::GetNativeSystemInfo(&si); 1816 if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) { 1817 st->print(" , 64 bit"); 1818 } 1819 1820 st->print(" Build %d", build_number); 1821 st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor); 1822 st->cr(); 1823 } 1824 1825 void os::pd_print_cpu_info(outputStream* st) { 1826 // Nothing to do for now. 1827 } 1828 1829 void os::print_memory_info(outputStream* st) { 1830 st->print("Memory:"); 1831 st->print(" %dk page", os::vm_page_size()>>10); 1832 1833 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 1834 // value if total memory is larger than 4GB 1835 MEMORYSTATUSEX ms; 1836 ms.dwLength = sizeof(ms); 1837 GlobalMemoryStatusEx(&ms); 1838 1839 st->print(", physical %uk", os::physical_memory() >> 10); 1840 st->print("(%uk free)", os::available_memory() >> 10); 1841 1842 st->print(", swap %uk", ms.ullTotalPageFile >> 10); 1843 st->print("(%uk free)", ms.ullAvailPageFile >> 10); 1844 st->cr(); 1845 } 1846 1847 void os::print_siginfo(outputStream *st, void *siginfo) { 1848 EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo; 1849 st->print("siginfo:"); 1850 st->print(" ExceptionCode=0x%x", er->ExceptionCode); 1851 1852 if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 1853 er->NumberParameters >= 2) { 1854 switch (er->ExceptionInformation[0]) { 1855 case 0: st->print(", reading address"); break; 1856 case 1: st->print(", writing address"); break; 1857 default: 
st->print(", ExceptionInformation=" INTPTR_FORMAT,
              er->ExceptionInformation[0]);
    }
    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR &&
             er->NumberParameters >= 2 && UseSharedSpaces) {
    // A page-in failure inside the CDS archive usually means the mapped
    // file became unreadable (disk/network trouble) while we were running.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) {
      st->print("\n\nError accessing class data sharing archive." \
                " Mapped file inaccessible during execution, " \
                " possible disk/network problem.");
    }
  } else {
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}


// C99-style vsnprintf shim: guarantees NUL termination and returns the
// length the output would have had, papering over pre-VS2015 _vsnprintf.
int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
#if _MSC_VER >= 1900
  // Starting with Visual Studio 2015, vsnprint is C99 compliant.
  int result = ::vsnprintf(buf, len, fmt, args);
  // If an encoding error occurred (result < 0) then it's not clear
  // whether the buffer is NUL terminated, so ensure it is.
  if ((result < 0) && (len > 0)) {
    buf[len - 1] = '\0';
  }
  return result;
#else
  // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
  // _vsnprintf, whose behavior seems to be *mostly* consistent across
  // versions.  However, when len == 0, avoid _vsnprintf too, and just
  // go straight to _vscprintf.  The output is going to be truncated in
  // that case, except in the unusual case of empty output.  More
  // importantly, the documentation for various versions of Visual Studio
  // are inconsistent about the behavior of _vsnprintf when len == 0,
  // including it possibly being an error.
  int result = -1;
  if (len > 0) {
    result = _vsnprintf(buf, len, fmt, args);
    // If output (including NUL terminator) is truncated, the buffer
    // won't be NUL terminated.  Add the trailing NUL specified by C99.
    if ((result < 0) || (result >= (int) len)) {
      buf[len - 1] = '\0';
    }
  }
  if (result < 0) {
    result = _vscprintf(fmt, args);
  }
  return result;
#endif // _MSC_VER dispatch
}

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}

// Cache for the resolved jvm.dll path; filled on first os::jvm_path() call.
static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::created_by_gamma_launcher()) {
    // Support for the gamma launcher. Check for an
    // JAVA_HOME environment variable
    // and fix up the path so it looks like
    // libjvm.so is installed there (append a fake suffix
    // hotspot/libjvm.so).
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {

      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      if (0 != _access(buf, 0)) {
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  // Fall back to asking the OS for the path of the loaded VM module.
  if(buf[0] == '\0') {
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  strncpy(saved_jvm_path, buf, MAX_PATH);
}


void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}


void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("@%d", args_size * sizeof(int));
#endif
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c
// Formats the current GetLastError()/errno into 'buf'; returns the message
// length, or 0 when there is no pending error.

size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
          FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
          NULL,
          errval,
          0,
          buf,
          (DWORD)len,
          NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
} 2021 2022 int os::get_last_error() { 2023 DWORD error = GetLastError(); 2024 if (error == 0) 2025 error = errno; 2026 return (int)error; 2027 } 2028 2029 // sun.misc.Signal 2030 // NOTE that this is a workaround for an apparent kernel bug where if 2031 // a signal handler for SIGBREAK is installed then that signal handler 2032 // takes priority over the console control handler for CTRL_CLOSE_EVENT. 2033 // See bug 4416763. 2034 static void (*sigbreakHandler)(int) = NULL; 2035 2036 static void UserHandler(int sig, void *siginfo, void *context) { 2037 os::signal_notify(sig); 2038 // We need to reinstate the signal handler each time... 2039 os::signal(sig, (void*)UserHandler); 2040 } 2041 2042 void* os::user_handler() { 2043 return (void*) UserHandler; 2044 } 2045 2046 void* os::signal(int signal_number, void* handler) { 2047 if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) { 2048 void (*oldHandler)(int) = sigbreakHandler; 2049 sigbreakHandler = (void (*)(int)) handler; 2050 return (void*) oldHandler; 2051 } else { 2052 return (void*)::signal(signal_number, (void (*)(int))handler); 2053 } 2054 } 2055 2056 void os::signal_raise(int signal_number) { 2057 raise(signal_number); 2058 } 2059 2060 // The Win32 C runtime library maps all console control events other than ^C 2061 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close, 2062 // logoff, and shutdown events. We therefore install our own console handler 2063 // that raises SIGTERM for the latter cases. 2064 // 2065 static BOOL WINAPI consoleHandler(DWORD event) { 2066 switch(event) { 2067 case CTRL_C_EVENT: 2068 if (is_error_reported()) { 2069 // Ctrl-C is pressed during error reporting, likely because the error 2070 // handler fails to abort. Let VM die immediately. 
        os::die();
      }

      os::signal_raise(SIGINT);
      return TRUE;
      break;
    case CTRL_BREAK_EVENT:
      // Dispatch to the recorded SIGBREAK handler (see os::signal above).
      if (sigbreakHandler != NULL) {
        (*sigbreakHandler)(SIGBREAK);
      }
      return TRUE;
      break;
    case CTRL_LOGOFF_EVENT: {
      // Don't terminate JVM if it is running in a non-interactive session,
      // such as a service process.
      USEROBJECTFLAGS flags;
      HANDLE handle = GetProcessWindowStation();
      if (handle != NULL &&
          GetUserObjectInformation(handle, UOI_FLAGS, &flags,
            sizeof( USEROBJECTFLAGS), NULL)) {
        // If it is a non-interactive session, let the next handler deal
        // with it.
        if ((flags.dwFlags & WSF_VISIBLE) == 0) {
          return FALSE;
        }
      }
    }
    // Note: intentional fall-through -- an interactive logoff is treated
    // exactly like a console close or system shutdown.
    case CTRL_CLOSE_EVENT:
    case CTRL_SHUTDOWN_EVENT:
      os::signal_raise(SIGTERM);
      return TRUE;
      break;
    default:
      break;
  }
  return FALSE;
}

/*
 * The following code is moved from os.cpp for making this
 * code platform specific, which it is by its very nature.
 */

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd(){
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Semaphore released once per pending signal; the signal thread waits on it.
static HANDLE sig_sem = NULL;

// Platform-dependent part of signal subsystem initialization: set up the
// pending-signal counters/semaphore and (unless -Xrs) the console handler.
void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.  For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified.  This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case.  See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}

// Record one occurrence of signal_number and wake the signal thread.
void os::signal_notify(int signal_number) {
  BOOL ret;
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[signal_number]);
    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
    assert(ret != 0, "ReleaseSemaphore() failed");
  }
}

// Scan the pending-signal counters and claim one (decrement via CAS).
// Returns the claimed signal number, or -1 if none is pending and
// wait_for_signal is false; otherwise blocks on sig_sem until one arrives.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // CAS decrement: only the thread that wins the race claims this signal.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        // Put the permit back so another (or this) thread can claim it later.
        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
        assert(ret != 0, "ReleaseSemaphore() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

// Non-blocking check for a pending signal; -1 if none.
int os::signal_lookup() {
  return check_pending_signals(false);
}

// Block until a signal is pending, then return its number.
int os::signal_wait() {
  return check_pending_signals(true);
}

// Implicit OS exception handling

// Redirect execution to 'handler': save the faulting pc in the current
// thread and rewrite the context's instruction pointer, then tell the OS
// to resume with the modified context.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) {
  JavaThread* thread = JavaThread::current();
  // Save pc in thread
#ifdef _M_IA64
  // Do not blow up if no thread info available.
  if (thread) {
    // Saving PRECISE pc (with slot information) in thread.
    uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
    // Convert precise PC into "Unix" format
    precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
    thread->set_saved_exception_pc((address)precise_pc);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
  // Clear out psr.ri (= Restart Instruction) in order to continue
  // at the beginning of the target bundle.
  exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
  assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
#else
  #ifdef _M_AMD64
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
  #else
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
  #endif
#endif

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}


// Used for PostMortemDump
extern "C" void safepoints();
extern "C" void find(int x);
extern "C" void events();

// According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real world experience shows that occasionally
// the execution of an illegal instruction can generate the exception code 0xC000001E. This
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).

#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E

// From "Execution Protection in the Windows Operating System" draft 0.35
// Once a system header becomes available, the "real" define should be
// included or copied here.
#define EXCEPTION_INFO_EXEC_VIOLATION 0x08

// Handle NAT Bit consumption on IA64.
#ifdef _M_IA64
#define EXCEPTION_REG_NAT_CONSUMPTION STATUS_REG_NAT_CONSUMPTION
#endif

// Windows Vista/2008 heap corruption check
#define EXCEPTION_HEAP_CORRUPTION 0xC0000374

// All Visual C++ exceptions thrown from code generated by the Microsoft Visual
// C++ compiler contain this error code. Because this is a compiler-generated
// error, the code is not listed in the Win32 API header files.
2280 // The code is actually a cryptic mnemonic device, with the initial "E" 2281 // standing for "exception" and the final 3 bytes (0x6D7363) representing the 2282 // ASCII values of "msc". 2283 2284 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363 2285 2286 #define def_excpt(val) { #val, (val) } 2287 2288 static const struct { char* name; uint number; } exceptlabels[] = { 2289 def_excpt(EXCEPTION_ACCESS_VIOLATION), 2290 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), 2291 def_excpt(EXCEPTION_BREAKPOINT), 2292 def_excpt(EXCEPTION_SINGLE_STEP), 2293 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), 2294 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND), 2295 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO), 2296 def_excpt(EXCEPTION_FLT_INEXACT_RESULT), 2297 def_excpt(EXCEPTION_FLT_INVALID_OPERATION), 2298 def_excpt(EXCEPTION_FLT_OVERFLOW), 2299 def_excpt(EXCEPTION_FLT_STACK_CHECK), 2300 def_excpt(EXCEPTION_FLT_UNDERFLOW), 2301 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO), 2302 def_excpt(EXCEPTION_INT_OVERFLOW), 2303 def_excpt(EXCEPTION_PRIV_INSTRUCTION), 2304 def_excpt(EXCEPTION_IN_PAGE_ERROR), 2305 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION), 2306 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2), 2307 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION), 2308 def_excpt(EXCEPTION_STACK_OVERFLOW), 2309 def_excpt(EXCEPTION_INVALID_DISPOSITION), 2310 def_excpt(EXCEPTION_GUARD_PAGE), 2311 def_excpt(EXCEPTION_INVALID_HANDLE), 2312 def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION), 2313 def_excpt(EXCEPTION_HEAP_CORRUPTION) 2314 #ifdef _M_IA64 2315 , def_excpt(EXCEPTION_REG_NAT_CONSUMPTION) 2316 #endif 2317 }; 2318 2319 const char* os::exception_name(int exception_code, char *buf, size_t size) { 2320 uint code = static_cast<uint>(exception_code); 2321 for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) { 2322 if (exceptlabels[i].number == code) { 2323 jio_snprintf(buf, size, "%s", exceptlabels[i].name); 2324 return buf; 2325 } 2326 } 2327 2328 return NULL; 2329 } 2330 2331 
//-----------------------------------------------------------------------------
LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by idiv; should only happen for -MinInt/-1
  // (division by zero is handled explicitly)
#ifdef _M_IA64
  assert(0, "Fix Handle_IDiv_Exception");
#else
  #ifdef _M_AMD64
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Rip;
  // Sanity-check that we really faulted on "idiv reg" with -MinInt/-1.
  assert(pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  assert(ctx->Rax == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Rip = (DWORD64)pc + 2;   // idiv reg, reg is 2 bytes
  ctx->Rax = (DWORD64)min_jint; // result: Java semantics give MinInt/-1 == MinInt
  ctx->Rdx = (DWORD64)0;        // remainder
  // Continue the execution
  #else
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Eip;
  // Sanity-check that we really faulted on "idiv reg" with -MinInt/-1.
  assert(pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  assert(ctx->Eax == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Eip = (DWORD)pc + 2;   // idiv reg, reg is 2 bytes
  ctx->Eax = (DWORD)min_jint; // result: Java semantics give MinInt/-1 == MinInt
  ctx->Edx = (DWORD)0;        // remainder
  // Continue the execution
  #endif
#endif
  return EXCEPTION_CONTINUE_EXECUTION;
}

#ifndef _WIN64
//-----------------------------------------------------------------------------
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by native method modifying control word
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;

  // All FLT exception codes are handled identically: if a native method
  // changed the x87 control word, restore the VM's standard one and resume.
  switch (exception_code) {
    case EXCEPTION_FLT_DENORMAL_OPERAND:
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
    case EXCEPTION_FLT_INEXACT_RESULT:
    case EXCEPTION_FLT_INVALID_OPERATION:
    case EXCEPTION_FLT_OVERFLOW:
    case EXCEPTION_FLT_STACK_CHECK:
    case EXCEPTION_FLT_UNDERFLOW:
      jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
      if (fp_control_word != ctx->FloatSave.ControlWord) {
        // Restore FPCW and mask out FLT exceptions
        ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
        // Mask out pending FLT exceptions
        ctx->FloatSave.StatusWord &= 0xffffff00;
        return EXCEPTION_CONTINUE_EXECUTION;
      }
  }

  if (prev_uef_handler != NULL) {
    // We didn't handle this exception so pass it to the previous
    // UnhandledExceptionFilter.
    return (prev_uef_handler)(exceptionInfo);
  }

  return EXCEPTION_CONTINUE_SEARCH;
}
#else //_WIN64
/*
  On Windows, the mxcsr control bits are non-volatile across calls
  See also CR 6192333
  If EXCEPTION_FLT_* happened after some native method modified
  mxcsr - it is not a jvm fault.
  However should we decide to restore of mxcsr after a faulty
  native method we can uncomment following code
      jint MxCsr = INITIAL_MXCSR;
        // we can't use StubRoutines::addr_mxcsr_std()
        // because in Win64 mxcsr is not saved there
      if (MxCsr != ctx->MxCsr) {
            ctx->MxCsr = MxCsr;
            return EXCEPTION_CONTINUE_EXECUTION;
      }

*/
#endif // _WIN64


// Report a fatal VM error (hs_err file, etc.) for the given exception.
static inline void report_error(Thread* t, DWORD exception_code,
                                address addr, void* siginfo, void* context) {
  VMError err(t, exception_code, addr, siginfo, context);
  err.report_and_die();

  // If UseOsErrorReporting, this will return here and save the error file
  // somewhere where we can find it in the minidump.
}

//-----------------------------------------------------------------------------
// Top-level structured exception filter for the whole VM. Classifies the
// exception (safefetch fault, exec-protection violation, safepoint page trap,
// stack overflow, implicit null check, integer overflow, FLT exceptions, ...)
// and either redirects execution to a VM continuation stub or reports a
// fatal error.
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#ifdef _M_IA64
  // On Itanium, we need the "precise pc", which has the slot number coded
  // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
  address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
  // Convert the pc to "Unix format", which has the slot number coded
  // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
  // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
  // information is saved in the Unix format.
  address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
#else
  #ifdef _M_AMD64
  address pc = (address) exceptionInfo->ContextRecord->Rip;
  #else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
  #endif
#endif
  Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      bool instr_spans_page_boundary =
        (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                         (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start =
            (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          if (PrintMiscellaneous && Verbose) {
            char buf[256];
            jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                         "at " INTPTR_FORMAT
                         ", unguarding " INTPTR_FORMAT ": %s", addr,
                         page_start, (res ? "success" : strerror(errno)));
            tty->print_raw_cr(buf);
          }

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here.  Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM.  Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop.  This condition is probably even more unlikely
          // than the first.
          //
          // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are unnecessary complication:
          // this handler is a best-effort safety net, not a complete solution.
          // It is disabled by default and should only be used as a workaround
          // in case we missed any no-execute-unsafe VM code.

          last_addr = addr;

          return EXCEPTION_CONTINUE_EXECUTION;
        }
      }

      // Last unguard failed or not unguarding
      tty->print_raw_cr("Execution protection violation");
      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    }
  }
#endif // _WIN64

  // Check to see if we caught the safepoint code in the
  // process of write protecting the memory serialization page.
  // It write enables the page immediately after protecting it
  // so just return.
  if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
    JavaThread* thread = (JavaThread*) t;
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];
    if ( os::is_memory_serialize_page(thread, addr) ) {
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
      VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    // (Deliberate fault planted by the CPU-feature probe code.)
    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
  }

  if (t != NULL && t->is_Java_thread()) {
    JavaThread* thread = (JavaThread*) t;
    bool in_java = thread->thread_state() == _thread_in_Java;

    // Handle potential stack overflows up front.
    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
      if (os::uses_stack_guard_pages()) {
#ifdef _M_IA64
        // Use guard page for register stack.
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        // Check for a register stack overflow on Itanium
        if (thread->addr_inside_register_stack_red_zone(addr)) {
          // Fatal red zone violation happens if the Java program
          // catches a StackOverflow error and does so much processing
          // that it runs beyond the unprotected yellow guard zone. As
          // a result, we are out of here.
          fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
        } else if(thread->addr_inside_register_stack(addr)) {
          // Disable the yellow zone which sets the state that
          // we've got a stack overflow problem.
          if (thread->stack_yellow_zone_enabled()) {
            thread->disable_stack_yellow_zone();
          }
          // Give us some room to process the exception.
          thread->disable_register_stack_guard();
          // Tracing with +Verbose.
          if (Verbose) {
            tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
            tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
            tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
            tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
                          thread->register_stack_base(),
                          thread->register_stack_base() + thread->stack_size());
          }

          // Reguard the permanent register stack red zone just to be sure.
          // We saw Windows silently disabling this without telling us.
          thread->enable_register_stack_red_zone();

          return Handle_Exception(exceptionInfo,
            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
#endif
        if (thread->stack_yellow_zone_enabled()) {
          // Yellow zone violation.  The o/s has unprotected the first yellow
          // zone page for us.  Note:  must call disable_stack_yellow_zone to
          // update the enabled status, even if the zone contains only one page.
          thread->disable_stack_yellow_zone();
          // If not in java code, return and hope for the best.
          return in_java ? Handle_Exception(exceptionInfo,
            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
            : EXCEPTION_CONTINUE_EXECUTION;
        } else {
          // Fatal red zone violation.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
          report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                       exceptionInfo->ContextRecord);
          return EXCEPTION_CONTINUE_SEARCH;
        }
      } else if (in_java) {
        // JVM-managed guard pages cannot be used on win95/98.  The o/s provides
        // a one-time-only guard page, which it has released to us.  The next
        // stack overflow on this thread will result in an ACCESS_VIOLATION.
        return Handle_Exception(exceptionInfo,
          SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
      } else {
        // Can only return and hope for the best.  Further stack growth will
        // result in an ACCESS_VIOLATION.
        return EXCEPTION_CONTINUE_EXECUTION;
      }
    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
      // Either stack overflow or null pointer exception.
      if (in_java) {
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        address stack_end = thread->stack_base() - thread->stack_size();
        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
          // Stack overflow.
          assert(!os::uses_stack_guard_pages(),
            "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        //
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        //
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          // Fault inside generated code on the polling page => safepoint poll.
          if (os::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
        {
#ifdef _WIN64
          //
          // If it's a legal stack address map the entire region in
          //
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) {
                  // Round down to a page boundary and commit everything from
                  // there up to the stack base.
                  addr = (address)((uintptr_t)addr &
                         (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
                  os::commit_memory((char *)addr, thread->stack_base() - addr,
                                    !ExecMem);
                  return EXCEPTION_CONTINUE_EXECUTION;
          }
          else
#endif
          {
            // Null pointer exception.
#ifdef _M_IA64
            // Process implicit null checks in compiled code. Note: Implicit null checks
            // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
            if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
              CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
              // Handle implicit null check in UEP method entry
              if (cb && (cb->is_frame_complete_at(pc) ||
                         (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
                if (Verbose) {
                  intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
                  tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
                  tty->print_cr("      to addr " INTPTR_FORMAT, addr);
                  tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
                                *(bundle_start + 1), *bundle_start);
                }
                return Handle_Exception(exceptionInfo,
                  SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
              }
            }

            // Implicit null checks were processed above.  Hence, we should not reach
            // here in the usual case => die!
            if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;

#else // !IA64

            // Windows 98 reports faulting addresses incorrectly
            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
                !os::win32::is_nt()) {
              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
            }
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;
#endif
          }
        }
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
        address addr = JNI_FastGetField::find_slowcase_pc(pc);
        if (addr != (address)-1) {
          return Handle_Exception(exceptionInfo, addr);
        }
      }
#endif

      // Stack overflow or null pointer exception in native code.
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#if defined _M_IA64
    else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
              exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
      M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);

      // Compiled method patched to be non entrant? Following conditions must apply:
      // 1. must be first instruction in bundle
      // 2. must be a break instruction with appropriate code
      if((((uint64_t) pc & 0x0F) == 0) &&
         (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
        return Handle_Exception(exceptionInfo,
          (address)SharedRuntime::get_handle_wrong_method_stub());
      }
    } // /EXCEPTION_ILLEGAL_INSTRUCTION
#endif


    if (in_java) {
      switch (exception_code) {
        case EXCEPTION_INT_DIVIDE_BY_ZERO:
          return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

        case EXCEPTION_INT_OVERFLOW:
          // Only -MinInt/-1 raises EXCEPTION_INT_OVERFLOW on idiv.
          return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }
#ifndef _WIN64
    if (((thread->thread_state() == _thread_in_Java) ||
        (thread->thread_state() == _thread_in_native)) &&
        exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION)
    {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
#endif //_WIN64
  }

  if (exception_code != EXCEPTION_BREAKPOINT) {
    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                 exceptionInfo->ContextRecord);
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

#ifndef _WIN64
// Special care for fast JNI accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
// the heap gets shrunk before the field access.
// Need to install our own structured exception handler since native code may
// install its own.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    // If the fault happened inside a fast accessor stub, resume at its
    // registered slow-case entry point instead of failing.
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Generate a C wrapper around a generated fast-accessor stub that installs
// the SEH filter above around the call.
#define DEFINE_FAST_GETFIELD(Return,Fieldname,Result) \
Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) { \
  __try { \
    return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, obj, fieldID); \
  } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { \
  } \
  return 0; \
}

DEFINE_FAST_GETFIELD(jboolean, bool, Boolean)
DEFINE_FAST_GETFIELD(jbyte, byte, Byte)
DEFINE_FAST_GETFIELD(jchar, char, Char)
DEFINE_FAST_GETFIELD(jshort, short, Short)
DEFINE_FAST_GETFIELD(jint, int, Int)
DEFINE_FAST_GETFIELD(jlong, long, Long)
DEFINE_FAST_GETFIELD(jfloat, float, Float)
DEFINE_FAST_GETFIELD(jdouble, double, Double)

// Return the SEH-protected wrapper for the fast JNI getter of the given type.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
    case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
    case T_BYTE: return (address)jni_fast_GetByteField_wrapper;
    case T_CHAR: return (address)jni_fast_GetCharField_wrapper;
    case T_SHORT: return (address)jni_fast_GetShortField_wrapper;
    case T_INT: return (address)jni_fast_GetIntField_wrapper;
    case T_LONG: return (address)jni_fast_GetLongField_wrapper;
    case T_FLOAT: return (address)jni_fast_GetFloatField_wrapper;
    case T_DOUBLE: return (address)jni_fast_GetDoubleField_wrapper;
    default: ShouldNotReachHere();
  }
  return (address)-1;
}
#endif

void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
  // Install a win32 structured exception handler around the test
  // function call so the VM can generate an error dump if needed.
  __try {
    (*funcPtr)();
  } __except(topLevelExceptionFilter(
             (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }
}

// Virtual Memory

int os::vm_page_size() { return os::win32::vm_page_size(); }
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}

// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
//   + select Control Panel -> Administrative Tools -> Local Security Policy
//   + select Local Policies -> User Rights Assignment
//   + double click "Lock pages in memory", add users and/or groups
//   + reboot
// Note the above steps are needed for administrator as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment it only uses large page if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future, if so the code below needs to be revisited.
#ifndef MEM_LARGE_PAGES
#define MEM_LARGE_PAGES 0x20000000
#endif

// Process/token handles used only during large_page_init(); released by
// cleanup_after_large_page_init().
static HANDLE _hProcess;
static HANDLE _hToken;

// Container for NUMA node list info
class NUMANodeListHolder {
 private:
  int *_numa_used_node_list;  // allocated below
  int _numa_used_node_count;

  void free_node_list() {
    if (_numa_used_node_list != NULL) {
      FREE_C_HEAP_ARRAY(int, _numa_used_node_list, mtInternal);
    }
  }

 public:
  NUMANodeListHolder() {
    _numa_used_node_count = 0;
    _numa_used_node_list = NULL;
    // do rest of initialization in build routine (after function pointers are set up)
  }

  ~NUMANodeListHolder() {
    free_node_list();
  }

  // Populates the list with the NUMA nodes whose processor mask intersects
  // this process' affinity mask. Returns true only if more than one node is
  // usable (interleaving across a single node is pointless).
  bool build() {
    DWORD_PTR proc_aff_mask;
    DWORD_PTR sys_aff_mask;
    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
    ULONG highest_node_number;
    if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false;
    free_node_list();
    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
    for (unsigned int i = 0; i <= highest_node_number; i++) {
      ULONGLONG proc_mask_numa_node;
      if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
      if ((proc_aff_mask & proc_mask_numa_node) != 0) {
        _numa_used_node_list[_numa_used_node_count++] = i;
      }
    }
    return (_numa_used_node_count > 1);
  }

  int get_count() { return _numa_used_node_count; }
  int get_node_list_entry(int n) {
    // for indexes out of range, returns -1
    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
  }

} numa_node_list_holder;



static size_t _large_page_size = 0;

// True if all dynamically-resolved API entry points needed for large page
// initialization are present.
static bool resolve_functions_for_large_page_init() {
  return os::Kernel32Dll::GetLargePageMinimumAvailable() &&
    os::Advapi32Dll::AdvapiAvailable();
}

// Attempts to enable SeLockMemoryPrivilege on the process token, which is
// required for MEM_LARGE_PAGES allocations. Leaves _hProcess/_hToken open
// for cleanup_after_large_page_init().
static bool request_lock_memory_privilege() {
  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
                          os::current_process_id());

  LUID luid;
  if (_hProcess != NULL &&
      os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
      os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

    TOKEN_PRIVILEGES tp;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Luid = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See MSDN document.
    if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
        (GetLastError() == ERROR_SUCCESS)) {
      return true;
    }
  }

  return false;
}

// Releases the handles opened by request_lock_memory_privilege(); safe to
// call whether or not that function succeeded.
static void cleanup_after_large_page_init() {
  if (_hProcess) CloseHandle(_hProcess);
  _hProcess = NULL;
  if (_hToken) CloseHandle(_hToken);
  _hToken = NULL;
}

// One-time setup for UseNUMAInterleaving: aligns the interleave granularity
// and builds the usable-node list. Returns false (and warns if the flag was
// explicitly set) when interleaving cannot be used.
static bool numa_interleaving_init() {
  bool success = false;
  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

  // print a warning if UseNUMAInterleaving flag is specified on command line
  bool warn_on_failure = use_numa_interleaving_specified;
# define WARN(msg) if (warn_on_failure) { warning(msg); }

  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);

  if (os::Kernel32Dll::NumaCallsAvailable()) {
    if (numa_node_list_holder.build()) {
      if (PrintMiscellaneous && Verbose) {
        tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
        for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
          tty->print("%d ", numa_node_list_holder.get_node_list_entry(i));
        }
        tty->print("\n");
      }
      success = true;
    } else {
      WARN("Process does not cover multiple NUMA nodes.");
    }
  } else {
    WARN("NUMA Interleaving is not supported by the operating system.");
  }
  if (!success) {
    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
  }
  return success;
#undef WARN
}

// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
//  * UseNUMAInterleaving requires a separate node for each piece
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, DWORD prot,
                                         bool should_inject_error=false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) VirtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = (char *) align_size_up((size_t)p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t  bytes_remaining = bytes;
  // An overflow of align_size_up() would have been caught above
  // in the calculation of size_of_reserve.
  char * next_alloc_addr = p_buf;
  HANDLE hProc = GetCurrentProcess();

#ifdef ASSERT
  // Variable for the failure injection
  long ran_num = os::random();
  size_t fail_after = ran_num % bytes;
#endif

  int count=0;
  while (bytes_remaining) {
    // select bytes_to_rq to get to the next chunk_size boundary

    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
    // Note allocate and commit
    char * p_new;

#ifdef ASSERT
    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
#else
    const bool inject_error_now = false;
#endif

    if (inject_error_now) {
      p_new = NULL;
    } else {
      if (!UseNUMAInterleaving) {
        p_new = (char *) VirtualAlloc(next_alloc_addr,
                                      bytes_to_rq,
                                      flags,
                                      prot);
      } else {
        // get the next node to use from the used_node_list
        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
        p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc,
                                                            next_alloc_addr,
                                                            bytes_to_rq,
                                                            flags,
                                                            prot,
                                                            node);
      }
    }

    if (p_new == NULL) {
      // Free any allocated pages
      if (next_alloc_addr > p_buf) {
        // Some memory was committed so release it.
        size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // need to create a dummy 'reserve' record to match
        // the release.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        if (TracePageSizes && Verbose) {
          tty->print_cr("Reserving pages individually failed.");
        }
      }
#endif
      return NULL;
    }

    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}



// Determines _large_page_size and turns UseLargePages off if any of the
// prerequisites (API availability, SeLockMemoryPrivilege, a supported page
// size) is missing; warns only when a large-page flag was set explicitly.
void os::large_page_init() {
  if (!UseLargePages) return;

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  bool success = false;

# define WARN(msg) if (warn_on_failure) { warning(msg); }
  if (resolve_functions_for_large_page_init()) {
    if (request_lock_memory_privilege()) {
      size_t s = os::Kernel32Dll::GetLargePageMinimum();
      if (s) {
#if defined(IA32) || defined(AMD64)
        if (s > 4*M || LargePageSizeInBytes > 4*M) {
          WARN("JVM cannot use large pages bigger than 4mb.");
        } else {
#endif
          if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
            _large_page_size = LargePageSizeInBytes;
          } else {
            _large_page_size = s;
          }
          success = true;
#if defined(IA32) || defined(AMD64)
        }
#endif
      } else {
        WARN("Large page is not supported by the processor.");
      }
    } else {
      WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
    }
  } else {
    WARN("Large page is not supported by the operating system.");
  }
#undef WARN

  const size_t default_page_size = (size_t) vm_page_size();
  if (success && _large_page_size > default_page_size) {
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = default_page_size;
    _page_sizes[2] = 0;
  }

  cleanup_after_large_page_init();
  UseLargePages = success;
}

// On win32, one cannot release just a part of reserved memory, it's an
// all or nothing deal.  When we split a reservation, we must break the
// reservation into two reservations.
void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
                                  bool realloc) {
  if (size > 0) {
    release_memory(base, size);
    if (realloc) {
      reserve_memory(split, base);
    }
    if (size != split) {
      reserve_memory(size - split, base + split);
    }
  }
}

// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
// Reserves 'size' bytes at an 'alignment'-aligned address by over-reserving,
// releasing, and re-reserving at the aligned address. Another thread may grab
// the gap between release and re-reserve, in which case the loop retries.
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);

    os::release_memory(extra_base, extra_size);

    aligned_base = os::reserve_memory(size, aligned_base);

  } while (aligned_base == NULL);

  return aligned_base;
}

char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if( Verbose && PrintMiscellaneous ) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if( Verbose && PrintMiscellaneous ) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available.
  return reserve_memory(bytes, requested_addr);
}

size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

bool os::can_execute_large_page_memory() {
  return true;
}

// Reserves and commits 'bytes' of large-page memory in one step, falling back
// to NULL (caller then uses small pages) if size/alignment don't fit the
// large page size.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) {
  assert(UseLargePages, "only for large pages");

  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
    return NULL; // Fallback to small pages.
  }

  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;

  // with large pages, there are two cases where we need to use Individual Allocation
  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages individually.");
    }
    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
    if (p_buf == NULL) {
      // give an appropriate warning message
      if (UseNUMAInterleaving) {
        warning("NUMA large page allocation failed, UseLargePages flag ignored");
      }
      if (UseLargePagesIndividualAllocation) {
        warning("Individually allocated large pages failed, "
                "use -XX:-UseLargePagesIndividualAllocation to turn off");
      }
      return NULL;
    }

    return p_buf;

  } else {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages in a single large chunk.");
    }
    // normal policy just allocate it all at once
    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
    if (res != NULL) {
      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
    }

    return res;
  }
}

bool os::release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  return release_memory(base, bytes);
}

void os::print_statistics() {
}

// Emits a diagnostic line for a failed commit, including the OS error text.
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}

bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  }
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
  return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
}

bool os::pd_release_memory(char* addr, size_t bytes) {
  // MEM_RELEASE requires size 0 and frees the whole reservation at 'addr'.
  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}

// Set protections specified
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
  case MEM_PROT_READ: p = PAGE_READONLY; break;
  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
  default:
    ShouldNotReachHere();
  }

  DWORD old_status;

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed) {
    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
                          "cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard page
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
  //
  // Pages in the region become guard pages. Any attempt to access a guard page
  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
  // the guard page status. Guard pages thus act as a one-time access alarm.
  return VirtualProtect(addr, bytes, p, &old_status) != 0;
}

bool os::guard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}

bool os::unguard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}

// NUMA hints and page-scanning are no-ops on this platform.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes)    { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
bool os::numa_topology_changed()                       { return false; }
size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id()                            { return 0; }
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // Provide an answer for UMA systems
    ids[0] = 0;
    return 1;
  } else {
    // check for size bigger than actual groups_num
    size = MIN2(size, numa_get_groups_num());
    for (int i = 0; i < (int)size; i++) {
      ids[i] = numa_node_list_holder.get_node_list_entry(i);
    }
    return size;
  }
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}

#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL

void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}

// RAII helper: raises the multimedia timer resolution to 1 ms for the
// lifetime of the object (only when the requested sleep is not a multiple
// of the ~10 ms default tick), and restores it in the destructor.
class HighResolutionInterval : public CHeapObj<mtThread> {
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
private:
  jlong resolution;
public:
  HighResolutionInterval(jlong ms) {
    resolution = ms % 10L;
    if (resolution != 0) {
      MMRESULT result = timeBeginPeriod(1L);
    }
  }
  ~HighResolutionInterval() {
    if (resolution != 0) {
      MMRESULT result = timeEndPeriod(1L);
    }
    resolution = 0L;
  }
};

// Sleeps for 'ms' milliseconds. When interruptable (Java threads only),
// waits on the thread's interrupt event and returns OS_INTRPT if it fires;
// otherwise performs a plain Sleep() and returns OS_TIMEOUT.
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  // WaitForMultipleObjects/Sleep take a DWORD timeout; sleep longer spans
  // in MAXDWORD-sized pieces.
  while(ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT)
      return res;
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    HighResolutionInterval *phri=NULL;
    if(!ForceTimeHighResolution)
      phri = new HighResolutionInterval( ms );
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}

//
// Short sleep, direct OS call.
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    Sleep(100000);  // ... 100 seconds at a time
  }
}

typedef BOOL (WINAPI * STTSignature)(void) ;

os::YieldResult os::NakedYield() {
  // Use either SwitchToThread() or Sleep(0)
  // Consider passing back the return value from SwitchToThread().
  if (os::Kernel32Dll::SwitchToThreadAvailable()) {
    return SwitchToThread() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY ;
  } else {
    Sleep(0);
  }
  return os::YIELD_UNKNOWN ;
}

void os::yield() {  os::NakedYield(); }

void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  Sleep(1);
}

// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.
// Default mapping from Java priorities (index) to Win32 thread priorities.
int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};

// Alternate, more aggressive mapping installed when ThreadPriorityPolicy == 1.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};

static int prio_init() {
  // If ThreadPriorityPolicy is 1, switch tables
  if (ThreadPriorityPolicy == 1) {
    int i;
    for (i = 0; i < CriticalPriority + 1; i++) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  if (UseCriticalJavaThreadPriority) {
    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority] ;
  }
  return 0;
}

OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) return OS_OK;
  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
  return ret ? OS_OK : OS_ERR;
}

OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) {
  if ( !UseThreadPriorities ) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

// Marks the target thread interrupted and wakes it from any of the waiting
// constructs (interrupt event, JSR-166 parker, ParkEvent).
void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread())
    ((JavaThread*)thread)->parker()->unpark();

  ParkEvent * ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

}


bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. By checking thread interrupt event to see
  // if the thread gets real interrupt thus prevent spurious wakeup.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}

// Gets a pc (hint) for a running thread. Currently used only for profiling.
ExtendedPC os::get_thread_pc(Thread* thread) {
  CONTEXT context;
  context.ContextFlags = CONTEXT_CONTROL;
  HANDLE handle = thread->osthread()->thread_handle();
#ifdef _M_IA64
  assert(0, "Fix get_thread_pc");
  return ExtendedPC(NULL);
#else
  if (GetThreadContext(handle, &context)) {
#ifdef _M_AMD64
    return ExtendedPC((address) context.Rip);
#else
    return ExtendedPC((address) context.Eip);
#endif
  } else {
    return ExtendedPC(NULL);
  }
#endif
}

// GetCurrentThreadId() returns DWORD
intx os::current_thread_id()  { return GetCurrentThreadId(); }

// Cached pid; 0 until set elsewhere, in which case _getpid() is used.
static int _initial_pid = 0;

int os::current_process_id()
{
  return (_initial_pid ? _initial_pid : _getpid());
}

// Cached system characteristics, filled in by initialize_system_info().
int    os::win32::_vm_page_size              = 0;
int    os::win32::_vm_allocation_granularity = 0;
int    os::win32::_processor_type            = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int    os::win32::_processor_level           = 0;
julong os::win32::_physical_memory           = 0;
size_t os::win32::_default_stack_size        = 0;

intx          os::win32::_os_thread_limit    = 0;
volatile intx os::win32::_os_thread_count    = 0;

bool   os::win32::_is_nt                     = false;
bool   os::win32::_is_windows_2003           = false;
bool   os::win32::_is_windows_server         = false;

// Queries the OS once at startup for page size, allocation granularity,
// processor info, physical memory, and Windows version/edition flags.
// NOTE(review): this function continues beyond the end of this excerpt.
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size    = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type  = si.dwProcessorType;
  _processor_level = si.wProcessorLevel;
  set_processor_count(si.dwNumberOfProcessors);

  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);

  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatusEx(&ms);
  _physical_memory = ms.ullTotalPhys;

  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch(oi.dwPlatformId) {
  case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
  case VER_PLATFORM_WIN32_NT:
    _is_nt = true;
    {
      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
      if (os_vers == 5002) {
        _is_windows_2003 = true;
      }
      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
          oi.wProductType == VER_NT_SERVER) {
        _is_windows_server = true;
      }
    }
    break;
  default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
assert((_default_stack_size & (_vm_page_size - 1)) == 0, 3854 "stack size not a multiple of page size"); 3855 3856 initialize_performance_counter(); 3857 3858 // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is 3859 // known to deadlock the system, if the VM issues to thread operations with 3860 // a too high frequency, e.g., such as changing the priorities. 3861 // The 6000 seems to work well - no deadlocks has been notices on the test 3862 // programs that we have seen experience this problem. 3863 if (!os::win32::is_nt()) { 3864 StarvationMonitorInterval = 6000; 3865 } 3866 } 3867 3868 3869 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, int ebuflen) { 3870 char path[MAX_PATH]; 3871 DWORD size; 3872 DWORD pathLen = (DWORD)sizeof(path); 3873 HINSTANCE result = NULL; 3874 3875 // only allow library name without path component 3876 assert(strchr(name, '\\') == NULL, "path not allowed"); 3877 assert(strchr(name, ':') == NULL, "path not allowed"); 3878 if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) { 3879 jio_snprintf(ebuf, ebuflen, 3880 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name); 3881 return NULL; 3882 } 3883 3884 // search system directory 3885 if ((size = GetSystemDirectory(path, pathLen)) > 0) { 3886 strcat(path, "\\"); 3887 strcat(path, name); 3888 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3889 return result; 3890 } 3891 } 3892 3893 // try Windows directory 3894 if ((size = GetWindowsDirectory(path, pathLen)) > 0) { 3895 strcat(path, "\\"); 3896 strcat(path, name); 3897 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3898 return result; 3899 } 3900 } 3901 3902 jio_snprintf(ebuf, ebuflen, 3903 "os::win32::load_windows_dll() cannot load %s from system directories.", name); 3904 return NULL; 3905 } 3906 3907 void os::win32::setmode_streams() { 3908 _setmode(_fileno(stdin), _O_BINARY); 3909 _setmode(_fileno(stdout), 
_O_BINARY); 3910 _setmode(_fileno(stderr), _O_BINARY); 3911 } 3912 3913 3914 bool os::is_debugger_attached() { 3915 return IsDebuggerPresent() ? true : false; 3916 } 3917 3918 3919 void os::wait_for_keypress_at_exit(void) { 3920 if (PauseAtExit) { 3921 fprintf(stderr, "Press any key to continue...\n"); 3922 fgetc(stdin); 3923 } 3924 } 3925 3926 3927 int os::message_box(const char* title, const char* message) { 3928 int result = MessageBox(NULL, message, title, 3929 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY); 3930 return result == IDYES; 3931 } 3932 3933 int os::allocate_thread_local_storage() { 3934 return TlsAlloc(); 3935 } 3936 3937 3938 void os::free_thread_local_storage(int index) { 3939 TlsFree(index); 3940 } 3941 3942 3943 void os::thread_local_storage_at_put(int index, void* value) { 3944 TlsSetValue(index, value); 3945 assert(thread_local_storage_at(index) == value, "Just checking"); 3946 } 3947 3948 3949 void* os::thread_local_storage_at(int index) { 3950 return TlsGetValue(index); 3951 } 3952 3953 3954 #ifndef PRODUCT 3955 #ifndef _WIN64 3956 // Helpers to check whether NX protection is enabled 3957 int nx_exception_filter(_EXCEPTION_POINTERS *pex) { 3958 if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 3959 pex->ExceptionRecord->NumberParameters > 0 && 3960 pex->ExceptionRecord->ExceptionInformation[0] == 3961 EXCEPTION_INFO_EXEC_VIOLATION) { 3962 return EXCEPTION_EXECUTE_HANDLER; 3963 } 3964 return EXCEPTION_CONTINUE_SEARCH; 3965 } 3966 3967 void nx_check_protection() { 3968 // If NX is enabled we'll get an exception calling into code on the stack 3969 char code[] = { (char)0xC3 }; // ret 3970 void *code_ptr = (void *)code; 3971 __try { 3972 __asm call code_ptr 3973 } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) { 3974 tty->print_raw_cr("NX protection detected."); 3975 } 3976 } 3977 #endif // _WIN64 3978 #endif // PRODUCT 3979 3980 // this is called _before_ the global arguments 
// Early VM initialization, run before argument parsing: capture the pid,
// seed the RNG, cache system info, binary-mode the std streams, and
// duplicate a real (non-pseudo) handle for the main thread.
void os::init(void) {
  _initial_pid = _getpid();

  init_random(1234567);

  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // For better scalability on MP systems (must be called after initialize_system_info)
#ifndef PRODUCT
  if (is_MP()) {
    NoYieldsInMicrolock = true;
  }
#endif
  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
    os::win32::is_windows_2003());

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();
}

// To install functions for atexit processing
extern "C" {
  // Flush/close perf memory at process exit (registered via atexit below).
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

static jint initSock();

// this is called _after_ the global arguments have been parsed
// Sets up the safepoint polling page, the memory-serialize page, stack-size
// policy, atexit hooks, thread priorities, NUMA flags, and sockets.
// Returns JNI_OK on success, JNI_ERR otherwise.
jint os::init_2(void) {
  // Allocate a single page and mark it as readable for safepoint polling
  address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
  guarantee( polling_page != NULL, "Reserve Failed for polling page");

  address return_page  = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
  guarantee( return_page != NULL, "Commit Failed for polling page");

  os::set_polling_page( polling_page );

#ifndef PRODUCT
  if( Verbose && PrintMiscellaneous )
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
    guarantee( mem_serialize_page != NULL, "Reserve Failed for memory serialize page");

    return_page  = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
    guarantee( return_page != NULL, "Commit Failed for memory serialize page");

    os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
    if(Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  // Setup Windows Exceptions

  // for debugging float code generation bugs
  if (ForceFloatExceptions) {
#ifndef _WIN64
    static long fp_control_word = 0;
    __asm { fstcw fp_control_word }
    // see Intel PPro Manual, Vol. 2, p 7-16
    // Only the 'invalid' exception is unmasked below; the other named
    // constants are kept for reference.
    const long precision = 0x20;
    const long underflow = 0x10;
    const long overflow  = 0x08;
    const long zero_div  = 0x04;
    const long denorm    = 0x02;
    const long invalid   = 0x01;
    fp_control_word |= invalid;
    __asm { fldcw fp_control_word }
#endif
  }

  // If stack_commit_size is 0, windows will reserve the default size,
  // but only commit a small portion of it.
  size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
  size_t default_reserve_size = os::win32::default_stack_size();
  size_t actual_reserve_size = stack_commit_size;
  if (stack_commit_size < default_reserve_size) {
    // If stack_commit_size == 0, we want this too
    actual_reserve_size = default_reserve_size;
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size.  Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  size_t min_stack_allowed =
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
            2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
  if (actual_reserve_size < min_stack_allowed) {
    // NOTE(review): min_stack_allowed / K is a size_t printed with %d;
    // harmless for realistic values but SIZE_FORMAT would be exact.
    tty->print_cr("\nThe stack size specified is too small, "
                  "Specify at least %dk",
                  min_stack_allowed / K);
    return JNI_ERR;
  }

  JavaThread::set_stack_size_at_create(stack_commit_size);

  // Calculate theoretical max. size of Threads to guard gainst artifical
  // out-of-memory situations, where all available address-space has been
  // reserved by thread stacks.
  assert(actual_reserve_size != 0, "Must have a stack");

  // Calculate the thread limit when we should start doing Virtual Memory
  // banging. Currently when the threads will have used all but 200Mb of space.
  //
  // TODO: consider performing a similar calculation for commit size instead
  // as reserve size, since on a 64-bit platform we'll run into that more
  // often than running out of virtual memory space.  We can use the
  // lower value of the two calculations as the os_thread_limit.
  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);

  // at exit methods are called in the reverse order of their registration.
  // there is no limit to the number of functions registered. atexit does
  // not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

#ifndef _WIN64
  // Print something if NX is enabled (win32 on AMD64)
  NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
#endif

  // initialize thread priority policy
  prio_init();

  if (UseNUMA && !ForceNUMA) {
    UseNUMA = false; // We don't fully support this yet
  }

  if (UseNUMAInterleaving) {
    // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
    bool success = numa_interleaving_init();
    if (!success) UseNUMAInterleaving = false;
  }

  if (initSock() != JNI_OK) {
    return JNI_ERR;
  }

  return JNI_OK;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  DWORD old_status;
  if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status) )
    fatal("Could not disable polling page");
};

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  DWORD old_status;
  if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status) )
    fatal("Could not enable polling page");
};


// stat() wrapper that first converts 'path' to native form; optionally
// normalizes st_mtime to be timezone/DST independent (see below).
int os::stat(const char *path, struct stat *sbuf) {
  char pathbuf[MAX_PATH];
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  int ret = ::stat(pathbuf, sbuf);
  if (sbuf != NULL && UseUTCFileTimestamp) {
    // Fix for 6539723.  st_mtime returned from stat() is dependent on
    // the system timezone and so can return different values for the
    // same file if/when daylight savings time changes.  This adjustment
    // makes sure the same timestamp is returned regardless of the TZ.
    //
    // See:
    // http://msdn.microsoft.com/library/
    //    default.asp?url=/library/en-us/sysinfo/base/
    //    time_zone_information_str.asp
    // and
    // http://msdn.microsoft.com/library/default.asp?url=
    //    /library/en-us/sysinfo/base/settimezoneinformation.asp
    //
    // NOTE: there is a insidious bug here:  If the timezone is changed
    // after the call to stat() but before 'GetTimeZoneInformation()', then
    // the adjustment we do here will be wrong and we'll return the wrong
    // value (which will likely end up creating an invalid class data
    // archive).  Absent a better API for this, or some time zone locking
    // mechanism, we'll have to live with this risk.
    TIME_ZONE_INFORMATION tz;
    DWORD tzid = GetTimeZoneInformation(&tz);
    int daylightBias =
      (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias;
    sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
  }
  return ret;
}


// Combine a FILETIME's two 32-bit halves into a signed 64-bit value
// (100-nanosecond units).
#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))


// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.
// current_thread_cpu_time() is not optimized for Windows yet
jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
}

jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns.
  return os::thread_cpu_time(thread, true /* user+sys */);
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
}

// Returns the CPU time consumed by 'thread' in nanoseconds:
// user+system when user_sys_cpu_time is true, user time only otherwise.
// Returns -1 if GetThreadTimes fails; on non-NT systems falls back to
// wall-clock milliseconds from timeGetTime() scaled to nanoseconds.
jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
  // This code is copy from classic VM -> hpi::sysThreadCPUTime
  // If this function changes, os::is_thread_cpu_time_supported() should too
  if (os::win32::is_nt()) {
    FILETIME CreationTime;
    FILETIME ExitTime;
    FILETIME KernelTime;
    FILETIME UserTime;

    if ( GetThreadTimes(thread->osthread()->thread_handle(),
                    &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
      return -1;
    else
      if (user_sys_cpu_time) {
        // FILETIME values are in 100ns units; *100 converts to nanoseconds.
        return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
      } else {
        return FT2INT64(UserTime) * 100;
      }
  } else {
    return (jlong) timeGetTime() * 1000000;
  }
}

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}

// True iff per-thread CPU time is available (NT-family and GetThreadTimes
// succeeds on the current thread).
bool os::is_thread_cpu_time_supported() {
  // see os::thread_cpu_time
  if (os::win32::is_nt()) {
    FILETIME CreationTime;
    FILETIME ExitTime;
    FILETIME KernelTime;
    FILETIME UserTime;

    if ( GetThreadTimes(GetCurrentThread(),
                    &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
      return false;
    else
      return true;
  } else {
    return false;
  }
}

// Windows does't provide a loadavg primitive so this is stubbed out for now.
// It does have primitives (PDH API) to get CPU usage and run queue length.
// "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
// If we wanted to implement loadavg on Windows, we have a few options:
//
// a) Query CPU usage and run queue length and "fake" an answer by
//    returning the CPU usage if it's under 100%, and the run queue
//    length otherwise.  It turns out that querying is pretty slow
//    on Windows, on the order of 200 microseconds on a fast machine.
//    Note that on the Windows the CPU usage value is the % usage
//    since the last time the API was called (and the first call
//    returns 100%), so we'd have to deal with that as well.
//
// b) Sample the "fake" answer using a sampling thread and store
//    the answer in a global variable.  The call to loadavg would
//    just return the value of the global, avoiding the slow query.
//
// c) Sample a better answer using exponential decay to smooth the
//    value.  This is basically the algorithm used by UNIX kernels.
//
// Note that sampling thread starvation could affect both (b) and (c).
// Not implemented on Windows (see discussion above); always returns -1.
int os::loadavg(double loadavg[], int nelem) {
  return -1;
}


// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
bool os::dont_yield() {
  return DontYieldALot;
}

// This method is a slightly reworked copy of JDK's sysOpen
// from src/windows/hpi/src/sys_api_md.c
// Opens 'path' (after native-path conversion) in binary, non-inheritable mode.

int os::open(const char *path, int oflag, int mode) {
  char pathbuf[MAX_PATH];

  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
}

FILE* os::open(int fd, const char* mode) {
  return ::_fdopen(fd, mode);
}

// Is a (classpath) directory empty?
// Note: returns true when FindFirstFile fails (e.g. nonexistent path).
bool os::dir_is_empty(const char* path) {
  WIN32_FIND_DATA fd;
  HANDLE f = FindFirstFile(path, &fd);
  if (f == INVALID_HANDLE_VALUE) {
    return true;
  }
  FindClose(f);
  return false;
}

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
  if (!rewrite_existing) {
    oflags |= _O_EXCL;
  }
  return ::open(path, oflags, _S_IREAD | _S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}


jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::_lseeki64(fd, offset, whence);
}

// Positioned read of nBytes from fd at 'offset' via ReadFile+OVERLAPPED.
// Returns the number of bytes read, or 0 on failure.
size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  OVERLAPPED ov;
  DWORD nread;
  BOOL result;

  ZeroMemory(&ov, sizeof(ov));
  ov.Offset = (DWORD)offset;
  ov.OffsetHigh = (DWORD)(offset >> 32);

  HANDLE h = (HANDLE)::_get_osfhandle(fd);

  result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);

  return result ? nread : 0;
}

// This method is a slightly reworked copy of JDK's sysNativePath
// from src/windows/hpi/src/path_md.c

/* Convert a pathname to native format.  On win32, this involves forcing all
   separators to be '\\' rather than '/' (both are legal inputs, but Win95
   sometimes rejects '/') and removing redundant separators.  The input path is
   assumed to have been converted into the character encoding used by the local
   system.  Because this might be a double-byte encoding, care is taken to
   treat double-byte lead characters correctly.

   This procedure modifies the given path in place, as the result is never
   longer than the original.  There is no error return; this operation always
   succeeds. */
char * os::native_path(char *path) {
  char *src = path, *dst = path, *end = path;
  char *colon = NULL;           /* If a drive specifier is found, this will
                                   point to the colon following the drive
                                   letter */

  /* Assumption: '/', '\\', ':', and drive letters are never lead bytes */
  assert(((!::IsDBCSLeadByte('/'))
    && (!::IsDBCSLeadByte('\\'))
    && (!::IsDBCSLeadByte(':'))),
    "Illegal lead byte");

  /* Check for leading separators */
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    /* Remove leading separators if followed by drive specifier.  This
       hack is necessary to support file URLs containing drive
       specifiers (e.g., "file://c:/path").  As a side effect,
       "/c:/path" can be used as an alternative to "c:/path". */
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      /* UNC pathname: Retain first separator; leave src pointed at
         second separator so that further separators will be collapsed
         into the second separator.  The result will be a pathname
         beginning with "\\\\" followed (most likely) by a host name. */
      src = dst = path + 1;
      path[0] = '\\';     /* Force first separator to '\\' */
    }
  }

  end = dst;

  /* Remove redundant separators from remainder of path, forcing all
     separators to be '\\' rather than '/'. Also, single byte space
     characters are removed from the end of the path because those
     are not legal ending characters on this operating system.
  */
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      while (isfilesep(*src)) src++;
      if (*src == '\0') {
        /* Check for trailing separator */
        end = dst;
        if (colon == dst - 2) break;                      /* "z:\\" */
        if (dst == path + 1) break;                       /* "\\" */
        if (dst == path + 2 && isfilesep(path[0])) {
          /* "\\\\" is not collapsed to "\\" because "\\\\" marks the
             beginning of a UNC pathname.  Even though it is not, by
             itself, a valid UNC pathname, we leave it as is in order
             to be consistent with the path canonicalizer as well
             as the win32 APIs, which treat this case as an invalid
             UNC pathname rather than as an alias for the root
             directory of the current drive. */
          break;
        }
        end = --dst;  /* Path does not denote a root directory, so
                         remove trailing separator */
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) { /* Copy a double-byte character */
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else {         /* Copy a single-byte character */
        char c = *src++;
        *dst++ = c;
        /* Space is not a legal ending character */
        if (c != ' ') end = dst;
      }
    }
  }

  *end = '\0';

  /* For "z:", add "." to work around a bug in the C runtime library */
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}

// This code is a copy of JDK's sysSetLength
// from src/windows/hpi/src/sys_api_md.c
// Truncate (or extend) the file underlying 'fd' to 'length' bytes.

int os::ftruncate(int fd, jlong length) {
  HANDLE h = (HANDLE)::_get_osfhandle(fd);
  long high = (long)(length >> 32);
  DWORD ret;

  if (h == (HANDLE)(-1)) {
    return -1;
  }

  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
    return -1;
  }

  if (::SetEndOfFile(h) == FALSE) {
    return -1;
  }

  return 0;
}


// This code is a copy of JDK's sysSync
// from src/windows/hpi/src/sys_api_md.c
// except for the legacy workaround for a bug in Win 98

int os::fsync(int fd) {
  HANDLE handle = (HANDLE)::_get_osfhandle(fd);

  if ( (!::FlushFileBuffers(handle)) &&
         (GetLastError() != ERROR_ACCESS_DENIED) ) {
    /* from winerror.h */
    return -1;
  }
  return 0;
}

static int nonSeekAvailable(int, long *);
static int stdinAvailable(int, long *);

#define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
#define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)

// This code is a copy of JDK's sysAvailable
// from src/windows/hpi/src/sys_api_md.c
// Report the number of bytes available for reading on 'fd' without blocking.
// Character devices and pipes are delegated to stdinAvailable /
// nonSeekAvailable; regular files use the seek position vs. file end.
// Returns TRUE on success (with *bytes set), FALSE on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      int ret;
      long lpbytes;
      if (fd == 0) {
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
      return FALSE;
    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
      return FALSE;
    }
    *bytes = end - cur;
    return TRUE;
  } else {
    return FALSE;
  }
}

// This code is a copy of JDK's nonSeekAvailable
// from src/windows/hpi/src/sys_api_md.c

static int nonSeekAvailable(int fd, long *pbytes) {
  /* This is used for available on non-seekable devices
    * (like both named and anonymous pipes, such as pipes
    *  connected to an exec'd process).
    * Standard Input is a special case.
    *
    */
  HANDLE han;

  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
    return FALSE;
  }

  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
    /* PeekNamedPipe fails when at EOF.  In that case we
     * simply make *pbytes = 0 which is consistent with the
     * behavior we get on Solaris when an fd is at EOF.
     * The only alternative is to raise an Exception,
     * which isn't really warranted.
     */
    if (::GetLastError() != ERROR_BROKEN_PIPE) {
      return FALSE;
    }
    *pbytes = 0;
  }
  return TRUE;
}

#define MAX_INPUT_EVENTS 2000

// This code is a copy of JDK's stdinAvailable
// from src/windows/hpi/src/sys_api_md.c
// Estimates the number of bytes readable from the console: counts key-down
// events up to (and including) the last carriage return seen.

static int stdinAvailable(int fd, long *pbytes) {
  HANDLE han;
  DWORD numEventsRead = 0;      /* Number of events read from buffer */
  DWORD numEvents = 0;  /* Number of events in buffer */
  DWORD i = 0;  /* Loop index */
  DWORD curLength = 0;  /* Position marker */
  DWORD actualLength = 0;       /* Number of bytes readable */
  BOOL error = FALSE;         /* Error holder */
  INPUT_RECORD *lpBuffer;     /* Pointer to records of input events */

  if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
    return FALSE;
  }

  /* Construct an array of input records in the console buffer */
  error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
  if (error == 0) {
    return nonSeekAvailable(fd, pbytes);
  }

  /* lpBuffer must fit into 64K or else PeekConsoleInput fails */
  if (numEvents > MAX_INPUT_EVENTS) {
    numEvents = MAX_INPUT_EVENTS;
  }

  lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
  if (lpBuffer == NULL) {
    return FALSE;
  }

  error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
  if (error == 0) {
    os::free(lpBuffer, mtInternal);
    return FALSE;
  }

  /* Examine input records for the number of bytes available */
  // NOTE(review): the loop bound is numEvents (the requested count) rather
  // than numEventsRead (the count PeekConsoleInput actually filled in);
  // this mirrors the original JDK sysAvailable code, but records beyond
  // numEventsRead are uninitialized — confirm against upstream before
  // changing.
  for(i=0; i<numEvents; i++) {
    if (lpBuffer[i].EventType == KEY_EVENT) {

      KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
                                          &(lpBuffer[i].Event);
      if (keyRecord->bKeyDown == TRUE) {
        CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
        curLength++;
        if (*keyPressed == '\r') {
          actualLength = curLength;
        }
      }
    }
  }

  if(lpBuffer != NULL) {
    os::free(lpBuffer, mtInternal);
  }

  *pbytes = (long) actualLength;
  return TRUE;
}

// Map a block of memory.
// Maps 'bytes' of 'file_name' at 'file_offset' to address 'addr'.  When
// allow_exec is set, the data is copied into anonymous memory instead of
// being file-mapped (see comment below) and then marked executable.
// Returns the base address, or NULL on failure.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                     char *addr, size_t bytes, bool read_only,
                     bool allow_exec) {
  HANDLE hFile;
  char* base;

  hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
                     OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
  // NOTE(review): per the Win32 API, CreateFile signals failure with
  // INVALID_HANDLE_VALUE, not NULL — this check will not catch a failed
  // open, and the error path below is effectively dead.
  if (hFile == NULL) {
    if (PrintMiscellaneous && Verbose) {
      DWORD err = GetLastError();
      tty->print_cr("CreateFile() failed: GetLastError->%ld.", err);
    }
    return NULL;
  }

  if (allow_exec) {
    // CreateFileMapping/MapViewOfFileEx can't map executable memory
    // unless it comes from a PE image (which the shared archive is not.)
    // Even VirtualProtect refuses to give execute access to mapped memory
    // that was not previously executable.
    //
    // Instead, stick the executable region in anonymous memory.  Yuck.
    // Penalty is that ~4 pages will not be shareable - in the future
    // we might consider DLLizing the shared archive with a proper PE
    // header so that mapping executable + sharing is possible.

    base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
                                PAGE_READWRITE);
    if (base == NULL) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err);
      }
      CloseHandle(hFile);
      return NULL;
    }

    DWORD bytes_read;
    OVERLAPPED overlapped;
    overlapped.Offset = (DWORD)file_offset;
    overlapped.OffsetHigh = 0;
    overlapped.hEvent = NULL;
    // ReadFile guarantees that if the return value is true, the requested
    // number of bytes were read before returning.
    bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
    if (!res) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("ReadFile() failed: GetLastError->%ld.", err);
      }
      release_memory(base, bytes);
      CloseHandle(hFile);
      return NULL;
    }
  } else {
    HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
                                    NULL /*file_name*/);
    if (hMap == NULL) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err);
      }
      CloseHandle(hFile);
      return NULL;
    }

    DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
    base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
                                  (DWORD)bytes, addr);
    if (base == NULL) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err);
      }
      CloseHandle(hMap);
      CloseHandle(hFile);
      return NULL;
    }

    if (CloseHandle(hMap) == 0) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err);
      }
      CloseHandle(hFile);
      return base;
    }
  }

  if (allow_exec) {
    DWORD old_protect;
    DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
    bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;

    if (!res) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err);
      }
      // Don't consider this a hard error, on IA32 even if the
      // VirtualProtect fails, we should still be able to execute
      CloseHandle(hFile);
      return base;
    }
  }

  if (CloseHandle(hFile) == 0) {
    if (PrintMiscellaneous && Verbose) {
      DWORD err = GetLastError();
      tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err);
    }
    return base;
  }

  return base;
}


// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                       char *addr, size_t bytes, bool read_only,
                       bool allow_exec) {
  // This OS does not allow existing memory maps to be remapped so we
  // have to unmap the memory before we remap it.
  if (!os::unmap_memory(addr, bytes)) {
    return NULL;
  }

  // There is a very small theoretical window between the unmap_memory()
  // call above and the map_memory() call below where a thread in native
  // code may be able to access an address that is no longer mapped.

  return os::map_memory(fd, file_name, file_offset, addr, bytes,
           read_only, allow_exec);
}


// Unmap a block of memory.
// Returns true=success, otherwise false.
4812 4813 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4814 BOOL result = UnmapViewOfFile(addr); 4815 if (result == 0) { 4816 if (PrintMiscellaneous && Verbose) { 4817 DWORD err = GetLastError(); 4818 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err); 4819 } 4820 return false; 4821 } 4822 return true; 4823 } 4824 4825 void os::pause() { 4826 char filename[MAX_PATH]; 4827 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4828 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4829 } else { 4830 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4831 } 4832 4833 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4834 if (fd != -1) { 4835 struct stat buf; 4836 ::close(fd); 4837 while (::stat(filename, &buf) == 0) { 4838 Sleep(100); 4839 } 4840 } else { 4841 jio_fprintf(stderr, 4842 "Could not open pause file '%s', continuing immediately.\n", filename); 4843 } 4844 } 4845 4846 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() { 4847 assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread"); 4848 } 4849 4850 /* 4851 * See the caveats for this class in os_windows.hpp 4852 * Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4853 * into this method and returns false. If no OS EXCEPTION was raised, returns 4854 * true. 4855 * The callback is supposed to provide the method that should be protected. 
 */
bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
  assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
  assert(!WatcherThread::watcher_thread()->has_crash_protection(),
      "crash_protection already set?");

  bool success = true;
  // Structured Exception Handling: any OS exception raised inside cb.call()
  // unwinds to the __except block instead of crashing the VM.
  __try {
    WatcherThread::watcher_thread()->set_crash_protection(this);
    cb.call();
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // only for protection, nothing to do
    success = false;
  }
  WatcherThread::watcher_thread()->set_crash_protection(NULL);
  return success;
}

// An Event wraps a win32 "CreateEvent" kernel handle.
//
// We have a number of choices regarding "CreateEvent" win32 handle leakage:
//
// 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
//     field, and call CloseHandle() on the win32 event handle.  Unpark() would
//     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
//     In addition, an unpark() operation might fetch the handle field, but the
//     event could recycle between the fetch and the SetEvent() operation.
//     SetEvent() would either fail because the handle was invalid, or inadvertently work,
//     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
//     on an stale but recycled handle would be harmless, but in practice this might
//     confuse other non-Sun code, so it's not a viable approach.
//
// 2:  Once a win32 event handle is associated with an Event, it remains associated
//     with the Event.  The event handle is never closed.  This could be construed
//     as handle leakage, but only up to the maximum # of threads that have been extant
//     at any one time.  This shouldn't be an issue, as windows platforms typically
//     permit a process to have hundreds of thousands of open handles.
//
// 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
//     and release unused handles.
//
// 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
//     It's not clear, however, that we wouldn't be trading one type of leak for another.
//
// 5.  Use an RCU-like mechanism (Read-Copy Update).
//     Or perhaps something similar to Maged Michael's "Hazard pointers".
//
// We use (2).
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
// 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
//     to recover from (or at least detect) the dreaded Windows 841176 bug.
// 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
//     into a single win32 CreateEvent() handle.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block
//
// _Event serves as a restricted-range semaphore :
//    -1 : thread is blocked
//     0 : neutral  - thread is running or ready
//     1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be
// with explicit "PARKED" and "SIGNALED" bits.

// Timed park: blocks the calling thread for up to Millis milliseconds
// (Millis must be > 0), or until unpark() is called. Returns OS_OK if the
// thread was unparked (or found a pending permit), OS_TIMEOUT otherwise.
int os::PlatformEvent::park (jlong Millis) {
  guarantee (_ParkHandle != NULL , "Invariant") ;
  guarantee (Millis > 0          , "Invariant") ;
  int v ;

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.

  // Atomically decrement _Event: consume a permit (1 -> 0) or announce
  // that we are about to block (0 -> -1).
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee ((v == 0) || (v == 1), "invariant") ;
  if (v != 0) return OS_OK ;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000 ;
  DWORD rv = WAIT_TIMEOUT ;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis ;     // set prd = MAX (Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT ;
    }
    rv = ::WaitForSingleObject (_ParkHandle, prd) ;
    assert (rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed") ;
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd ;
    }
  }
  v = _Event ;
  _Event = 0 ;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence() ;
  // If we encounter a nearly simultanous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT ;
}

// Untimed park: blocks until unpark() signals the event (or a permit is
// already pending).
void os::PlatformEvent::park () {
  guarantee (_ParkHandle != NULL, "Invariant") ;
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  int v ;
  // Same CAS-decrement protocol as the timed variant above.
  for (;;) {
    v = _Event ;
    if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
  }
  guarantee ((v == 0) || (v == 1), "invariant") ;
  if (v != 0) return ;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject (_ParkHandle, INFINITE) ;
    assert (rv == WAIT_OBJECT_0, "WaitForSingleObject failed") ;
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case can
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0 ;
  OrderAccess::fence() ;
  guarantee (_Event >= 0, "invariant") ;
}

void os::PlatformEvent::unpark() {
  guarantee (_ParkHandle != NULL, "Invariant") ;

  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  //          That is, we can safely transition _Event from -1 to either
  //          0 or 1. Forcing 1 is slightly more efficient for back-to-back
  //          unpark() calls.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // Only signal the kernel event if a thread was actually parked (-1).
  if (Atomic::xchg(1, &_Event) >= 0) return;

  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

/*
 * The Windows implementation of Park is very straightforward: Basic
 * operations on Win32 Events turn out to have the right semantics to
 * use them directly. We opportunistically resuse the event inherited
 * from Monitor.
 */


// time: < 0 means don't wait; == 0 with !isAbsolute means wait forever;
// absolute times are in millis since the epoch, relative times in nanos.
void Parker::park(bool isAbsolute, jlong time) {
  guarantee (_ParkEvent != NULL, "invariant") ;
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  }
  else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  }
  else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) // already elapsed
      return;
  }
  else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0)   // Wait for the minimal time unit if zero
      time = 1;
  }

  JavaThread* thread = (JavaThread*)(Thread::current());
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    ResetEvent(_ParkEvent);
    return;
  }
  else {
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jt->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (jt->handle_special_suspend_equivalent_condition()) {
      jt->java_suspend_self();
    }
  }
}

void Parker::unpark() {
  guarantee (_ParkEvent != NULL, "invariant") ;
  SetEvent(_ParkEvent);
}

// Run the
specified command in a separate process. Return its exit value, 5093 // or -1 on failure (e.g. can't create a new process). 5094 int os::fork_and_exec(char* cmd) { 5095 STARTUPINFO si; 5096 PROCESS_INFORMATION pi; 5097 5098 memset(&si, 0, sizeof(si)); 5099 si.cb = sizeof(si); 5100 memset(&pi, 0, sizeof(pi)); 5101 BOOL rslt = CreateProcess(NULL, // executable name - use command line 5102 cmd, // command line 5103 NULL, // process security attribute 5104 NULL, // thread security attribute 5105 TRUE, // inherits system handles 5106 0, // no creation flags 5107 NULL, // use parent's environment block 5108 NULL, // use parent's starting directory 5109 &si, // (in) startup information 5110 &pi); // (out) process information 5111 5112 if (rslt) { 5113 // Wait until child process exits. 5114 WaitForSingleObject(pi.hProcess, INFINITE); 5115 5116 DWORD exit_code; 5117 GetExitCodeProcess(pi.hProcess, &exit_code); 5118 5119 // Close process and thread handles. 5120 CloseHandle(pi.hProcess); 5121 CloseHandle(pi.hThread); 5122 5123 return (int)exit_code; 5124 } else { 5125 return -1; 5126 } 5127 } 5128 5129 //-------------------------------------------------------------------------------------------------- 5130 // Non-product code 5131 5132 static int mallocDebugIntervalCounter = 0; 5133 static int mallocDebugCounter = 0; 5134 bool os::check_heap(bool force) { 5135 if (++mallocDebugCounter < MallocVerifyStart && !force) return true; 5136 if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) { 5137 // Note: HeapValidate executes two hardware breakpoints when it finds something 5138 // wrong; at these points, eax contains the address of the offending block (I think). 5139 // To get to the exlicit error message(s) below, just continue twice. 
5140 HANDLE heap = GetProcessHeap(); 5141 { HeapLock(heap); 5142 PROCESS_HEAP_ENTRY phe; 5143 phe.lpData = NULL; 5144 while (HeapWalk(heap, &phe) != 0) { 5145 if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) && 5146 !HeapValidate(heap, 0, phe.lpData)) { 5147 tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter); 5148 tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData); 5149 fatal("corrupted C heap"); 5150 } 5151 } 5152 DWORD err = GetLastError(); 5153 if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) { 5154 fatal(err_msg("heap walk aborted with error %d", err)); 5155 } 5156 HeapUnlock(heap); 5157 } 5158 mallocDebugIntervalCounter = 0; 5159 } 5160 return true; 5161 } 5162 5163 5164 bool os::find(address addr, outputStream* st) { 5165 // Nothing yet 5166 return false; 5167 } 5168 5169 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) { 5170 DWORD exception_code = e->ExceptionRecord->ExceptionCode; 5171 5172 if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) { 5173 JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow(); 5174 PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord; 5175 address addr = (address) exceptionRecord->ExceptionInformation[1]; 5176 5177 if (os::is_memory_serialize_page(thread, addr)) 5178 return EXCEPTION_CONTINUE_EXECUTION; 5179 } 5180 5181 return EXCEPTION_CONTINUE_SEARCH; 5182 } 5183 5184 // We don't build a headless jre for Windows 5185 bool os::is_headless_jre() { return false; } 5186 5187 static jint initSock() { 5188 WSADATA wsadata; 5189 5190 if (!os::WinSock2Dll::WinSock2Available()) { 5191 jio_fprintf(stderr, "Could not load Winsock (error: %d)\n", 5192 ::GetLastError()); 5193 return JNI_ERR; 5194 } 5195 5196 if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) { 5197 jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n", 5198 ::GetLastError()); 5199 return JNI_ERR; 5200 } 5201 return JNI_OK; 
5202 } 5203 5204 struct hostent* os::get_host_by_name(char* name) { 5205 return (struct hostent*)os::WinSock2Dll::gethostbyname(name); 5206 } 5207 5208 int os::socket_close(int fd) { 5209 return ::closesocket(fd); 5210 } 5211 5212 int os::socket_available(int fd, jint *pbytes) { 5213 int ret = ::ioctlsocket(fd, FIONREAD, (u_long*)pbytes); 5214 return (ret < 0) ? 0 : 1; 5215 } 5216 5217 int os::socket(int domain, int type, int protocol) { 5218 return ::socket(domain, type, protocol); 5219 } 5220 5221 int os::listen(int fd, int count) { 5222 return ::listen(fd, count); 5223 } 5224 5225 int os::connect(int fd, struct sockaddr* him, socklen_t len) { 5226 return ::connect(fd, him, len); 5227 } 5228 5229 int os::accept(int fd, struct sockaddr* him, socklen_t* len) { 5230 return ::accept(fd, him, len); 5231 } 5232 5233 int os::sendto(int fd, char* buf, size_t len, uint flags, 5234 struct sockaddr* to, socklen_t tolen) { 5235 5236 return ::sendto(fd, buf, (int)len, flags, to, tolen); 5237 } 5238 5239 int os::recvfrom(int fd, char *buf, size_t nBytes, uint flags, 5240 sockaddr* from, socklen_t* fromlen) { 5241 5242 return ::recvfrom(fd, buf, (int)nBytes, flags, from, fromlen); 5243 } 5244 5245 int os::recv(int fd, char* buf, size_t nBytes, uint flags) { 5246 return ::recv(fd, buf, (int)nBytes, flags); 5247 } 5248 5249 int os::send(int fd, char* buf, size_t nBytes, uint flags) { 5250 return ::send(fd, buf, (int)nBytes, flags); 5251 } 5252 5253 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) { 5254 return ::send(fd, buf, (int)nBytes, flags); 5255 } 5256 5257 int os::timeout(int fd, long timeout) { 5258 fd_set tbl; 5259 struct timeval t; 5260 5261 t.tv_sec = timeout / 1000; 5262 t.tv_usec = (timeout % 1000) * 1000; 5263 5264 tbl.fd_count = 1; 5265 tbl.fd_array[0] = fd; 5266 5267 return ::select(1, &tbl, 0, 0, &t); 5268 } 5269 5270 int os::get_host_name(char* name, int namelen) { 5271 return ::gethostname(name, namelen); 5272 } 5273 5274 int 
os::socket_shutdown(int fd, int howto) { 5275 return ::shutdown(fd, howto); 5276 } 5277 5278 int os::bind(int fd, struct sockaddr* him, socklen_t len) { 5279 return ::bind(fd, him, len); 5280 } 5281 5282 int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) { 5283 return ::getsockname(fd, him, len); 5284 } 5285 5286 int os::get_sock_opt(int fd, int level, int optname, 5287 char* optval, socklen_t* optlen) { 5288 return ::getsockopt(fd, level, optname, optval, optlen); 5289 } 5290 5291 int os::set_sock_opt(int fd, int level, int optname, 5292 const char* optval, socklen_t optlen) { 5293 return ::setsockopt(fd, level, optname, optval, optlen); 5294 } 5295 5296 // WINDOWS CONTEXT Flags for THREAD_SAMPLING 5297 #if defined(IA32) 5298 # define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS) 5299 #elif defined (AMD64) 5300 # define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT) 5301 #endif 5302 5303 // returns true if thread could be suspended, 5304 // false otherwise 5305 static bool do_suspend(HANDLE* h) { 5306 if (h != NULL) { 5307 if (SuspendThread(*h) != ~0) { 5308 return true; 5309 } 5310 } 5311 return false; 5312 } 5313 5314 // resume the thread 5315 // calling resume on an active thread is a no-op 5316 static void do_resume(HANDLE* h) { 5317 if (h != NULL) { 5318 ResumeThread(*h); 5319 } 5320 } 5321 5322 // retrieve a suspend/resume context capable handle 5323 // from the tid. Caller validates handle return value. 
// Opens a thread handle with the access rights needed for suspend/resume
// plus context retrieval. *h is left as OpenThread's result (possibly NULL).
void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

//
// Thread sampling implementation
//
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}


// Kernel32 API
// Function-pointer types for Kernel32 entry points that may be absent on
// older Windows versions; resolved lazily via GetProcAddress.
typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG);
typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG);
typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG);

GetLargePageMinimum_Fn os::Kernel32Dll::_GetLargePageMinimum = NULL;
VirtualAllocExNuma_Fn os::Kernel32Dll::_VirtualAllocExNuma = NULL;
GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL;


BOOL os::Kernel32Dll::initialized = FALSE;
SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
  assert(initialized && _GetLargePageMinimum != NULL,
    "GetLargePageMinimumAvailable() not yet called");
  return _GetLargePageMinimum();
}

BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetLargePageMinimum != NULL;
}

BOOL os::Kernel32Dll::NumaCallsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _VirtualAllocExNuma != NULL;
}

LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr, SIZE_T bytes, DWORD flags, DWORD prot, DWORD node) {
  assert(initialized && _VirtualAllocExNuma != NULL,
    "NUMACallsAvailable() not yet called");

  return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
}

BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
  assert(initialized && _GetNumaHighestNodeNumber != NULL,
    "NUMACallsAvailable() not yet called");

  return _GetNumaHighestNodeNumber(ptr_highest_node_number);
}

BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node, PULONGLONG proc_mask) {
  assert(initialized && _GetNumaNodeProcessorMask != NULL,
    "NUMACallsAvailable() not yet called");

  return _GetNumaNodeProcessorMask(node, proc_mask);
}

USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
  ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) {
  if (!initialized) {
    initialize();
  }

  // Unlike the other wrappers this one degrades gracefully (returns 0
  // captured frames) when the entry point is unavailable.
  if (_RtlCaptureStackBackTrace != NULL) {
    return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
      BackTrace, BackTraceHash);
  } else {
    return 0;
  }
}

// Resolves the optional Kernel32 entry points that are needed on all
// supported Windows versions (both JDK6-and-earlier and later builds).
void os::Kernel32Dll::initializeCommon() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");
    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
    _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
    _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
    _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
    _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace");
    initialized = TRUE;
  }
}



#ifndef JDK6_OR_EARLIER
// Post-JDK6 builds can link these APIs directly; the wrappers below are
// straight pass-throughs to the statically-linked Win32 functions.

void os::Kernel32Dll::initialize() {
  initializeCommon();
}


// Kernel32 API
inline BOOL os::Kernel32Dll::SwitchToThread() {
  return ::SwitchToThread();
}

inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  return true;
}

// Help tools
inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
  return true;
}

inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
  return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32First(hSnapshot, lpme);
}

inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32Next(hSnapshot, lpme);
}

inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  ::GetNativeSystemInfo(lpSystemInfo);
}

// PSAPI API
inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
  return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
  return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
  return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

inline BOOL os::PSApiDll::PSApiAvailable() {
  return true;
}


// WinSock2 API
inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
  return ::WSAStartup(wVersionRequested, lpWSAData);
}

inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  return ::gethostbyname(name);
}

inline BOOL os::WinSock2Dll::WinSock2Available() {
  return true;
}

// Advapi API
inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
   BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
   PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
   return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
       BufferLength, PreviousState, ReturnLength);
}

inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
   PHANDLE TokenHandle) {
   return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}

inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
   return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}

inline BOOL os::Advapi32Dll::AdvapiAvailable() {
  return true;
}

void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//            == false if only the base name of the library is passed in
//               such as "L"
// Returns a NEW_C_HEAP_ARRAY-allocated string (caller frees), or NULL on
// allocation failure or an unusable lib_name.
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // +2: one for the '_' separator, one for the terminating NUL.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}

#else
// JDK6-and-earlier builds: these entry points may not exist at link time,
// so they are resolved dynamically via GetProcAddress.
// Kernel32 API
typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD,DWORD);
typedef BOOL (WINAPI* Module32First_Fn)(HANDLE,LPMODULEENTRY32);
typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE,LPMODULEENTRY32);
typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);

SwitchToThread_Fn os::Kernel32Dll::_SwitchToThread = NULL;
CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
Module32First_Fn os::Kernel32Dll::_Module32First = NULL;
Module32Next_Fn os::Kernel32Dll::_Module32Next = NULL;
GetNativeSystemInfo_Fn os::Kernel32Dll::_GetNativeSystemInfo = NULL;

void os::Kernel32Dll::initialize() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");

    _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
    _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
      ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
    _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First");
    _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next");
    _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo");
    initializeCommon();  // resolve the functions that always need resolving

    initialized = TRUE;
  }
}

BOOL os::Kernel32Dll::SwitchToThread() {
  assert(initialized && _SwitchToThread != NULL,
    "SwitchToThreadAvailable() not yet called");
  return _SwitchToThread();
}


BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  if (!initialized) {
    initialize();
  }
  return _SwitchToThread != NULL;
}

// Help tools
BOOL os::Kernel32Dll::HelpToolsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _CreateToolhelp32Snapshot != NULL &&
         _Module32First != NULL &&
         _Module32Next != NULL;
}

HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
  assert(initialized && _CreateToolhelp32Snapshot != NULL,
    "HelpToolsAvailable() not yet called");

  return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  assert(initialized && _Module32First != NULL,
    "HelpToolsAvailable() not yet called");

  return _Module32First(hSnapshot, lpme);
}

// NOTE(review): the 'inline' here is inconsistent with the sibling
// out-of-line definitions in this #else branch - confirm intentional.
inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  assert(initialized && _Module32Next != NULL,
    "HelpToolsAvailable() not yet called");

  return _Module32Next(hSnapshot, lpme);
}


BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetNativeSystemInfo != NULL;
}

void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  assert(initialized && _GetNativeSystemInfo != NULL,
    "GetNativeSystemInfoAvailable() not yet called");

  _GetNativeSystemInfo(lpSystemInfo);
}

// PSAPI API


typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD);
typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);;
typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD);

EnumProcessModules_Fn os::PSApiDll::_EnumProcessModules = NULL;
GetModuleFileNameEx_Fn os::PSApiDll::_GetModuleFileNameEx = NULL;
GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL;
BOOL os::PSApiDll::initialized = FALSE;

void os::PSApiDll::initialize() {
  if (!initialized) {
    HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0);
    if (handle != NULL) {
      _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle,
        "EnumProcessModules");
      _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle,
        "GetModuleFileNameExA");
      _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle,
        "GetModuleInformation");
    }
    initialized = TRUE;
  }
}



BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
  assert(initialized && _EnumProcessModules != NULL,
    "PSApiAvailable() not yet called");
  return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
  assert(initialized && _GetModuleFileNameEx != NULL,
    "PSApiAvailable() not yet called");
  return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
  assert(initialized &&
_GetModuleInformation != NULL, 5739 "PSApiAvailable() not yet called"); 5740 return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb); 5741 } 5742 5743 BOOL os::PSApiDll::PSApiAvailable() { 5744 if (!initialized) { 5745 initialize(); 5746 } 5747 return _EnumProcessModules != NULL && 5748 _GetModuleFileNameEx != NULL && 5749 _GetModuleInformation != NULL; 5750 } 5751 5752 5753 // WinSock2 API 5754 typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA); 5755 typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...); 5756 5757 WSAStartup_Fn os::WinSock2Dll::_WSAStartup = NULL; 5758 gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL; 5759 BOOL os::WinSock2Dll::initialized = FALSE; 5760 5761 void os::WinSock2Dll::initialize() { 5762 if (!initialized) { 5763 HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0); 5764 if (handle != NULL) { 5765 _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup"); 5766 _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname"); 5767 } 5768 initialized = TRUE; 5769 } 5770 } 5771 5772 5773 BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) { 5774 assert(initialized && _WSAStartup != NULL, 5775 "WinSock2Available() not yet called"); 5776 return _WSAStartup(wVersionRequested, lpWSAData); 5777 } 5778 5779 struct hostent* os::WinSock2Dll::gethostbyname(const char *name) { 5780 assert(initialized && _gethostbyname != NULL, 5781 "WinSock2Available() not yet called"); 5782 return _gethostbyname(name); 5783 } 5784 5785 BOOL os::WinSock2Dll::WinSock2Available() { 5786 if (!initialized) { 5787 initialize(); 5788 } 5789 return _WSAStartup != NULL && 5790 _gethostbyname != NULL; 5791 } 5792 5793 typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD); 5794 typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE); 5795 typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, LPCTSTR, PLUID); 5796 
5797 AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL; 5798 OpenProcessToken_Fn os::Advapi32Dll::_OpenProcessToken = NULL; 5799 LookupPrivilegeValue_Fn os::Advapi32Dll::_LookupPrivilegeValue = NULL; 5800 BOOL os::Advapi32Dll::initialized = FALSE; 5801 5802 void os::Advapi32Dll::initialize() { 5803 if (!initialized) { 5804 HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0); 5805 if (handle != NULL) { 5806 _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle, 5807 "AdjustTokenPrivileges"); 5808 _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle, 5809 "OpenProcessToken"); 5810 _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle, 5811 "LookupPrivilegeValueA"); 5812 } 5813 initialized = TRUE; 5814 } 5815 } 5816 5817 BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle, 5818 BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength, 5819 PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) { 5820 assert(initialized && _AdjustTokenPrivileges != NULL, 5821 "AdvapiAvailable() not yet called"); 5822 return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState, 5823 BufferLength, PreviousState, ReturnLength); 5824 } 5825 5826 BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess, 5827 PHANDLE TokenHandle) { 5828 assert(initialized && _OpenProcessToken != NULL, 5829 "AdvapiAvailable() not yet called"); 5830 return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle); 5831 } 5832 5833 BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) { 5834 assert(initialized && _LookupPrivilegeValue != NULL, 5835 "AdvapiAvailable() not yet called"); 5836 return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid); 5837 } 5838 5839 BOOL os::Advapi32Dll::AdvapiAvailable() { 5840 if (!initialized) { 5841 initialize(); 5842 } 5843 return _AdjustTokenPrivileges != NULL && 5844 
_OpenProcessToken != NULL && 5845 _LookupPrivilegeValue != NULL; 5846 } 5847 5848 #endif 5849 5850 #ifndef PRODUCT 5851 5852 // test the code path in reserve_memory_special() that tries to allocate memory in a single 5853 // contiguous memory block at a particular address. 5854 // The test first tries to find a good approximate address to allocate at by using the same 5855 // method to allocate some memory at any address. The test then tries to allocate memory in 5856 // the vicinity (not directly after it to avoid possible by-chance use of that location) 5857 // This is of course only some dodgy assumption, there is no guarantee that the vicinity of 5858 // the previously allocated memory is available for allocation. The only actual failure 5859 // that is reported is when the test tries to allocate at a particular location but gets a 5860 // different valid one. A NULL return value at this point is not considered an error but may 5861 // be legitimate. 5862 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages. 5863 void TestReserveMemorySpecial_test() { 5864 if (!UseLargePages) { 5865 if (VerboseInternalVMTests) { 5866 gclog_or_tty->print("Skipping test because large pages are disabled"); 5867 } 5868 return; 5869 } 5870 // save current value of globals 5871 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; 5872 bool old_use_numa_interleaving = UseNUMAInterleaving; 5873 5874 // set globals to make sure we hit the correct code path 5875 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; 5876 5877 // do an allocation at an address selected by the OS to get a good one. 5878 const size_t large_allocation_size = os::large_page_size() * 4; 5879 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); 5880 if (result == NULL) { 5881 if (VerboseInternalVMTests) { 5882 gclog_or_tty->print("Failed to allocate control block with size " SIZE_FORMAT ". 
Skipping remainder of test.", 5883 large_allocation_size); 5884 } 5885 } else { 5886 os::release_memory_special(result, large_allocation_size); 5887 5888 // allocate another page within the recently allocated memory area which seems to be a good location. At least 5889 // we managed to get it once. 5890 const size_t expected_allocation_size = os::large_page_size(); 5891 char* expected_location = result + os::large_page_size(); 5892 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); 5893 if (actual_location == NULL) { 5894 if (VerboseInternalVMTests) { 5895 gclog_or_tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.", 5896 expected_location, large_allocation_size); 5897 } 5898 } else { 5899 // release memory 5900 os::release_memory_special(actual_location, expected_allocation_size); 5901 // only now check, after releasing any memory to avoid any leaks. 5902 assert(actual_location == expected_location, 5903 err_msg("Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead", 5904 expected_location, expected_allocation_size, actual_location)); 5905 } 5906 } 5907 5908 // restore globals 5909 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; 5910 UseNUMAInterleaving = old_use_numa_interleaving; 5911 } 5912 #endif // PRODUCT 5913