1 /* 2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

// Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
#define _WIN32_WINNT 0x0600

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_windows.h"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_windows.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_share_windows.hpp"
#include "os_windows.inline.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

#ifdef _DEBUG
#include <crtdbg.h>
#endif


#include <windows.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/timeb.h>
#include <objidl.h>
#include <shlobj.h>

#include <malloc.h>
#include <signal.h>
#include <direct.h>
#include <errno.h>
#include <fcntl.h>
#include <io.h>
#include <process.h>              // For _beginthreadex(), _endthreadex()
#include <imagehlp.h>             // For os::dll_address_to_function_name
// for enumerating dll libraries
#include <vdmdbg.h>

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(-1)

// For DLL loading/load error detection
// Values of PE COFF
#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
#define IMAGE_FILE_SIGNATURE_LENGTH 4

// Handle/id of the primordial process and thread, recorded at VM startup
// and used by create_main_thread()/create_attached_thread() below.
static HANDLE main_process;
static HANDLE main_thread;
static int main_thread_id;

// Scratch FILETIME slots used by callers of GetProcessTimes().
static FILETIME process_creation_time;
static FILETIME process_exit_time;
static FILETIME process_user_time;
static FILETIME process_kernel_time;

#ifdef _M_IA64
#define __CPU__ ia64
#elif _M_AMD64
#define __CPU__ amd64
#else
#define __CPU__ i486
#endif

// save DLL module handle, used by GetModuleFileName

HINSTANCE vm_lib_handle;

// DLL entry point for jvm.dll: records the module handle and, when
// ForceTimeHighResolution is set, raises/restores the OS timer interrupt
// period for the lifetime of the process.
BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
  switch (reason) {
  case DLL_PROCESS_ATTACH:
    vm_lib_handle = hinst;
    if (ForceTimeHighResolution) {
      timeBeginPeriod(1L);    // request 1ms timer resolution
    }
    break;
  case DLL_PROCESS_DETACH:
    if (ForceTimeHighResolution) {
      timeEndPeriod(1L);      // matching undo of timeBeginPeriod(1L)
    }
    break;
  default:
    break;
  }
  return true;
}

// Converts a FILETIME (a 64-bit count of 100-nanosecond units split across
// two 32-bit DWORDs) into a double count of seconds.
static inline double fileTimeAsDouble(FILETIME* time) {
  const double high = (double) ((unsigned int) ~0);
  const double split = 10000000.0;
  double result = (time->dwLowDateTime / split) +
                   time->dwHighDateTime * (high/split);
  return result;
}

// Implementation of os

// Copies the value of environment variable 'name' into 'buffer'.
// Returns false when the variable is unset or its value (plus the
// terminating NUL) does not fit into 'len' bytes.
bool os::getenv(const char* name, char* buffer, int len) {
  int result = GetEnvironmentVariable(name, buffer, len);
  return result > 0 && result < len;
}
// Removes 'name' from the process environment.
// Returns true iff the variable was successfully deleted.
bool os::unsetenv(const char* name) {
  assert(name != NULL, "Null pointer");
  return (SetEnvironmentVariable(name, NULL) == TRUE);
}

// No setuid programs under Windows.
bool os::have_special_privileges() {
  return false;
}


// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here.
// For Windows at the moment does nothing
void os::run_periodic_checks() {
  return;
}

// previous UnhandledExceptionFilter, if there is one
static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;

LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);

// Establishes the system properties the launcher and class libraries rely
// on: java.home, the VM dll directory, the boot class path, the native
// library search path, and the extensions directories.
void os::init_system_properties_values() {
  // sysclasspath, java_home, dll_dir
  {
    char *home_path;
    char *dll_path;
    char *pslash;
    char *bin = "\\bin";
    char home_dir[MAX_PATH];

    if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) {
      os::jvm_path(home_dir, sizeof(home_dir));
      // Found the full path to jvm.dll.
      // Now cut the path to <java_home>/jre if we can.
      *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
      pslash = strrchr(home_dir, '\\');
      if (pslash != NULL) {
        *pslash = '\0';                   // get rid of \{client|server}
        pslash = strrchr(home_dir, '\\');
        if (pslash != NULL) {
          *pslash = '\0';                 // get rid of \bin
        }
      }
    }

    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
    if (home_path == NULL) {
      return;
    }
    strcpy(home_path, home_dir);
    Arguments::set_java_home(home_path);
    FREE_C_HEAP_ARRAY(char, home_path);

    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
                                mtInternal);
    if (dll_path == NULL) {
      return;
    }
    strcpy(dll_path, home_dir);
    strcat(dll_path, bin);
    Arguments::set_dll_dir(dll_path);
    FREE_C_HEAP_ARRAY(char, dll_path);

    if (!set_boot_path('\\', ';')) {
      return;
    }
  }

  // library_path
#define EXT_DIR "\\lib\\ext"
#define BIN_DIR "\\bin"
#define PACKAGE_DIR "\\Sun\\Java"
  {
    // Win32 library search order (See the documentation for LoadLibrary):
    //
    // 1. The directory from which application is loaded.
    // 2. The system wide Java Extensions directory (Java only)
    // 3. System directory (GetSystemDirectory)
    // 4. Windows directory (GetWindowsDirectory)
    // 5. The PATH environment variable
    // 6. The current directory

    char *library_path;
    char tmp[MAX_PATH];
    char *path_str = ::getenv("PATH");

    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
                                    sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);

    library_path[0] = '\0';

    GetModuleFileName(NULL, tmp, sizeof(tmp));
    *(strrchr(tmp, '\\')) = '\0';   // chop the executable name off the path
    strcat(library_path, tmp);

    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);
    strcat(library_path, PACKAGE_DIR BIN_DIR);

    GetSystemDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    if (path_str) {
      strcat(library_path, ";");
      strcat(library_path, path_str);
    }

    strcat(library_path, ";.");

    Arguments::set_library_path(library_path);
    FREE_C_HEAP_ARRAY(char, library_path);
  }

  // Default extensions directory
  {
    char path[MAX_PATH];
    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
    GetWindowsDirectory(path, MAX_PATH);
    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
            path, PACKAGE_DIR, EXT_DIR);
    Arguments::set_ext_dirs(buf);
  }
#undef EXT_DIR
#undef BIN_DIR
#undef PACKAGE_DIR

#ifndef _WIN64
  // set our UnhandledExceptionFilter and save any previous one
  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif

  // Done
  return;
}

// Triggers a debugger breakpoint in the current process.
void os::breakpoint() {
  DebugBreak();
}

// Invoked from the BREAKPOINT Macro
extern "C" void breakpoint() {
  os::breakpoint();
}

// RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
// So far, this method is only used by Native Memory Tracking, which is
// only supported on Windows XP or later.
//
// Captures up to 'frames' return addresses of the current call stack into
// 'stack', skipping 'toSkip' innermost frames (plus this frame itself).
// Unused slots are NULL-filled; returns the number of frames captured.
int os::get_native_stack(address* stack, int frames, int toSkip) {
#ifdef _NMT_NOINLINE_
  toSkip++;
#endif
  int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames,
                                                       (PVOID*)stack, NULL);
  for (int index = captured; index < frames; index ++) {
    stack[index] = NULL;
  }
  return captured;
}


// os::current_stack_base()
//
// Returns the base of the stack, which is the stack's
// starting address. This function must be called
// while running on the stack of the thread being queried.

address os::current_stack_base() {
  MEMORY_BASIC_INFORMATION minfo;
  address stack_bottom;
  size_t stack_size;

  // Query the region containing a local variable ('minfo' itself lives on
  // this stack); its AllocationBase is the low end of the stack reservation.
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  stack_bottom = (address)minfo.AllocationBase;
  stack_size = minfo.RegionSize;

  // Add up the sizes of all the regions with the same
  // AllocationBase.
  while (1) {
    VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
    if (stack_bottom == (address)minfo.AllocationBase) {
      stack_size += minfo.RegionSize;
    } else {
      break;
    }
  }

#ifdef _M_IA64
  // IA64 has memory and register stacks
  //
  // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
  // at thread creation (1MB backing store growing upwards, 1MB memory stack
  // growing downwards, 2MB summed up)
  //
  // ...
  // ------- top of stack (high address) -----
  // |
  // |      1MB
  // |      Backing Store (Register Stack)
  // |
  // |         / \
  // |          |
  // |          |
  // |          |
  // ------------------------ stack base -----
  // |      1MB
  // |      Memory Stack
  // |
  // |          |
  // |          |
  // |          |
  // |         \ /
  // |
  // ----- bottom of stack (low address) -----
  // ...

  stack_size = stack_size / 2;
#endif
  return stack_bottom + stack_size;
}

// Size of the current thread's stack reservation below the querying frame.
size_t os::current_stack_size() {
  size_t sz;
  MEMORY_BASIC_INFORMATION minfo;
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
  return sz;
}

// Thread-safe wrapper for localtime(): copies the result into caller-owned
// storage 'res'. Returns NULL if conversion fails.
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  const struct tm* time_struct_ptr = localtime(clock);
  if (time_struct_ptr != NULL) {
    *res = *time_struct_ptr;
    return res;
  }
  return NULL;
}

LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);

// Thread start routine for all new Java threads
static unsigned __stdcall java_start(Thread* thread) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  _alloca(((pid ^ counter++) & 7) * 128);

  OSThread* osthr = thread->osthread();
  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Diagnostic code to investigate JDK-6573254
  int res = 30115;  // non-java thread
  if (thread->is_Java_thread()) {
    res = 20115;    // java thread
  }

  // Install a win32 structured exception handler around every thread created
  // by VM, so VM can generate error dump when an exception occurred in non-
  // Java thread (e.g. VM thread).
  __try {
    thread->run();
  } __except(topLevelExceptionFilter(
                                     (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::add code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
  }

  // Thread must not return from exit_process_or_thread(), but if it does,
  // let it proceed to exit normally
  return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
}

// Wraps an already-existing Win32 thread (handle + id) in a freshly
// allocated OSThread. Returns NULL on allocation failure. The OSThread is
// left in state INITIALIZED; callers transition it to RUNNABLE themselves.
static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
                                  int thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Initialize support for Java interrupts
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return NULL;
  }
  osthread->set_interrupt_event(interrupt_event);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}


bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  HANDLE thread_h;
  // Duplicate the pseudo-handle from GetCurrentThread() into a real handle
  // that remains valid when used from other threads.
  if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
                       &thread_h, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  OSThread* osthread = create_os_thread(thread, thread_h,
                                        (int)current_thread_id());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);
  return true;
}

bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start)
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);
  return true;
}

// Allocate and initialize a new OSThread
bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t stack_size) {
  unsigned thread_id;

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Initialize support for Java interrupts
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    // Fixed: this function returns bool; the previous 'return NULL' only
    // worked by implicit conversion of NULL to false.
    return false;
  }
  osthread->set_interrupt_event(interrupt_event);
  osthread->set_interrupted(false);

  thread->set_osthread(osthread);

  if (stack_size == 0) {
    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) {
        stack_size = JavaThread::stack_size_at_create();
      }
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }

  // Create the Win32 thread
  //
  // Contrary to what MSDN document says, "stack_size" in _beginthreadex()
  // does not specify stack size. Instead, it specifies the size of
  // initially committed space. The stack size is determined by
  // PE header in the executable. If the committed "stack_size" is larger
  // than default value in the PE header, the stack is rounded up to the
  // nearest multiple of 1MB. For example if the launcher has default
  // stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all, it only affects the initial
  // commitment. On the other hand, specifying 'stack_size' larger than
  // default value may cause significant increase in memory usage, because
  // not only the stack space will be rounded up to MB, but also the
  // entire space is committed upfront.
  //
  // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
  // for CreateThread() that can treat 'stack_size' as stack size. However we
  // are not supposed to call CreateThread() directly according to MSDN
  // document because JVM uses C runtime library. The good news is that the
  // flag appears to work with _beginthredex() as well.

#ifndef STACK_SIZE_PARAM_IS_A_RESERVATION
#define STACK_SIZE_PARAM_IS_A_RESERVATION (0x10000)
#endif

  HANDLE thread_handle =
      (HANDLE)_beginthreadex(NULL,
                             (unsigned)stack_size,
                             (unsigned (__stdcall *)(void*)) java_start,
                             thread,
                             CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION,
                             &thread_id);
  if (thread_handle == NULL) {
    // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again
    // without the flag.
    thread_handle =
        (HANDLE)_beginthreadex(NULL,
                               (unsigned)stack_size,
                               (unsigned (__stdcall *)(void*)) java_start,
                               thread,
                               CREATE_SUSPENDED,
                               &thread_id);
  }
  if (thread_handle == NULL) {
    // Need to clean up stuff we've allocated so far
    CloseHandle(osthread->interrupt_event());
    thread->set_osthread(NULL);
    delete osthread;
    // Fixed: return false (not NULL) from a bool-returning function.
    return false;
  }

  Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}


// Free Win32 resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");
  CloseHandle(osthread->thread_handle());
  CloseHandle(osthread->interrupt_event());
  delete osthread;
}

// Baseline values captured by initialize_performance_counter() and used by
// elapsed_counter()/elapsed_frequency().
static jlong first_filetime;
static jlong initial_performance_count;
static jlong performance_frequency;


// Reassembles a Windows LARGE_INTEGER (high/low 32-bit parts) into a jlong.
jlong as_long(LARGE_INTEGER x) {
  jlong result = 0;  // initialization to avoid warning
  set_high(&result, x.HighPart);
  set_low(&result, x.LowPart);
  return result;
}


jlong os::elapsed_counter() {
  LARGE_INTEGER count;
  if (win32::_has_performance_count) {
    QueryPerformanceCounter(&count);
    return as_long(count) - initial_performance_count;
  } else {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime);
  }
}


jlong os::elapsed_frequency() {
  if (win32::_has_performance_count) {
    return performance_frequency;
  } else {
    // the FILETIME time is the number of 100-nanosecond intervals since January 1,1601.
    return 10000000;
  }
}


julong os::available_memory() {
  return win32::available_memory();
}

julong os::win32::available_memory() {
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);

  return (julong)ms.ullAvailPhys;
}

julong os::physical_memory() {
  return win32::physical_memory();
}

// Stores the maximum amount of virtual memory this process can allocate
// into *limit. Always returns true on Windows.
bool os::has_allocatable_memory_limit(julong* limit) {
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);
#ifdef _LP64
  *limit = (julong)ms.ullAvailVirtual;
  return true;
#else
  // Limit to 1400m because of the 2gb address space wall
  *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
  return true;
#endif
}

// VC6 lacks DWORD_PTR
#if _MSC_VER < 1300
typedef UINT_PTR DWORD_PTR;
#endif

int os::active_processor_count() {
  DWORD_PTR lpProcessAffinityMask = 0;
  DWORD_PTR lpSystemAffinityMask = 0;
  int proc_count = processor_count();
  if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
      GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // Nof active processors is number of bits in process affinity mask
    int bitcount = 0;
    while (lpProcessAffinityMask != 0) {
      // Kernighan's trick: clear the lowest set bit each iteration.
      lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
      bitcount++;
    }
    return bitcount;
  } else {
    return proc_count;
  }
}

void os::set_native_thread_name(const char *name) {

  // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
  //
  // Note that unfortunately this only works if the process
  // is already attached to a debugger; debugger must observe
  // the exception below to show the correct name.

  const DWORD MS_VC_EXCEPTION = 0x406D1388;
  struct {
    DWORD dwType;     // must be 0x1000
    LPCSTR szName;    // pointer to name (in user addr space)
    DWORD dwThreadID; // thread ID (-1=caller thread)
    DWORD dwFlags;    // reserved for future use, must be zero
  } info;

  info.dwType = 0x1000;
  info.szName = name;
  info.dwThreadID = -1;
  info.dwFlags = 0;

  __try {
    RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
  } __except(EXCEPTION_CONTINUE_EXECUTION) {}
}

bool os::distribute_processes(uint length, uint* distribution) {
  // Not yet implemented.
  return false;
}

bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}

// Records whether a high-resolution performance counter is available and
// captures the initial counter/time baseline for elapsed_counter().
void os::win32::initialize_performance_counter() {
  LARGE_INTEGER count;
  if (QueryPerformanceFrequency(&count)) {
    win32::_has_performance_count = 1;
    performance_frequency = as_long(count);
    QueryPerformanceCounter(&count);
    initial_performance_count = as_long(count);
  } else {
    win32::_has_performance_count = 0;
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  }
}


double os::elapsedTime() {
  return (double) elapsed_counter() / (double) elapsed_frequency();
}


// Windows format:
// The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
// Java format:
// Java standards require the number of milliseconds since 1/1/1970

// Constant offset - calculated using offset()
// (100ns units between 1601-01-01 and 1970-01-01)
static jlong _offset = 116444736000000000;
// Fake time counter for reproducible results when debugging
static jlong fake_time = 0;

#ifdef ASSERT
// Just to be safe, recalculate the offset in debug mode
static jlong _calculated_offset = 0;
static int _has_calculated_offset = 0;

jlong offset() {
  if (_has_calculated_offset) return _calculated_offset;
  SYSTEMTIME java_origin;
  java_origin.wYear = 1970;
  java_origin.wMonth = 1;
  java_origin.wDayOfWeek = 0;  // ignored
  java_origin.wDay = 1;
  java_origin.wHour = 0;
  java_origin.wMinute = 0;
  java_origin.wSecond = 0;
  java_origin.wMilliseconds = 0;
  FILETIME jot;
  if (!SystemTimeToFileTime(&java_origin, &jot)) {
    fatal(err_msg("Error = %d\nWindows error", GetLastError()));
  }
  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
  _has_calculated_offset = 1;
  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
  return _calculated_offset;
}
#else
jlong offset() {
  return _offset;
}
#endif

// FILETIME (100ns since 1601) -> Java time (ms since 1970).
jlong windows_to_java_time(FILETIME wt) {
  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  return (a - offset()) / 10000;
}

// Returns time ticks in (10th of micro seconds)
jlong windows_to_time_ticks(FILETIME wt) {
  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  return (a - offset());
}

// Java time (ms since 1970) -> FILETIME (100ns since 1601).
FILETIME java_to_windows_time(jlong l) {
  jlong a = (l * 10000) + offset();
  FILETIME result;
  result.dwHighDateTime = high(a);
  result.dwLowDateTime = low(a);
  return result;
}

bool os::supports_vtime() { return true; }
bool os::enable_vtime() { return false; }
bool os::vtime_enabled() { return false; }

// CPU time (user + kernel) consumed by the current thread, in seconds;
// falls back to wall-clock elapsedTime() if GetThreadTimes() fails.
double os::elapsedVTime() {
  FILETIME created;
  FILETIME exited;
  FILETIME kernel;
  FILETIME user;
  if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
    // the resolution of windows_to_java_time() should be sufficient (ms)
    return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
  } else {
    return elapsedTime();
  }
}

jlong os::javaTimeMillis() {
  if (UseFakeTimers) {
    return fake_time++;
  } else {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    return windows_to_java_time(wt);
  }
}

// Splits the current wall-clock time into whole seconds and the nanosecond
// remainder (100ns granularity) since the Java epoch.
void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  FILETIME wt;
  GetSystemTimeAsFileTime(&wt);
  jlong ticks = windows_to_time_ticks(wt);  // 10th of micros
  jlong secs = jlong(ticks / 10000000);     // 10000 * 1000
  seconds = secs;
  nanos = jlong(ticks - (secs*10000000)) * 100;
}

jlong os::javaTimeNanos() {
  if (!win32::_has_performance_count) {
    return javaTimeMillis() * NANOSECS_PER_MILLISEC;  // the best we can do.
  } else {
    LARGE_INTEGER current_count;
    // Fixed: the argument had been corrupted to the mojibake token
    // '¤t_count' (an HTML-entity mangling of '&current_count').
    QueryPerformanceCounter(&current_count);
    double current = as_long(current_count);
    double freq = performance_frequency;
    jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
    return time;
  }
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  if (!win32::_has_performance_count) {
    // javaTimeMillis() doesn't have much precision,
    // but it is not going to wrap -- so all 64 bits
    info_ptr->max_value = ALL_64_BITS;

    // this is a wall clock timer, so may skip
    info_ptr->may_skip_backward = true;
    info_ptr->may_skip_forward = true;
  } else {
    jlong freq = performance_frequency;
    if (freq < NANOSECS_PER_SEC) {
      // the performance counter is 64 bits and we will
      // be multiplying it -- so no wrap in 64 bits
      info_ptr->max_value = ALL_64_BITS;
    } else if (freq > NANOSECS_PER_SEC) {
      // use the max value the counter can reach to
      // determine the max value which could be returned
      julong max_counter = (julong)ALL_64_BITS;
      info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
    } else {
      // the performance counter is 64 bits and we will
      // be using it directly -- so no wrap in 64 bits
      info_ptr->max_value = ALL_64_BITS;
    }

    // using a counter, so no skipping
    info_ptr->may_skip_backward = false;
    info_ptr->may_skip_forward = false;
  }
  info_ptr->kind = JVMTI_TIMER_ELAPSED;  // elapsed not CPU time
}

// Formats the current local time as "YYYY-MM-DD hh:mm:ss" into 'buf'.
char* os::local_time_string(char *buf, size_t buflen) {
  SYSTEMTIME st;
  GetLocalTime(&st);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
  return buf;
}

// Reports process wall-clock, user and system CPU times in seconds.
// Returns false if GetProcessTimes() fails.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  HANDLE h_process = GetCurrentProcess();
  FILETIME create_time, exit_time, kernel_time, user_time;
  BOOL result = GetProcessTimes(h_process,
                                &create_time,
                                &exit_time,
                                &kernel_time,
                                &user_time);
  if (result != 0) {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    jlong rtc_millis = windows_to_java_time(wt);
    jlong user_millis = windows_to_java_time(user_time);
    jlong system_millis = windows_to_java_time(kernel_time);
    *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
    *process_user_time = ((double) user_millis) / ((double) MILLIUNITS);
    *process_system_time = ((double) system_millis) / ((double) MILLIUNITS);
    return true;
  } else {
    return false;
  }
}

void os::shutdown() {
  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}


// Lazily-resolved pointer to dbghelp.dll's MiniDumpWriteDump(), set in
// os::abort() below.
static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
                                         PMINIDUMP_EXCEPTION_INFORMATION,
                                         PMINIDUMP_USER_STREAM_INFORMATION,
                                         PMINIDUMP_CALLBACK_INFORMATION);

void os::check_create_dump_limit(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) {
  // First presume we can dump core file, detail checking done in abort()
  jio_snprintf(buffer, bufferSize, "%s\\hs_err_pid%u.mdmp", get_current_directory(NULL, 0), current_process_id());
  VMError::report_coredump_status(buffer, true);
}

void os::abort(bool dump_core, void* exceptionRecord, void* contextRecord) {
  HINSTANCE dbghelp;
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  HANDLE dumpFile;
  MINIDUMP_TYPE dumpType;
  static const char* cwd;
  static char buffer[O_BUFLEN];

  os::shutdown();
  if (!dump_core) return;

  // Default is to always create dump for debug builds, on product builds only dump on server versions of Windows.
#ifndef ASSERT
  // If running on a client version of Windows and user has not explicitly enabled dumping
  if (!os::win32::is_windows_server() && !CreateCoredumpOnCrash) {
    VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false);
    return;
    // If running on a server version of Windows and user has explicitly disabled dumping
  } else if (os::win32::is_windows_server() && !FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#else
  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#endif

  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);

  if (dbghelp == NULL) {
    VMError::report_coredump_status("Failed to load dbghelp.dll", false);
    return;
  }

  _MiniDumpWriteDump =
      CAST_TO_FN_PTR(BOOL(WINAPI *)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
                                    PMINIDUMP_EXCEPTION_INFORMATION,
                                    PMINIDUMP_USER_STREAM_INFORMATION,
                                    PMINIDUMP_CALLBACK_INFORMATION),
                     GetProcAddress(dbghelp,
                                    "MiniDumpWriteDump"));

  if (_MiniDumpWriteDump == NULL) {
    VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false);
    return;
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData);

  // Older versions of dbghelp.h doesn't contain all the dumptypes we want, dbghelp.h with
  // API_VERSION_NUMBER 11 or higher contains the ones we want though
#if API_VERSION_NUMBER >= 11
  dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo |
                             MiniDumpWithUnloadedModules);
#endif

  cwd = get_current_directory(NULL, 0);
  jio_snprintf(buffer, sizeof(buffer), "%s\\hs_err_pid%u.mdmp", cwd, current_process_id());
  dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);

  if (dumpFile == INVALID_HANDLE_VALUE) {
    VMError::report_coredump_status("Failed to create file for dumping", false);
    return;
  }

  if (exceptionRecord != NULL && contextRecord != NULL) {
    ep.ContextRecord = (PCONTEXT) contextRecord;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }


  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
    DWORD error = GetLastError();
    LPTSTR msgbuf = NULL;

    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) {

      jio_snprintf(buffer, sizeof(buffer), "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
      LocalFree(msgbuf);
    } else {
      // Call to FormatMessage failed, just include the result from GetLastError
      jio_snprintf(buffer, sizeof(buffer), "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
    }
    VMError::report_coredump_status(buffer, false);
  } else {
    VMError::report_coredump_status(buffer, true);
  }

  CloseHandle(dumpFile);
  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
// * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

// Caller must have already run dirname through JVM_NativePath, which removes
// duplicate slashes and converts all instances of '/' into '\\'.

DIR * os::opendir(const char *dirname) {
  assert(dirname != NULL, "just checking");  // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
  DWORD fattr;  // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == 0) {
    errno = ENOMEM;
    return 0;
  }

  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
  // as a directory in FindFirstFile().
  // We detect this case here and
  // prepend the current drive name.
  //
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    // _getdrive() returns 1 for A:, 2 for B:, ... — map to a drive letter.
    alt_dirname[0] = _getdrive() + 'A' - 1;
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  // +5 leaves room for the "\\*.*" wildcard suffix appended below plus NUL.
  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dirp->path == 0) {
    free(dirp);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dirp->path, dirname);

  fattr = GetFileAttributes(dirp->path);
  if (fattr == 0xffffffff) {
    free(dirp->path);
    free(dirp);
    errno = ENOENT;
    return 0;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path);
    free(dirp);
    errno = ENOTDIR;
    return 0;
  }

  // Append "*.*", or possibly "\\*.*", to path
  if (dirp->path[1] == ':' &&
      (dirp->path[2] == '\0' ||
       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    // No '\\' needed for cases like "Z:" or "Z:\"
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    // ERROR_FILE_NOT_FOUND (an empty directory) is not an error: readdir()
    // checks for the invalid handle and simply reports end-of-stream.
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path);
      free(dirp);
      errno = EACCES;
      return 0;
    }
  }
  return dirp;
}

// POSIX-style readdir() emulation: returns the entry captured by the
// previous Find{First,Next}File call and pre-fetches the next one.
// parameter dbuf unused on Windows
struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    return 0;
  }

  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return 0;
    }
    // No more entries: close the search handle now so the final entry is
    // still returned below and the next call reports end-of-stream.
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}

// POSIX-style closedir() emulation: closes the search handle (if still
// open) and frees the DIR allocated by os::opendir().
int os::closedir(DIR *dirp) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path);
  free(dirp);
  return 0;
}

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf) > 0) {
    return path_buf;
  } else {
    // On failure return an empty string rather than stale buffer contents.
    path_buf[0] = '\0';
    return path_buf;
  }
}

// True if 'filename' names an existing file or directory.
static bool file_exists(const char* filename) {
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
}

// Build "<pname>\<fname>.dll" into 'buffer'.  If 'pname' is a search path
// (contains the path separator), probe each element and stop at the first
// existing file.  Returns false on overflow or when no candidate exists.
bool os::dll_build_name(char *buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  const size_t pnamelen = pname ? strlen(pname) : 0;
  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    jio_snprintf(buffer, buflen, "%s.dll", fname);
    retval = true;
  } else if (c == ':' || c == '\\') {
    // pname already ends with a drive or directory separator.
    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      char* path = pelements[i];
      // Really shouldn't be NULL, but check can't hurt
      size_t plen = (path == NULL) ? 0 : strlen(path);
      if (plen == 0) {
        continue;  // skip the empty path values
      }
      const char lastchar = path[plen - 1];
      if (lastchar == ':' || lastchar == '\\') {
        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
      } else {
        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
      }
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
    retval = true;
  }
  return retval;
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
const char* os::get_current_directory(char *buf, size_t buflen) {
  // _getcwd takes an int length; clamp buflen to avoid overflow on cast.
  int n = static_cast<int>(buflen);
  if (buflen > INT_MAX) n = INT_MAX;
  return _getcwd(buf, n);
}

//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if address in
// within the NTDLL address space.
//
static bool _addr_in_ntdll(address addr) {
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if (hmod == NULL) return false;
  if (!os::PSApiDll::GetModuleInformation(GetCurrentProcess(), hmod,
                                          &minfo, sizeof(MODULEINFO))) {
    return false;
  }

  // In range if base <= addr < base + image size.
  if ((addr >= minfo.lpBaseOfDll) &&
      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
    return true;
  } else {
    return false;
  }
}
#endif

// Query record passed through get_loaded_modules_info() to
// _locate_module_by_addr() below.
struct _modinfo {
  address addr;
  char*   full_path;   // point to a char buffer
  int     buflen;      // size of the buffer
  address base_addr;
};

// Module-enumeration callback: when 'param' (a _modinfo*) describes an
// address inside [base_addr, top_address), record the module path and base
// and return 1 to stop the enumeration.
static int _locate_module_by_addr(const char * mod_fname, address base_addr,
                                  address top_address, void * param) {
  struct _modinfo *pmod = (struct _modinfo *)param;
  if (!pmod) return -1;

  if (base_addr <= pmod->addr &&
      top_address > pmod->addr) {
    // if a buffer is provided, copy path name to the buffer
    if (pmod->full_path) {
      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
    }
    pmod->base_addr = base_addr;
    return 1;
  }
  return 0;
}

// Find the DLL containing 'addr'; on success copy its path into 'buf' and
// (optionally) the offset of addr from the module base into '*offset'.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
  //       return the full path to the DLL file, sometimes it returns path
  //       to the corresponding PDB file (debug info); sometimes it only
  //       returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr = addr;
  mi.full_path = buf;
  mi.buflen = buflen;
  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

// Resolve 'addr' to a symbol name via the Decoder; on failure clear the
// buffer and report offset -1.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset)) {
    return true;
  }
  if (offset != NULL) *offset = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
// (identified as the module containing this very function's code address)
static int _locate_jvm_dll(const char* mod_fname, address base_addr,
                           address top_address, void * param) {
  if (!param) return -1;

  if (base_addr <= (address)_locate_jvm_dll &&
      top_address > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = top_address;
    return 1;
  }
  return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll; lazily resolves vm_lib_location on the
// first call.
bool os::address_is_in_vm(address addr) {
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(const char* fname, address base_address,
                         address top_address, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
  // Always return 0 so the enumeration continues over every module.
  return 0;
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
  void * result = LoadLibrary(name);
  if (result != NULL) {
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
    ebuf[ebuflen - 1] = '\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen - 1] = '\0';
  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
  if (fd < 0) {
    return NULL;
  }

  uint32_t signature_offset;
  uint16_t lib_arch = 0;
  // Short-circuit || chain: each step runs only if the previous succeeded,
  // walking the PE header to the COFF machine-type field.
  bool failed_to_get_lib_arch =
    ( // Go to position 3c in the dll
     (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
     ||
     // Read location of signature
     (sizeof(signature_offset) !=
      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
     ||
     // Go to COFF File Header in dll
     // that is located after "signature" (4 bytes long)
     (os::seek_to_file_offset(fd,
      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
     ||
     // Read field that contains code of architecture
     // that dll was built for
     (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
    );

  ::close(fd);
  if (failed_to_get_lib_arch) {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  typedef struct {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[] = {
    {IMAGE_FILE_MACHINE_I386,  (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_IA64,  (char*)"IA 64"}
  };
#if (defined _M_IA64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_IA64;
#elif (defined _M_AMD64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
         is defined :_M_IA64,_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str = NULL, *lib_arch_str = NULL;
  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (lib_arch == arch_array[i].arch_code) {
      lib_arch_str = arch_array[i].arch_name;
    }
    if (running_arch == arch_array[i].arch_code) {
      running_arch_str = arch_array[i].arch_name;
    }
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch) {
    return NULL;
  }

  if (lib_arch_str != NULL) {
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str, running_arch_str);
  } else {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch, running_arch_str);
  }

  return NULL;
}

// Print every loaded module (base - top  path) to the stream.
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  get_loaded_modules_info(_print_module, (void *)st);
}

// Enumerate the modules loaded in this process via PSAPI and invoke
// 'callback' for each; stops early when the callback returns non-zero.
// Returns 0 if PSAPI is unavailable or enumeration fails, otherwise the
// last callback result.
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  HANDLE   hProcess;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[MAX_PATH];
  int         result = 0;

  if (!os::PSApiDll::PSApiAvailable()) {
    return 0;
  }

  int pid = os::current_process_id();
  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
                                        sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
                                           filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
                                            &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = callback(filename, (address)modinfo.lpBaseOfDll,
                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
    // A non-zero callback result terminates the enumeration.
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
}

void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}

// Print host name (debug builds only) and the Windows version.
void os::print_os_info(outputStream* st) {
#ifdef ASSERT
  char buffer[1024];
  DWORD size = sizeof(buffer);
  st->print(" HostName: ");
  if (GetComputerNameEx(ComputerNameDnsHostname, buffer, &size)) {
    st->print("%s", buffer);
  } else {
    st->print("N/A");
  }
#endif
  st->print(" OS:");
  os::win32::print_windows_version(st);
}

// Print a human-readable Windows version string.  The version numbers are
// taken from kernel32.dll's file-version resource because GetVersionEx()
// lies starting with Windows 8.1; GetVersionEx() is still used to decide
// workstation vs. server edition.
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  VS_FIXEDFILEINFO *file_info;
  TCHAR kernel32_path[MAX_PATH];
  UINT len, ret;

  // Use the GetVersionEx information to see if we're on a server or
  // workstation edition of Windows.  Starting with Windows 8.1 we can't
  // trust the OS version information returned by this API.
  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("Call to GetVersionEx failed");
    return;
  }
  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);

  // Get the full path to \Windows\System32\kernel32.dll and use that for
  // determining what version of Windows we're running on.
  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
  ret = GetSystemDirectory(kernel32_path, len);
  if (ret == 0 || ret > len) {
    st->print_cr("Call to GetSystemDirectory failed");
    return;
  }
  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);

  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
  if (version_size == 0) {
    st->print_cr("Call to GetFileVersionInfoSize failed");
    return;
  }

  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
  if (version_info == NULL) {
    st->print_cr("Failed to allocate version_info");
    return;
  }

  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
    os::free(version_info);
    st->print_cr("Call to GetFileVersionInfo failed");
    return;
  }

  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
    os::free(version_info);
    st->print_cr("Call to VerQueryValue failed");
    return;
  }

  int major_version = HIWORD(file_info->dwProductVersionMS);
  int minor_version = LOWORD(file_info->dwProductVersionMS);
  int build_number = HIWORD(file_info->dwProductVersionLS);
  int build_minor = LOWORD(file_info->dwProductVersionLS);
  // e.g. 6.1 -> 6001, 10.0 -> 10000, matching the switch labels below.
  int os_vers = major_version * 1000 + minor_version;
  os::free(version_info);

  st->print(" Windows ");
  switch (os_vers) {

  case 6000:
    if (is_workstation) {
      st->print("Vista");
    } else {
      st->print("Server 2008");
    }
    break;

  case 6001:
    if (is_workstation) {
      st->print("7");
    } else {
      st->print("Server 2008 R2");
    }
    break;

  case 6002:
    if (is_workstation) {
      st->print("8");
    } else {
      st->print("Server 2012");
    }
    break;

  case 6003:
    if (is_workstation) {
      st->print("8.1");
    } else {
      st->print("Server 2012 R2");
    }
    break;

  case 10000:
1717 if (is_workstation) { 1718 st->print("10"); 1719 } else { 1720 // The server version name of Windows 10 is not known at this time 1721 st->print("%d.%d", major_version, minor_version); 1722 } 1723 break; 1724 1725 default: 1726 // Unrecognized windows, print out its major and minor versions 1727 st->print("%d.%d", major_version, minor_version); 1728 break; 1729 } 1730 1731 // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could 1732 // find out whether we are running on 64 bit processor or not 1733 SYSTEM_INFO si; 1734 ZeroMemory(&si, sizeof(SYSTEM_INFO)); 1735 os::Kernel32Dll::GetNativeSystemInfo(&si); 1736 if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) { 1737 st->print(" , 64 bit"); 1738 } 1739 1740 st->print(" Build %d", build_number); 1741 st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor); 1742 st->cr(); 1743 } 1744 1745 void os::pd_print_cpu_info(outputStream* st) { 1746 // Nothing to do for now. 1747 } 1748 1749 void os::print_memory_info(outputStream* st) { 1750 st->print("Memory:"); 1751 st->print(" %dk page", os::vm_page_size()>>10); 1752 1753 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 1754 // value if total memory is larger than 4GB 1755 MEMORYSTATUSEX ms; 1756 ms.dwLength = sizeof(ms); 1757 GlobalMemoryStatusEx(&ms); 1758 1759 st->print(", physical %uk", os::physical_memory() >> 10); 1760 st->print("(%uk free)", os::available_memory() >> 10); 1761 1762 st->print(", swap %uk", ms.ullTotalPageFile >> 10); 1763 st->print("(%uk free)", ms.ullAvailPageFile >> 10); 1764 st->cr(); 1765 } 1766 1767 void os::print_siginfo(outputStream *st, void *siginfo) { 1768 EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo; 1769 st->print("siginfo:"); 1770 st->print(" ExceptionCode=0x%x", er->ExceptionCode); 1771 1772 if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 1773 er->NumberParameters >= 2) { 1774 switch (er->ExceptionInformation[0]) { 1775 case 0: 
st->print(", reading address"); break; 1776 case 1: st->print(", writing address"); break; 1777 default: st->print(", ExceptionInformation=" INTPTR_FORMAT, 1778 er->ExceptionInformation[0]); 1779 } 1780 st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]); 1781 } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR && 1782 er->NumberParameters >= 2 && UseSharedSpaces) { 1783 FileMapInfo* mapinfo = FileMapInfo::current_info(); 1784 if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) { 1785 st->print("\n\nError accessing class data sharing archive." \ 1786 " Mapped file inaccessible during execution, " \ 1787 " possible disk/network problem."); 1788 } 1789 } else { 1790 int num = er->NumberParameters; 1791 if (num > 0) { 1792 st->print(", ExceptionInformation="); 1793 for (int i = 0; i < num; i++) { 1794 st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]); 1795 } 1796 } 1797 } 1798 st->cr(); 1799 } 1800 1801 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { 1802 // do nothing 1803 } 1804 1805 static char saved_jvm_path[MAX_PATH] = {0}; 1806 1807 // Find the full path to the current module, jvm.dll 1808 void os::jvm_path(char *buf, jint buflen) { 1809 // Error checking. 1810 if (buflen < MAX_PATH) { 1811 assert(false, "must use a large-enough buffer"); 1812 buf[0] = '\0'; 1813 return; 1814 } 1815 // Lazy resolve the path to current module. 1816 if (saved_jvm_path[0] != 0) { 1817 strcpy(buf, saved_jvm_path); 1818 return; 1819 } 1820 1821 buf[0] = '\0'; 1822 if (Arguments::sun_java_launcher_is_altjvm()) { 1823 // Support for the java launcher's '-XXaltjvm=<path>' option. Check 1824 // for a JAVA_HOME environment variable and fix up the path so it 1825 // looks like jvm.dll is installed there (append a fake suffix 1826 // hotspot/jvm.dll). 
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {
      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      // If <JAVA_HOME>\jre\bin\ doesn't exist, fall back to <JAVA_HOME>\bin\.
      if (0 != _access(buf, 0)) {
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  // Not an altjvm (or JAVA_HOME unusable): ask Windows for the path of the
  // module this code was loaded from.
  if (buf[0] == '\0') {
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  // Cache the result for subsequent calls.
  strncpy(saved_jvm_path, buf, MAX_PATH);
  saved_jvm_path[MAX_PATH - 1] = '\0';
}


// 32-bit stdcall name decoration: leading underscore prefix.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}


// 32-bit stdcall name decoration: "@<bytes of arguments>" suffix.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("@%d", args_size * sizeof(int));
#endif
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c

// Copy a message for the last OS or C-runtime error into 'buf'; returns
// the message length, or 0 if there is no pending error.
size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
                                     FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
                                     NULL,
                                     errval,
                                     0,
                                     buf,
                                     (DWORD)len,
                                     NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}

// Last error as an int: Win32 GetLastError() if set, otherwise errno.
int os::get_last_error() {
  DWORD error = GetLastError();
  if (error == 0) {
    error = errno;
  }
  return (int)error;
}

// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;

// Handler installed for user signals: queue the signal for the signal
// thread, then re-install (CRT signal handlers are one-shot).
static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}

void* os::user_handler() {
  return (void*) UserHandler;
}

// Install 'handler' for 'signal_number', returning the previous handler.
// SIGBREAK is intercepted (see the 4416763 workaround above) instead of
// being handed to the C runtime.
void* os::signal(int signal_number, void* handler) {
  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
    void (*oldHandler)(int) = sigbreakHandler;
    sigbreakHandler = (void (*)(int)) handler;
    return (void*) oldHandler;
  } else {
    return (void*)::signal(signal_number, (void (*)(int))handler);
  }
}

void os::signal_raise(int signal_number) {
  raise(signal_number);
}

// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events.  We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
  switch (event) {
  case CTRL_C_EVENT:
    if (is_error_reported()) {
      // Ctrl-C is pressed during error reporting, likely because the error
      // handler fails to abort. Let VM die immediately.
      os::die();
    }

    os::signal_raise(SIGINT);
    return TRUE;
    break;
  case CTRL_BREAK_EVENT:
    if (sigbreakHandler != NULL) {
      (*sigbreakHandler)(SIGBREAK);
    }
    return TRUE;
    break;
  case CTRL_LOGOFF_EVENT: {
    // Don't terminate JVM if it is running in a non-interactive session,
    // such as a service process.
    USEROBJECTFLAGS flags;
    HANDLE handle = GetProcessWindowStation();
    if (handle != NULL &&
        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
                                 sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let next handler to deal
      // with it.
      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
        return FALSE;
      }
    }
  }
  // Deliberate fall-through: an interactive logoff is handled like
  // close/shutdown below.
  case CTRL_CLOSE_EVENT:
  case CTRL_SHUTDOWN_EVENT:
    os::signal_raise(SIGTERM);
    return TRUE;
    break;
  default:
    break;
  }
  return FALSE;
}

// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Semaphore released once per queued signal; waited on in
// check_pending_signals() below.
static HANDLE sig_sem = NULL;

// Platform-specific initialization of the signal infrastructure: clear the
// pending-signal counters, create the semaphore, and (unless -Xrs) install
// the console control handler.
void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.  For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified.  This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case.  See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}

// Record one occurrence of 'signal_number' and wake a waiter (if the
// semaphore has been created yet).
void os::signal_notify(int signal_number) {
  BOOL ret;
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[signal_number]);
    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
    assert(ret != 0, "ReleaseSemaphore() failed");
  }
}

// Return the number of a pending signal, atomically decrementing its
// counter.  With wait_for_signal == false returns -1 immediately when
// nothing is pending; otherwise blocks on sig_sem (cooperating with the
// thread-suspension protocol) until a signal arrives.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // Lock-free claim of one occurrence: succeed only if the counter is
      // still 'n' when we decrement it.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
2066 threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); 2067 if (threadIsSuspended) { 2068 // The semaphore has been incremented, but while we were waiting 2069 // another thread suspended us. We don't want to continue running 2070 // while suspended because that would surprise the thread that 2071 // suspended us. 2072 ret = ::ReleaseSemaphore(sig_sem, 1, NULL); 2073 assert(ret != 0, "ReleaseSemaphore() failed"); 2074 2075 thread->java_suspend_self(); 2076 } 2077 } while (threadIsSuspended); 2078 } 2079 } 2080 2081 int os::signal_lookup() { 2082 return check_pending_signals(false); 2083 } 2084 2085 int os::signal_wait() { 2086 return check_pending_signals(true); 2087 } 2088 2089 // Implicit OS exception handling 2090 2091 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, 2092 address handler) { 2093 JavaThread* thread = JavaThread::current(); 2094 // Save pc in thread 2095 #ifdef _M_IA64 2096 // Do not blow up if no thread info available. 2097 if (thread) { 2098 // Saving PRECISE pc (with slot information) in thread. 2099 uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress; 2100 // Convert precise PC into "Unix" format 2101 precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2); 2102 thread->set_saved_exception_pc((address)precise_pc); 2103 } 2104 // Set pc to handler 2105 exceptionInfo->ContextRecord->StIIP = (DWORD64)handler; 2106 // Clear out psr.ri (= Restart Instruction) in order to continue 2107 // at the beginning of the target bundle. 2108 exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF; 2109 assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!"); 2110 #elif _M_AMD64 2111 // Do not blow up if no thread info available. 
2112 if (thread) { 2113 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip); 2114 } 2115 // Set pc to handler 2116 exceptionInfo->ContextRecord->Rip = (DWORD64)handler; 2117 #else 2118 // Do not blow up if no thread info available. 2119 if (thread) { 2120 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip); 2121 } 2122 // Set pc to handler 2123 exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler; 2124 #endif 2125 2126 // Continue the execution 2127 return EXCEPTION_CONTINUE_EXECUTION; 2128 } 2129 2130 2131 // Used for PostMortemDump 2132 extern "C" void safepoints(); 2133 extern "C" void find(int x); 2134 extern "C" void events(); 2135 2136 // According to Windows API documentation, an illegal instruction sequence should generate 2137 // the 0xC000001C exception code. However, real world experience shows that occasionnaly 2138 // the execution of an illegal instruction can generate the exception code 0xC000001E. This 2139 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems). 2140 2141 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E 2142 2143 // From "Execution Protection in the Windows Operating System" draft 0.35 2144 // Once a system header becomes available, the "real" define should be 2145 // included or copied here. 2146 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08 2147 2148 // Handle NAT Bit consumption on IA64. 2149 #ifdef _M_IA64 2150 #define EXCEPTION_REG_NAT_CONSUMPTION STATUS_REG_NAT_CONSUMPTION 2151 #endif 2152 2153 // Windows Vista/2008 heap corruption check 2154 #define EXCEPTION_HEAP_CORRUPTION 0xC0000374 2155 2156 #define def_excpt(val) #val, val 2157 2158 struct siglabel { 2159 char *name; 2160 int number; 2161 }; 2162 2163 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual 2164 // C++ compiler contain this error code. 
Because this is a compiler-generated 2165 // error, the code is not listed in the Win32 API header files. 2166 // The code is actually a cryptic mnemonic device, with the initial "E" 2167 // standing for "exception" and the final 3 bytes (0x6D7363) representing the 2168 // ASCII values of "msc". 2169 2170 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363 2171 2172 2173 struct siglabel exceptlabels[] = { 2174 def_excpt(EXCEPTION_ACCESS_VIOLATION), 2175 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), 2176 def_excpt(EXCEPTION_BREAKPOINT), 2177 def_excpt(EXCEPTION_SINGLE_STEP), 2178 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), 2179 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND), 2180 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO), 2181 def_excpt(EXCEPTION_FLT_INEXACT_RESULT), 2182 def_excpt(EXCEPTION_FLT_INVALID_OPERATION), 2183 def_excpt(EXCEPTION_FLT_OVERFLOW), 2184 def_excpt(EXCEPTION_FLT_STACK_CHECK), 2185 def_excpt(EXCEPTION_FLT_UNDERFLOW), 2186 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO), 2187 def_excpt(EXCEPTION_INT_OVERFLOW), 2188 def_excpt(EXCEPTION_PRIV_INSTRUCTION), 2189 def_excpt(EXCEPTION_IN_PAGE_ERROR), 2190 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION), 2191 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2), 2192 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION), 2193 def_excpt(EXCEPTION_STACK_OVERFLOW), 2194 def_excpt(EXCEPTION_INVALID_DISPOSITION), 2195 def_excpt(EXCEPTION_GUARD_PAGE), 2196 def_excpt(EXCEPTION_INVALID_HANDLE), 2197 def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION), 2198 def_excpt(EXCEPTION_HEAP_CORRUPTION), 2199 #ifdef _M_IA64 2200 def_excpt(EXCEPTION_REG_NAT_CONSUMPTION), 2201 #endif 2202 NULL, 0 2203 }; 2204 2205 const char* os::exception_name(int exception_code, char *buf, size_t size) { 2206 for (int i = 0; exceptlabels[i].name != NULL; i++) { 2207 if (exceptlabels[i].number == exception_code) { 2208 jio_snprintf(buf, size, "%s", exceptlabels[i].name); 2209 return buf; 2210 } 2211 } 2212 2213 return NULL; 2214 } 2215 2216 
//----------------------------------------------------------------------------- 2217 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2218 // handle exception caused by idiv; should only happen for -MinInt/-1 2219 // (division by zero is handled explicitly) 2220 #ifdef _M_IA64 2221 assert(0, "Fix Handle_IDiv_Exception"); 2222 #elif _M_AMD64 2223 PCONTEXT ctx = exceptionInfo->ContextRecord; 2224 address pc = (address)ctx->Rip; 2225 assert(pc[0] == 0xF7, "not an idiv opcode"); 2226 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2227 assert(ctx->Rax == min_jint, "unexpected idiv exception"); 2228 // set correct result values and continue after idiv instruction 2229 ctx->Rip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2230 ctx->Rax = (DWORD)min_jint; // result 2231 ctx->Rdx = (DWORD)0; // remainder 2232 // Continue the execution 2233 #else 2234 PCONTEXT ctx = exceptionInfo->ContextRecord; 2235 address pc = (address)ctx->Eip; 2236 assert(pc[0] == 0xF7, "not an idiv opcode"); 2237 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2238 assert(ctx->Eax == min_jint, "unexpected idiv exception"); 2239 // set correct result values and continue after idiv instruction 2240 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2241 ctx->Eax = (DWORD)min_jint; // result 2242 ctx->Edx = (DWORD)0; // remainder 2243 // Continue the execution 2244 #endif 2245 return EXCEPTION_CONTINUE_EXECUTION; 2246 } 2247 2248 //----------------------------------------------------------------------------- 2249 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2250 PCONTEXT ctx = exceptionInfo->ContextRecord; 2251 #ifndef _WIN64 2252 // handle exception caused by native method modifying control word 2253 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2254 2255 switch (exception_code) { 2256 case EXCEPTION_FLT_DENORMAL_OPERAND: 2257 case EXCEPTION_FLT_DIVIDE_BY_ZERO: 2258 case 
EXCEPTION_FLT_INEXACT_RESULT: 2259 case EXCEPTION_FLT_INVALID_OPERATION: 2260 case EXCEPTION_FLT_OVERFLOW: 2261 case EXCEPTION_FLT_STACK_CHECK: 2262 case EXCEPTION_FLT_UNDERFLOW: 2263 jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std()); 2264 if (fp_control_word != ctx->FloatSave.ControlWord) { 2265 // Restore FPCW and mask out FLT exceptions 2266 ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0; 2267 // Mask out pending FLT exceptions 2268 ctx->FloatSave.StatusWord &= 0xffffff00; 2269 return EXCEPTION_CONTINUE_EXECUTION; 2270 } 2271 } 2272 2273 if (prev_uef_handler != NULL) { 2274 // We didn't handle this exception so pass it to the previous 2275 // UnhandledExceptionFilter. 2276 return (prev_uef_handler)(exceptionInfo); 2277 } 2278 #else // !_WIN64 2279 // On Windows, the mxcsr control bits are non-volatile across calls 2280 // See also CR 6192333 2281 // 2282 jint MxCsr = INITIAL_MXCSR; 2283 // we can't use StubRoutines::addr_mxcsr_std() 2284 // because in Win64 mxcsr is not saved there 2285 if (MxCsr != ctx->MxCsr) { 2286 ctx->MxCsr = MxCsr; 2287 return EXCEPTION_CONTINUE_EXECUTION; 2288 } 2289 #endif // !_WIN64 2290 2291 return EXCEPTION_CONTINUE_SEARCH; 2292 } 2293 2294 static inline void report_error(Thread* t, DWORD exception_code, 2295 address addr, void* siginfo, void* context) { 2296 VMError err(t, exception_code, addr, siginfo, context); 2297 err.report_and_die(); 2298 2299 // If UseOsErrorReporting, this will return here and save the error file 2300 // somewhere where we can find it in the minidump. 
2301 } 2302 2303 //----------------------------------------------------------------------------- 2304 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { 2305 if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH; 2306 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2307 #ifdef _M_IA64 2308 // On Itanium, we need the "precise pc", which has the slot number coded 2309 // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format). 2310 address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress; 2311 // Convert the pc to "Unix format", which has the slot number coded 2312 // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2 2313 // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction" 2314 // information is saved in the Unix format. 2315 address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2)); 2316 #elif _M_AMD64 2317 address pc = (address) exceptionInfo->ContextRecord->Rip; 2318 #else 2319 address pc = (address) exceptionInfo->ContextRecord->Eip; 2320 #endif 2321 Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady 2322 2323 // Handle SafeFetch32 and SafeFetchN exceptions. 2324 if (StubRoutines::is_safefetch_fault(pc)) { 2325 return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc)); 2326 } 2327 2328 #ifndef _WIN64 2329 // Execution protection violation - win32 running on AMD64 only 2330 // Handled first to avoid misdiagnosis as a "normal" access violation; 2331 // This is safe to do because we have a new/unique ExceptionInformation 2332 // code for this condition. 
2333 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2334 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2335 int exception_subcode = (int) exceptionRecord->ExceptionInformation[0]; 2336 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2337 2338 if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) { 2339 int page_size = os::vm_page_size(); 2340 2341 // Make sure the pc and the faulting address are sane. 2342 // 2343 // If an instruction spans a page boundary, and the page containing 2344 // the beginning of the instruction is executable but the following 2345 // page is not, the pc and the faulting address might be slightly 2346 // different - we still want to unguard the 2nd page in this case. 2347 // 2348 // 15 bytes seems to be a (very) safe value for max instruction size. 2349 bool pc_is_near_addr = 2350 (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15); 2351 bool instr_spans_page_boundary = 2352 (align_size_down((intptr_t) pc ^ (intptr_t) addr, 2353 (intptr_t) page_size) > 0); 2354 2355 if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) { 2356 static volatile address last_addr = 2357 (address) os::non_memory_address_word(); 2358 2359 // In conservative mode, don't unguard unless the address is in the VM 2360 if (UnguardOnExecutionViolation > 0 && addr != last_addr && 2361 (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) { 2362 2363 // Set memory to RWX and retry 2364 address page_start = 2365 (address) align_size_down((intptr_t) addr, (intptr_t) page_size); 2366 bool res = os::protect_memory((char*) page_start, page_size, 2367 os::MEM_PROT_RWX); 2368 2369 if (PrintMiscellaneous && Verbose) { 2370 char buf[256]; 2371 jio_snprintf(buf, sizeof(buf), "Execution protection violation " 2372 "at " INTPTR_FORMAT 2373 ", unguarding " INTPTR_FORMAT ": %s", addr, 2374 page_start, (res ? 
"success" : strerror(errno))); 2375 tty->print_raw_cr(buf); 2376 } 2377 2378 // Set last_addr so if we fault again at the same address, we don't 2379 // end up in an endless loop. 2380 // 2381 // There are two potential complications here. Two threads trapping 2382 // at the same address at the same time could cause one of the 2383 // threads to think it already unguarded, and abort the VM. Likely 2384 // very rare. 2385 // 2386 // The other race involves two threads alternately trapping at 2387 // different addresses and failing to unguard the page, resulting in 2388 // an endless loop. This condition is probably even more unlikely 2389 // than the first. 2390 // 2391 // Although both cases could be avoided by using locks or thread 2392 // local last_addr, these solutions are unnecessary complication: 2393 // this handler is a best-effort safety net, not a complete solution. 2394 // It is disabled by default and should only be used as a workaround 2395 // in case we missed any no-execute-unsafe VM code. 2396 2397 last_addr = addr; 2398 2399 return EXCEPTION_CONTINUE_EXECUTION; 2400 } 2401 } 2402 2403 // Last unguard failed or not unguarding 2404 tty->print_raw_cr("Execution protection violation"); 2405 report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord, 2406 exceptionInfo->ContextRecord); 2407 return EXCEPTION_CONTINUE_SEARCH; 2408 } 2409 } 2410 #endif // _WIN64 2411 2412 // Check to see if we caught the safepoint code in the 2413 // process of write protecting the memory serialization page. 2414 // It write enables the page immediately after protecting it 2415 // so just return. 
2416 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2417 JavaThread* thread = (JavaThread*) t; 2418 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2419 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2420 if (os::is_memory_serialize_page(thread, addr)) { 2421 // Block current thread until the memory serialize page permission restored. 2422 os::block_on_serialize_page_trap(); 2423 return EXCEPTION_CONTINUE_EXECUTION; 2424 } 2425 } 2426 2427 if ((exception_code == EXCEPTION_ACCESS_VIOLATION) && 2428 VM_Version::is_cpuinfo_segv_addr(pc)) { 2429 // Verify that OS save/restore AVX registers. 2430 return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr()); 2431 } 2432 2433 if (t != NULL && t->is_Java_thread()) { 2434 JavaThread* thread = (JavaThread*) t; 2435 bool in_java = thread->thread_state() == _thread_in_Java; 2436 2437 // Handle potential stack overflows up front. 2438 if (exception_code == EXCEPTION_STACK_OVERFLOW) { 2439 if (os::uses_stack_guard_pages()) { 2440 #ifdef _M_IA64 2441 // Use guard page for register stack. 2442 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2443 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2444 // Check for a register stack overflow on Itanium 2445 if (thread->addr_inside_register_stack_red_zone(addr)) { 2446 // Fatal red zone violation happens if the Java program 2447 // catches a StackOverflow error and does so much processing 2448 // that it runs beyond the unprotected yellow guard zone. As 2449 // a result, we are out of here. 2450 fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit."); 2451 } else if(thread->addr_inside_register_stack(addr)) { 2452 // Disable the yellow zone which sets the state that 2453 // we've got a stack overflow problem. 2454 if (thread->stack_yellow_zone_enabled()) { 2455 thread->disable_stack_yellow_zone(); 2456 } 2457 // Give us some room to process the exception. 
2458 thread->disable_register_stack_guard(); 2459 // Tracing with +Verbose. 2460 if (Verbose) { 2461 tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc); 2462 tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr); 2463 tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base()); 2464 tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]", 2465 thread->register_stack_base(), 2466 thread->register_stack_base() + thread->stack_size()); 2467 } 2468 2469 // Reguard the permanent register stack red zone just to be sure. 2470 // We saw Windows silently disabling this without telling us. 2471 thread->enable_register_stack_red_zone(); 2472 2473 return Handle_Exception(exceptionInfo, 2474 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2475 } 2476 #endif 2477 if (thread->stack_yellow_zone_enabled()) { 2478 // Yellow zone violation. The o/s has unprotected the first yellow 2479 // zone page for us. Note: must call disable_stack_yellow_zone to 2480 // update the enabled status, even if the zone contains only one page. 2481 thread->disable_stack_yellow_zone(); 2482 // If not in java code, return and hope for the best. 2483 return in_java 2484 ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)) 2485 : EXCEPTION_CONTINUE_EXECUTION; 2486 } else { 2487 // Fatal red zone violation. 2488 thread->disable_stack_red_zone(); 2489 tty->print_raw_cr("An unrecoverable stack overflow has occurred."); 2490 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2491 exceptionInfo->ContextRecord); 2492 return EXCEPTION_CONTINUE_SEARCH; 2493 } 2494 } else if (in_java) { 2495 // JVM-managed guard pages cannot be used on win95/98. The o/s provides 2496 // a one-time-only guard page, which it has released to us. 
The next 2497 // stack overflow on this thread will result in an ACCESS_VIOLATION. 2498 return Handle_Exception(exceptionInfo, 2499 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2500 } else { 2501 // Can only return and hope for the best. Further stack growth will 2502 // result in an ACCESS_VIOLATION. 2503 return EXCEPTION_CONTINUE_EXECUTION; 2504 } 2505 } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2506 // Either stack overflow or null pointer exception. 2507 if (in_java) { 2508 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2509 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2510 address stack_end = thread->stack_base() - thread->stack_size(); 2511 if (addr < stack_end && addr >= stack_end - os::vm_page_size()) { 2512 // Stack overflow. 2513 assert(!os::uses_stack_guard_pages(), 2514 "should be caught by red zone code above."); 2515 return Handle_Exception(exceptionInfo, 2516 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2517 } 2518 // Check for safepoint polling and implicit null 2519 // We only expect null pointers in the stubs (vtable) 2520 // the rest are checked explicitly now. 
2521 CodeBlob* cb = CodeCache::find_blob(pc); 2522 if (cb != NULL) { 2523 if (os::is_poll_address(addr)) { 2524 address stub = SharedRuntime::get_poll_stub(pc); 2525 return Handle_Exception(exceptionInfo, stub); 2526 } 2527 } 2528 { 2529 #ifdef _WIN64 2530 // If it's a legal stack address map the entire region in 2531 // 2532 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2533 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2534 if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base()) { 2535 addr = (address)((uintptr_t)addr & 2536 (~((uintptr_t)os::vm_page_size() - (uintptr_t)1))); 2537 os::commit_memory((char *)addr, thread->stack_base() - addr, 2538 !ExecMem); 2539 return EXCEPTION_CONTINUE_EXECUTION; 2540 } else 2541 #endif 2542 { 2543 // Null pointer exception. 2544 #ifdef _M_IA64 2545 // Process implicit null checks in compiled code. Note: Implicit null checks 2546 // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs. 2547 if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) { 2548 CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format); 2549 // Handle implicit null check in UEP method entry 2550 if (cb && (cb->is_frame_complete_at(pc) || 2551 (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) { 2552 if (Verbose) { 2553 intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0); 2554 tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format); 2555 tty->print_cr(" to addr " INTPTR_FORMAT, addr); 2556 tty->print_cr(" bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)", 2557 *(bundle_start + 1), *bundle_start); 2558 } 2559 return Handle_Exception(exceptionInfo, 2560 SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL)); 2561 } 2562 } 2563 2564 // Implicit null checks were processed above. 
Hence, we should not reach 2565 // here in the usual case => die! 2566 if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception"); 2567 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2568 exceptionInfo->ContextRecord); 2569 return EXCEPTION_CONTINUE_SEARCH; 2570 2571 #else // !IA64 2572 2573 // Windows 98 reports faulting addresses incorrectly 2574 if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) || 2575 !os::win32::is_nt()) { 2576 address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); 2577 if (stub != NULL) return Handle_Exception(exceptionInfo, stub); 2578 } 2579 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2580 exceptionInfo->ContextRecord); 2581 return EXCEPTION_CONTINUE_SEARCH; 2582 #endif 2583 } 2584 } 2585 } 2586 2587 #ifdef _WIN64 2588 // Special care for fast JNI field accessors. 2589 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks 2590 // in and the heap gets shrunk before the field access. 2591 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2592 address addr = JNI_FastGetField::find_slowcase_pc(pc); 2593 if (addr != (address)-1) { 2594 return Handle_Exception(exceptionInfo, addr); 2595 } 2596 } 2597 #endif 2598 2599 // Stack overflow or null pointer exception in native code. 2600 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2601 exceptionInfo->ContextRecord); 2602 return EXCEPTION_CONTINUE_SEARCH; 2603 } // /EXCEPTION_ACCESS_VIOLATION 2604 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 2605 #if defined _M_IA64 2606 else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION || 2607 exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) { 2608 M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0); 2609 2610 // Compiled method patched to be non entrant? Following conditions must apply: 2611 // 1. 
must be first instruction in bundle 2612 // 2. must be a break instruction with appropriate code 2613 if ((((uint64_t) pc & 0x0F) == 0) && 2614 (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) { 2615 return Handle_Exception(exceptionInfo, 2616 (address)SharedRuntime::get_handle_wrong_method_stub()); 2617 } 2618 } // /EXCEPTION_ILLEGAL_INSTRUCTION 2619 #endif 2620 2621 2622 if (in_java) { 2623 switch (exception_code) { 2624 case EXCEPTION_INT_DIVIDE_BY_ZERO: 2625 return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO)); 2626 2627 case EXCEPTION_INT_OVERFLOW: 2628 return Handle_IDiv_Exception(exceptionInfo); 2629 2630 } // switch 2631 } 2632 if (((thread->thread_state() == _thread_in_Java) || 2633 (thread->thread_state() == _thread_in_native)) && 2634 exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) { 2635 LONG result=Handle_FLT_Exception(exceptionInfo); 2636 if (result==EXCEPTION_CONTINUE_EXECUTION) return result; 2637 } 2638 } 2639 2640 if (exception_code != EXCEPTION_BREAKPOINT) { 2641 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2642 exceptionInfo->ContextRecord); 2643 } 2644 return EXCEPTION_CONTINUE_SEARCH; 2645 } 2646 2647 #ifndef _WIN64 2648 // Special care for fast JNI accessors. 2649 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and 2650 // the heap gets shrunk before the field access. 2651 // Need to install our own structured exception handler since native code may 2652 // install its own. 
// SEH filter for the fast JNI accessor wrappers below: if the access
// violation occurred at a registered fast-accessor pc, redirect to the
// corresponding slow-case continuation; otherwise keep searching.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Generates a wrapper that calls the generated fast Get<Result>Field stub
// under a structured exception handler, so a trap caused by a concurrent
// heap shrink is routed to fastJNIAccessorExceptionFilter above.
#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
  Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
                                                     jobject obj,           \
                                                     jfieldID fieldID) {    \
    __try {                                                                 \
      return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
                                                                 obj,       \
                                                                 fieldID);  \
    } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
                                              _exception_info())) {         \
    }                                                                       \
    return 0;                                                               \
  }

DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
DEFINE_FAST_GETFIELD(jchar,    char,   Char)
DEFINE_FAST_GETFIELD(jshort,   short,  Short)
DEFINE_FAST_GETFIELD(jint,     int,    Int)
DEFINE_FAST_GETFIELD(jlong,    long,   Long)
DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
DEFINE_FAST_GETFIELD(jdouble,  double, Double)

// Return the SEH-protected wrapper for the fast JNI accessor of the
// given primitive type.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
  case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
  case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
  case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
  case T_INT:     return (address)jni_fast_GetIntField_wrapper;
  case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
  case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
  case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
  default:        ShouldNotReachHere();
  }
  // Unreachable; pacifies compilers that require a return here.
  return (address)-1;
}
#endif

// Run funcPtr under the VM's top-level SEH filter (used by test code).
void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
  // Install a win32 structured exception handler around the test
  // function call so the VM can generate an error dump if needed.
  __try {
    (*funcPtr)();
  } __except(topLevelExceptionFilter(
                                     (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }
}

// Virtual Memory

int os::vm_page_size() { return os::win32::vm_page_size(); }
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}

// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
//   + select Control Panel -> Administrative Tools -> Local Security Policy
//   + select Local Policies -> User Rights Assignment
//   + double click "Lock pages in memory", add users and/or groups
//   + reboot
// Note the above steps are needed for administrator as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment it only uses large page if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future, if so the code below needs to be revisited.

#ifndef MEM_LARGE_PAGES
#define MEM_LARGE_PAGES 0x20000000
#endif

// Process and token handles held only during large-page privilege setup;
// released by cleanup_after_large_page_init().
static HANDLE _hProcess;
static HANDLE _hToken;

// Container for NUMA node list info
class NUMANodeListHolder {
 private:
  int *_numa_used_node_list;  // allocated below
  int _numa_used_node_count;

  void free_node_list() {
    if (_numa_used_node_list != NULL) {
      FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
    }
  }

 public:
  NUMANodeListHolder() {
    _numa_used_node_count = 0;
    _numa_used_node_list = NULL;
    // do rest of initialization in build routine (after function pointers are set up)
  }

  ~NUMANodeListHolder() {
    free_node_list();
  }

  // Enumerate the NUMA nodes whose processors intersect this process's
  // affinity mask. Returns true only when more than one node is usable
  // (interleaving across a single node is pointless).
  bool build() {
    DWORD_PTR proc_aff_mask;
    DWORD_PTR sys_aff_mask;
    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
    ULONG highest_node_number;
    if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false;
    free_node_list();
    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
    for (unsigned int i = 0; i <= highest_node_number; i++) {
      ULONGLONG proc_mask_numa_node;
      if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
      if ((proc_aff_mask & proc_mask_numa_node)!=0) {
        _numa_used_node_list[_numa_used_node_count++] = i;
      }
    }
    return (_numa_used_node_count > 1);
  }

  int get_count() { return _numa_used_node_count; }
  int get_node_list_entry(int n) {
    // for indexes out of range, returns -1
    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
  }

} numa_node_list_holder;



static size_t _large_page_size = 0;

// True when both the kernel32 large-page entry point and the advapi32
// privilege APIs could be resolved.
static bool resolve_functions_for_large_page_init() {
  return os::Kernel32Dll::GetLargePageMinimumAvailable() &&
    os::Advapi32Dll::AdvapiAvailable();
}

// Enable SeLockMemoryPrivilege on the current process token, which is
// required before VirtualAlloc(MEM_LARGE_PAGES) can succeed.
static bool request_lock_memory_privilege() {
  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
                          os::current_process_id());

  LUID luid;
  if (_hProcess != NULL &&
      os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
      os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

    TOKEN_PRIVILEGES tp;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Luid = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See MSDN document.
    if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
        (GetLastError() == ERROR_SUCCESS)) {
      return true;
    }
  }

  return false;
}

// Close the handles opened by request_lock_memory_privilege().
static void cleanup_after_large_page_init() {
  if (_hProcess) CloseHandle(_hProcess);
  _hProcess = NULL;
  if (_hToken) CloseHandle(_hToken);
  _hToken = NULL;
}

// Decide whether NUMA interleaving can be used: aligns the granularity
// flag, probes the NUMA APIs and builds the usable-node list. Warnings
// are only printed when the user explicitly asked for interleaving.
static bool numa_interleaving_init() {
  bool success = false;
  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

  // print a warning if UseNUMAInterleaving flag is specified on command line
  bool warn_on_failure = use_numa_interleaving_specified;
#define WARN(msg) if (warn_on_failure) { warning(msg); }

  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);

  if (os::Kernel32Dll::NumaCallsAvailable()) {
    if (numa_node_list_holder.build()) {
      if (PrintMiscellaneous && Verbose) {
        tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
        for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
          tty->print("%d ", numa_node_list_holder.get_node_list_entry(i));
        }
        tty->print("\n");
      }
      success = true;
    } else {
      WARN("Process does not cover multiple NUMA nodes.");
    }
  } else {
    WARN("NUMA Interleaving is not supported by the operating system.");
  }
  if (!success) {
    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
  }
  return success;
#undef WARN
}

// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
//  * UseNUMAInterleaving requires a separate node for each piece
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
                                         DWORD prot,
                                         bool should_inject_error = false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ?
NUMAInterleaveGranularity : page_size; 2884 2885 // first reserve enough address space in advance since we want to be 2886 // able to break a single contiguous virtual address range into multiple 2887 // large page commits but WS2003 does not allow reserving large page space 2888 // so we just use 4K pages for reserve, this gives us a legal contiguous 2889 // address space. then we will deallocate that reservation, and re alloc 2890 // using large pages 2891 const size_t size_of_reserve = bytes + chunk_size; 2892 if (bytes > size_of_reserve) { 2893 // Overflowed. 2894 return NULL; 2895 } 2896 p_buf = (char *) VirtualAlloc(addr, 2897 size_of_reserve, // size of Reserve 2898 MEM_RESERVE, 2899 PAGE_READWRITE); 2900 // If reservation failed, return NULL 2901 if (p_buf == NULL) return NULL; 2902 MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC); 2903 os::release_memory(p_buf, bytes + chunk_size); 2904 2905 // we still need to round up to a page boundary (in case we are using large pages) 2906 // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size) 2907 // instead we handle this in the bytes_to_rq computation below 2908 p_buf = (char *) align_size_up((size_t)p_buf, page_size); 2909 2910 // now go through and allocate one chunk at a time until all bytes are 2911 // allocated 2912 size_t bytes_remaining = bytes; 2913 // An overflow of align_size_up() would have been caught above 2914 // in the calculation of size_of_reserve. 
2915 char * next_alloc_addr = p_buf; 2916 HANDLE hProc = GetCurrentProcess(); 2917 2918 #ifdef ASSERT 2919 // Variable for the failure injection 2920 long ran_num = os::random(); 2921 size_t fail_after = ran_num % bytes; 2922 #endif 2923 2924 int count=0; 2925 while (bytes_remaining) { 2926 // select bytes_to_rq to get to the next chunk_size boundary 2927 2928 size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size)); 2929 // Note allocate and commit 2930 char * p_new; 2931 2932 #ifdef ASSERT 2933 bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after); 2934 #else 2935 const bool inject_error_now = false; 2936 #endif 2937 2938 if (inject_error_now) { 2939 p_new = NULL; 2940 } else { 2941 if (!UseNUMAInterleaving) { 2942 p_new = (char *) VirtualAlloc(next_alloc_addr, 2943 bytes_to_rq, 2944 flags, 2945 prot); 2946 } else { 2947 // get the next node to use from the used_node_list 2948 assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected"); 2949 DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count()); 2950 p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc, 2951 next_alloc_addr, 2952 bytes_to_rq, 2953 flags, 2954 prot, 2955 node); 2956 } 2957 } 2958 2959 if (p_new == NULL) { 2960 // Free any allocated pages 2961 if (next_alloc_addr > p_buf) { 2962 // Some memory was committed so release it. 2963 size_t bytes_to_release = bytes - bytes_remaining; 2964 // NMT has yet to record any individual blocks, so it 2965 // need to create a dummy 'reserve' record to match 2966 // the release. 
2967 MemTracker::record_virtual_memory_reserve((address)p_buf, 2968 bytes_to_release, CALLER_PC); 2969 os::release_memory(p_buf, bytes_to_release); 2970 } 2971 #ifdef ASSERT 2972 if (should_inject_error) { 2973 if (TracePageSizes && Verbose) { 2974 tty->print_cr("Reserving pages individually failed."); 2975 } 2976 } 2977 #endif 2978 return NULL; 2979 } 2980 2981 bytes_remaining -= bytes_to_rq; 2982 next_alloc_addr += bytes_to_rq; 2983 count++; 2984 } 2985 // Although the memory is allocated individually, it is returned as one. 2986 // NMT records it as one block. 2987 if ((flags & MEM_COMMIT) != 0) { 2988 MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC); 2989 } else { 2990 MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC); 2991 } 2992 2993 // made it this far, success 2994 return p_buf; 2995 } 2996 2997 2998 2999 void os::large_page_init() { 3000 if (!UseLargePages) return; 3001 3002 // print a warning if any large page related flag is specified on command line 3003 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) || 3004 !FLAG_IS_DEFAULT(LargePageSizeInBytes); 3005 bool success = false; 3006 3007 #define WARN(msg) if (warn_on_failure) { warning(msg); } 3008 if (resolve_functions_for_large_page_init()) { 3009 if (request_lock_memory_privilege()) { 3010 size_t s = os::Kernel32Dll::GetLargePageMinimum(); 3011 if (s) { 3012 #if defined(IA32) || defined(AMD64) 3013 if (s > 4*M || LargePageSizeInBytes > 4*M) { 3014 WARN("JVM cannot use large pages bigger than 4mb."); 3015 } else { 3016 #endif 3017 if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) { 3018 _large_page_size = LargePageSizeInBytes; 3019 } else { 3020 _large_page_size = s; 3021 } 3022 success = true; 3023 #if defined(IA32) || defined(AMD64) 3024 } 3025 #endif 3026 } else { 3027 WARN("Large page is not supported by the processor."); 3028 } 3029 } else { 3030 WARN("JVM cannot use large page memory because it does not have enough 
privilege to lock pages in memory."); 3031 } 3032 } else { 3033 WARN("Large page is not supported by the operating system."); 3034 } 3035 #undef WARN 3036 3037 const size_t default_page_size = (size_t) vm_page_size(); 3038 if (success && _large_page_size > default_page_size) { 3039 _page_sizes[0] = _large_page_size; 3040 _page_sizes[1] = default_page_size; 3041 _page_sizes[2] = 0; 3042 } 3043 3044 cleanup_after_large_page_init(); 3045 UseLargePages = success; 3046 } 3047 3048 // On win32, one cannot release just a part of reserved memory, it's an 3049 // all or nothing deal. When we split a reservation, we must break the 3050 // reservation into two reservations. 3051 void os::pd_split_reserved_memory(char *base, size_t size, size_t split, 3052 bool realloc) { 3053 if (size > 0) { 3054 release_memory(base, size); 3055 if (realloc) { 3056 reserve_memory(split, base); 3057 } 3058 if (size != split) { 3059 reserve_memory(size - split, base + split); 3060 } 3061 } 3062 } 3063 3064 // Multiple threads can race in this code but it's not possible to unmap small sections of 3065 // virtual space to get requested alignment, like posix-like os's. 3066 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe. 
// Reserve 'size' bytes at an address aligned to 'alignment'. Over-reserves,
// releases, then re-reserves at the aligned address; retries the whole dance
// if another thread grabbed the target range in the window between release
// and re-reserve.
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);

    // Win32 cannot release a sub-range, so release the whole over-sized
    // reservation and immediately re-reserve just the aligned part.
    os::release_memory(extra_base, extra_size);

    aligned_base = os::reserve_memory(size, aligned_base);

  } while (aligned_base == NULL);

  return aligned_base;
}

// Reserve (not commit) 'bytes' of virtual address space, optionally at 'addr'.
// With NUMA interleaving (and small pages) the range is built chunk-by-chunk
// across nodes; otherwise a single VirtualAlloc suffices.
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_page_size() == 0, "reserve page size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if (Verbose && PrintMiscellaneous) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if (Verbose && PrintMiscellaneous) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails of the requested address range is
  // not avilable.
  return reserve_memory(bytes, requested_addr);
}

size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

bool os::can_execute_large_page_memory() {
  return true;
}

// Reserve AND commit 'bytes' backed by large pages. Falls back to small
// pages (returns NULL) if size/alignment are incompatible with the large
// page size. Uses per-page individual allocation on WS2003 or under NUMA.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
                                 bool exec) {
  assert(UseLargePages, "only for large pages");

  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
    return NULL; // Fallback to small pages.
  }

  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;

  // with large pages, there are two cases where we need to use Individual Allocation
  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages individually.");
    }
    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
    if (p_buf == NULL) {
      // give an appropriate warning message
      if (UseNUMAInterleaving) {
        warning("NUMA large page allocation failed, UseLargePages flag ignored");
      }
      if (UseLargePagesIndividualAllocation) {
        warning("Individually allocated large pages failed, "
                "use -XX:-UseLargePagesIndividualAllocation to turn off");
      }
      return NULL;
    }

    return p_buf;

  } else {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages in a single large chunk.");
    }
    // normal policy just allocate it all at once
    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
    if (res != NULL) {
      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
    }

    return res;
  }
}

bool os::release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  return release_memory(base, bytes);
}

void os::print_statistics() {
}

// Emit a diagnostic (non-product paths) describing a failed commit, including
// the DOS error / errno text from the OS.
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}

// Commit previously reserved memory. Under NUMA interleaving a single commit
// may span multiple underlying VirtualAlloc reservations, so the range is
// committed region-by-region as reported by VirtualQuery.
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that. The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

// Commit or abort the VM with an out-of-memory error carrying 'mesg'.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  }
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
  return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
}

bool os::pd_release_memory(char* addr, size_t bytes) {
  // MEM_RELEASE requires size 0 and frees the entire original reservation.
  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}

// Set protections specified
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
  case MEM_PROT_READ: p = PAGE_READONLY; break;
  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
  default:
    ShouldNotReachHere();
  }

  DWORD old_status;

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed) {
    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
                          "cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard page
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
  //
  // Pages in the region become guard pages. Any attempt to access a guard page
  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
  // the guard page status. Guard pages thus act as a one-time access alarm.
  return VirtualProtect(addr, bytes, p, &old_status) != 0;
}

bool os::guard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}

bool os::unguard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}

// NUMA/page hints below are no-ops on Windows; node placement is handled at
// reservation time (allocate_pages_individually).
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
bool os::numa_topology_changed() { return false; }
size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id() { return 0; }
// Fill 'ids' with up to 'size' NUMA node ids; reports a single group 0 on
// UMA systems (empty node list).
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // Provide an answer for UMA systems
    ids[0] = 0;
    return 1;
  } else {
    // check for size bigger than actual groups_num
    size = MIN2(size, numa_get_groups_num());
    for (int i = 0; i < (int)size; i++) {
      ids[i] = numa_node_list_holder.get_node_list_entry(i);
    }
    return size;
  }
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected,
                     page_info* page_found) {
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}

#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL

void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}

class HighResolutionInterval : public CHeapObj<mtThread> {
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
 private:
  jlong resolution;  // non-zero iff we raised the timer resolution (RAII)
 public:
  HighResolutionInterval(jlong ms) {
    resolution = ms % 10L;
    if (resolution != 0) {
      MMRESULT result = timeBeginPeriod(1L);
    }
  }
  ~HighResolutionInterval() {
    if (resolution != 0) {
      MMRESULT result = timeEndPeriod(1L);
    }
    resolution = 0L;
  }
};

// Sleep for 'ms' milliseconds; interruptible sleeps wait on the thread's
// interrupt event. Durations beyond MAXDWORD are performed in chunks via
// recursion. Returns OS_TIMEOUT or OS_INTRPT.
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  while (ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) {
      return res;
    }
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    HighResolutionInterval *phri=NULL;
    if (!ForceTimeHighResolution) {
      phri = new HighResolutionInterval(ms);
    }
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}

// Short sleep, direct OS call.
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    Sleep(100000);  // ... 100 seconds at a time
  }
}

typedef BOOL (WINAPI * STTSignature)(void);

void os::naked_yield() {
  // Use either SwitchToThread() or Sleep(0)
  // Consider passing back the return value from SwitchToThread().
  if (os::Kernel32Dll::SwitchToThreadAvailable()) {
    SwitchToThread();
  } else {
    Sleep(0);
  }
}

// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.

int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};

// Alternate mapping used when -XX:ThreadPriorityPolicy=1: spreads Java
// priorities over a wider win32 range, up to TIME_CRITICAL.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};

// Install the priority table selected by ThreadPriorityPolicy /
// UseCriticalJavaThreadPriority flags. Return value is unused.
static int prio_init() {
  // If ThreadPriorityPolicy is 1, switch tables
  if (ThreadPriorityPolicy == 1) {
    int i;
    for (i = 0; i < CriticalPriority + 1; i++) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  if (UseCriticalJavaThreadPriority) {
    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
  }
  return 0;
}

OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) return OS_OK;
  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
  return ret ? OS_OK : OS_ERR;
}

OSReturn os::get_native_priority(const Thread* const thread,
                                 int* priority_ptr) {
  if (!UseThreadPriorities) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

// Post an interrupt to 'thread': set the interrupted flag, signal the
// interrupt event, and unpark both the JSR166 parker and the ParkEvent.
void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread ||
         Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread()) {
    ((JavaThread*)thread)->parker()->unpark();
  }

  ParkEvent * ev = thread->_ParkEvent;
  if (ev != NULL) ev->unpark();
}


// Query (and optionally clear) the thread's interrupted state. The event
// check guards against the lost/spurious wakeup race described below.
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. By checking thread interrupt event to see
  // if the thread gets real interrupt thus prevent spurious wakeup.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}

// Get's a pc (hint) for a running thread. Currently used only for profiling.
ExtendedPC os::get_thread_pc(Thread* thread) {
  CONTEXT context;
  context.ContextFlags = CONTEXT_CONTROL;
  HANDLE handle = thread->osthread()->thread_handle();
#ifdef _M_IA64
  assert(0, "Fix get_thread_pc");
  return ExtendedPC(NULL);
#else
  if (GetThreadContext(handle, &context)) {
#ifdef _M_AMD64
    return ExtendedPC((address) context.Rip);
#else
    return ExtendedPC((address) context.Eip);
#endif
  } else {
    return ExtendedPC(NULL);
  }
#endif
}

// GetCurrentThreadId() returns DWORD
intx os::current_thread_id()  { return GetCurrentThreadId(); }

static int _initial_pid = 0;

int os::current_process_id() {
  return (_initial_pid ? _initial_pid : _getpid());
}

// Cached system characteristics, filled in by initialize_system_info().
int    os::win32::_vm_page_size              = 0;
int    os::win32::_vm_allocation_granularity = 0;
int    os::win32::_processor_type            = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int    os::win32::_processor_level           = 0;
julong os::win32::_physical_memory           = 0;
size_t os::win32::_default_stack_size        = 0;

intx          os::win32::_os_thread_limit    = 0;
volatile intx os::win32::_os_thread_count    = 0;

bool   os::win32::_is_nt                     = false;
bool   os::win32::_is_windows_2003           = false;
bool   os::win32::_is_windows_server         = false;

// 6573254
// Currently, the bug is observed across all the supported Windows releases,
// including the latest one (as of this writing - Windows Server 2012 R2)
bool   os::win32::_has_exit_bug              = true;
bool   os::win32::_has_performance_count     = 0;

// Populate the cached system characteristics above: page size, allocation
// granularity, processor info, physical memory, OS flavor, and the default
// stack size. Called once during VM startup.
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size    = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type  = si.dwProcessorType;
  _processor_level = si.wProcessorLevel;
  set_processor_count(si.dwNumberOfProcessors);

  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);

  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatusEx(&ms);
  _physical_memory = ms.ullTotalPhys;

  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch (oi.dwPlatformId) {
  case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
  case VER_PLATFORM_WIN32_NT:
    _is_nt = true;
    {
      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
      if (os_vers == 5002) {
        _is_windows_2003 = true;
      }
      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
          oi.wProductType == VER_NT_SERVER) {
        _is_windows_server = true;
      }
    }
    break;
  default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
         "stack size not a multiple of page size");

  initialize_performance_counter();

  // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is
  // known to deadlock the system, if the VM issues to thread operations with
  // a too high frequency, e.g., such as changing the priorities.
  // The 6000 seems to work well - no deadlocks has been notices on the test
  // programs that we have seen experience this problem.
  if (!os::win32::is_nt()) {
    StarvationMonitorInterval = 6000;
  }
}


// Load a system DLL by bare name, searching only the System and Windows
// directories (never the CWD or PATH) to avoid DLL-preloading attacks.
// On failure, an explanatory message is placed in 'ebuf'.
HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
                                      int ebuflen) {
  char path[MAX_PATH];
  DWORD size;
  DWORD pathLen = (DWORD)sizeof(path);
  HINSTANCE result = NULL;

  // only allow library name without path component
  assert(strchr(name, '\\') == NULL, "path not allowed");
  assert(strchr(name, ':') == NULL, "path not allowed");
  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
    jio_snprintf(ebuf, ebuflen,
                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
    return NULL;
  }

  // search system directory
  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // try Windows directory
  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  jio_snprintf(ebuf, ebuflen,
               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
  return NULL;
}

#define EXIT_TIMEOUT PRODUCT_ONLY(1000) NOT_PRODUCT(4000) /* 1 sec in product, 4 sec in debug */

// InitOnceExecuteOnce callback: one-time initialization of the critical
// section passed via 'pcrit_sect'.
static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
  InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
  return TRUE;
}

int os::win32::exit_process_or_thread(Ept what, int exit_code) {
  // Basic
approach: 3800 // - Each exiting thread registers its intent to exit and then does so. 3801 // - A thread trying to terminate the process must wait for all 3802 // threads currently exiting to complete their exit. 3803 3804 if (os::win32::has_exit_bug()) { 3805 // The array holds handles of the threads that have started exiting by calling 3806 // _endthreadex(). 3807 // Should be large enough to avoid blocking the exiting thread due to lack of 3808 // a free slot. 3809 static HANDLE handles[MAXIMUM_WAIT_OBJECTS]; 3810 static int handle_count = 0; 3811 3812 static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT; 3813 static CRITICAL_SECTION crit_sect; 3814 static volatile jint process_exiting = 0; 3815 int i, j; 3816 DWORD res; 3817 HANDLE hproc, hthr; 3818 3819 // The first thread that reached this point, initializes the critical section. 3820 if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) { 3821 warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__); 3822 } else if (OrderAccess::load_acquire(&process_exiting) == 0) { 3823 EnterCriticalSection(&crit_sect); 3824 3825 if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) { 3826 // Remove from the array those handles of the threads that have completed exiting. 3827 for (i = 0, j = 0; i < handle_count; ++i) { 3828 res = WaitForSingleObject(handles[i], 0 /* don't wait */); 3829 if (res == WAIT_TIMEOUT) { 3830 handles[j++] = handles[i]; 3831 } else { 3832 if (res == WAIT_FAILED) { 3833 warning("WaitForSingleObject failed (%u) in %s: %d\n", 3834 GetLastError(), __FILE__, __LINE__); 3835 } 3836 // Don't keep the handle, if we failed waiting for it. 3837 CloseHandle(handles[i]); 3838 } 3839 } 3840 3841 // If there's no free slot in the array of the kept handles, we'll have to 3842 // wait until at least one thread completes exiting. 
3843 if ((handle_count = j) == MAXIMUM_WAIT_OBJECTS) { 3844 // Raise the priority of the oldest exiting thread to increase its chances 3845 // to complete sooner. 3846 SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL); 3847 res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT); 3848 if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) { 3849 i = (res - WAIT_OBJECT_0); 3850 handle_count = MAXIMUM_WAIT_OBJECTS - 1; 3851 for (; i < handle_count; ++i) { 3852 handles[i] = handles[i + 1]; 3853 } 3854 } else { 3855 warning("WaitForMultipleObjects %s (%u) in %s: %d\n", 3856 (res == WAIT_FAILED ? "failed" : "timed out"), 3857 GetLastError(), __FILE__, __LINE__); 3858 // Don't keep handles, if we failed waiting for them. 3859 for (i = 0; i < MAXIMUM_WAIT_OBJECTS; ++i) { 3860 CloseHandle(handles[i]); 3861 } 3862 handle_count = 0; 3863 } 3864 } 3865 3866 // Store a duplicate of the current thread handle in the array of handles. 3867 hproc = GetCurrentProcess(); 3868 hthr = GetCurrentThread(); 3869 if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count], 3870 0, FALSE, DUPLICATE_SAME_ACCESS)) { 3871 warning("DuplicateHandle failed (%u) in %s: %d\n", 3872 GetLastError(), __FILE__, __LINE__); 3873 } else { 3874 ++handle_count; 3875 } 3876 3877 // The current exiting thread has stored its handle in the array, and now 3878 // should leave the critical section before calling _endthreadex(). 3879 3880 } else if (what != EPT_THREAD) { 3881 if (handle_count > 0) { 3882 // Before ending the process, make sure all the threads that had called 3883 // _endthreadex() completed. 3884 3885 // Set the priority level of the current thread to the same value as 3886 // the priority level of exiting threads. 3887 // This is to ensure it will be given a fair chance to execute if 3888 // the timeout expires. 
3889 hthr = GetCurrentThread(); 3890 SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL); 3891 for (i = 0; i < handle_count; ++i) { 3892 SetThreadPriority(handles[i], THREAD_PRIORITY_ABOVE_NORMAL); 3893 } 3894 res = WaitForMultipleObjects(handle_count, handles, TRUE, EXIT_TIMEOUT); 3895 if (res == WAIT_FAILED || res == WAIT_TIMEOUT) { 3896 warning("WaitForMultipleObjects %s (%u) in %s: %d\n", 3897 (res == WAIT_FAILED ? "failed" : "timed out"), 3898 GetLastError(), __FILE__, __LINE__); 3899 } 3900 for (i = 0; i < handle_count; ++i) { 3901 CloseHandle(handles[i]); 3902 } 3903 handle_count = 0; 3904 } 3905 3906 OrderAccess::release_store(&process_exiting, 1); 3907 } 3908 3909 LeaveCriticalSection(&crit_sect); 3910 } 3911 3912 if (what == EPT_THREAD) { 3913 while (OrderAccess::load_acquire(&process_exiting) != 0) { 3914 // Some other thread is about to call exit(), so we 3915 // don't let the current thread proceed to _endthreadex() 3916 SuspendThread(GetCurrentThread()); 3917 // Avoid busy-wait loop, if SuspendThread() failed. 3918 Sleep(EXIT_TIMEOUT); 3919 } 3920 } 3921 } 3922 3923 // We are here if either 3924 // - there's no 'race at exit' bug on this OS release; 3925 // - initialization of the critical section failed (unlikely); 3926 // - the current thread has stored its handle and left the critical section; 3927 // - the process-exiting thread has raised the flag and left the critical section. 3928 if (what == EPT_THREAD) { 3929 _endthreadex((unsigned)exit_code); 3930 } else if (what == EPT_PROCESS) { 3931 ::exit(exit_code); 3932 } else { 3933 _exit(exit_code); 3934 } 3935 3936 // Should not reach here 3937 return exit_code; 3938 } 3939 3940 #undef EXIT_TIMEOUT 3941 3942 void os::win32::setmode_streams() { 3943 _setmode(_fileno(stdin), _O_BINARY); 3944 _setmode(_fileno(stdout), _O_BINARY); 3945 _setmode(_fileno(stderr), _O_BINARY); 3946 } 3947 3948 3949 bool os::is_debugger_attached() { 3950 return IsDebuggerPresent() ? 
true : false; 3951 } 3952 3953 3954 void os::wait_for_keypress_at_exit(void) { 3955 if (PauseAtExit) { 3956 fprintf(stderr, "Press any key to continue...\n"); 3957 fgetc(stdin); 3958 } 3959 } 3960 3961 3962 int os::message_box(const char* title, const char* message) { 3963 int result = MessageBox(NULL, message, title, 3964 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY); 3965 return result == IDYES; 3966 } 3967 3968 int os::allocate_thread_local_storage() { 3969 return TlsAlloc(); 3970 } 3971 3972 3973 void os::free_thread_local_storage(int index) { 3974 TlsFree(index); 3975 } 3976 3977 3978 void os::thread_local_storage_at_put(int index, void* value) { 3979 TlsSetValue(index, value); 3980 assert(thread_local_storage_at(index) == value, "Just checking"); 3981 } 3982 3983 3984 void* os::thread_local_storage_at(int index) { 3985 return TlsGetValue(index); 3986 } 3987 3988 3989 #ifndef PRODUCT 3990 #ifndef _WIN64 3991 // Helpers to check whether NX protection is enabled 3992 int nx_exception_filter(_EXCEPTION_POINTERS *pex) { 3993 if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 3994 pex->ExceptionRecord->NumberParameters > 0 && 3995 pex->ExceptionRecord->ExceptionInformation[0] == 3996 EXCEPTION_INFO_EXEC_VIOLATION) { 3997 return EXCEPTION_EXECUTE_HANDLER; 3998 } 3999 return EXCEPTION_CONTINUE_SEARCH; 4000 } 4001 4002 void nx_check_protection() { 4003 // If NX is enabled we'll get an exception calling into code on the stack 4004 char code[] = { (char)0xC3 }; // ret 4005 void *code_ptr = (void *)code; 4006 __try { 4007 __asm call code_ptr 4008 } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) { 4009 tty->print_raw_cr("NX protection detected."); 4010 } 4011 } 4012 #endif // _WIN64 4013 #endif // PRODUCT 4014 4015 // this is called _before_ the global arguments have been parsed 4016 void os::init(void) { 4017 _initial_pid = _getpid(); 4018 4019 init_random(1234567); 4020 4021 
win32::initialize_system_info(); 4022 win32::setmode_streams(); 4023 init_page_sizes((size_t) win32::vm_page_size()); 4024 4025 // This may be overridden later when argument processing is done. 4026 FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, 4027 os::win32::is_windows_2003()); 4028 4029 // Initialize main_process and main_thread 4030 main_process = GetCurrentProcess(); // Remember main_process is a pseudo handle 4031 if (!DuplicateHandle(main_process, GetCurrentThread(), main_process, 4032 &main_thread, THREAD_ALL_ACCESS, false, 0)) { 4033 fatal("DuplicateHandle failed\n"); 4034 } 4035 main_thread_id = (int) GetCurrentThreadId(); 4036 } 4037 4038 // To install functions for atexit processing 4039 extern "C" { 4040 static void perfMemory_exit_helper() { 4041 perfMemory_exit(); 4042 } 4043 } 4044 4045 static jint initSock(); 4046 4047 // this is called _after_ the global arguments have been parsed 4048 jint os::init_2(void) { 4049 // Allocate a single page and mark it as readable for safepoint polling 4050 address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY); 4051 guarantee(polling_page != NULL, "Reserve Failed for polling page"); 4052 4053 address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY); 4054 guarantee(return_page != NULL, "Commit Failed for polling page"); 4055 4056 os::set_polling_page(polling_page); 4057 4058 #ifndef PRODUCT 4059 if (Verbose && PrintMiscellaneous) { 4060 tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", 4061 (intptr_t)polling_page); 4062 } 4063 #endif 4064 4065 if (!UseMembar) { 4066 address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE); 4067 guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page"); 4068 4069 return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE); 4070 guarantee(return_page != 
NULL, "Commit Failed for memory serialize page"); 4071 4072 os::set_memory_serialize_page(mem_serialize_page); 4073 4074 #ifndef PRODUCT 4075 if (Verbose && PrintMiscellaneous) { 4076 tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", 4077 (intptr_t)mem_serialize_page); 4078 } 4079 #endif 4080 } 4081 4082 // Setup Windows Exceptions 4083 4084 // for debugging float code generation bugs 4085 if (ForceFloatExceptions) { 4086 #ifndef _WIN64 4087 static long fp_control_word = 0; 4088 __asm { fstcw fp_control_word } 4089 // see Intel PPro Manual, Vol. 2, p 7-16 4090 const long precision = 0x20; 4091 const long underflow = 0x10; 4092 const long overflow = 0x08; 4093 const long zero_div = 0x04; 4094 const long denorm = 0x02; 4095 const long invalid = 0x01; 4096 fp_control_word |= invalid; 4097 __asm { fldcw fp_control_word } 4098 #endif 4099 } 4100 4101 // If stack_commit_size is 0, windows will reserve the default size, 4102 // but only commit a small portion of it. 4103 size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size()); 4104 size_t default_reserve_size = os::win32::default_stack_size(); 4105 size_t actual_reserve_size = stack_commit_size; 4106 if (stack_commit_size < default_reserve_size) { 4107 // If stack_commit_size == 0, we want this too 4108 actual_reserve_size = default_reserve_size; 4109 } 4110 4111 // Check minimum allowable stack size for thread creation and to initialize 4112 // the java system classes, including StackOverflowError - depends on page 4113 // size. Add a page for compiler2 recursion in main thread. 4114 // Add in 2*BytesPerWord times page size to account for VM stack during 4115 // class initialization depending on 32 or 64 bit VM. 
4116 size_t min_stack_allowed = 4117 (size_t)(StackYellowPages+StackRedPages+StackShadowPages+ 4118 2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size(); 4119 if (actual_reserve_size < min_stack_allowed) { 4120 tty->print_cr("\nThe stack size specified is too small, " 4121 "Specify at least %dk", 4122 min_stack_allowed / K); 4123 return JNI_ERR; 4124 } 4125 4126 JavaThread::set_stack_size_at_create(stack_commit_size); 4127 4128 // Calculate theoretical max. size of Threads to guard gainst artifical 4129 // out-of-memory situations, where all available address-space has been 4130 // reserved by thread stacks. 4131 assert(actual_reserve_size != 0, "Must have a stack"); 4132 4133 // Calculate the thread limit when we should start doing Virtual Memory 4134 // banging. Currently when the threads will have used all but 200Mb of space. 4135 // 4136 // TODO: consider performing a similar calculation for commit size instead 4137 // as reserve size, since on a 64-bit platform we'll run into that more 4138 // often than running out of virtual memory space. We can use the 4139 // lower value of the two calculations as the os_thread_limit. 4140 size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K); 4141 win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size); 4142 4143 // at exit methods are called in the reverse order of their registration. 4144 // there is no limit to the number of functions registered. atexit does 4145 // not set errno. 4146 4147 if (PerfAllowAtExitRegistration) { 4148 // only register atexit functions if PerfAllowAtExitRegistration is set. 4149 // atexit functions can be delayed until process exit time, which 4150 // can be problematic for embedded VM situations. Embedded VMs should 4151 // call DestroyJavaVM() to assure that VM resources are released. 
4152 4153 // note: perfMemory_exit_helper atexit function may be removed in 4154 // the future if the appropriate cleanup code can be added to the 4155 // VM_Exit VMOperation's doit method. 4156 if (atexit(perfMemory_exit_helper) != 0) { 4157 warning("os::init_2 atexit(perfMemory_exit_helper) failed"); 4158 } 4159 } 4160 4161 #ifndef _WIN64 4162 // Print something if NX is enabled (win32 on AMD64) 4163 NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection()); 4164 #endif 4165 4166 // initialize thread priority policy 4167 prio_init(); 4168 4169 if (UseNUMA && !ForceNUMA) { 4170 UseNUMA = false; // We don't fully support this yet 4171 } 4172 4173 if (UseNUMAInterleaving) { 4174 // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag 4175 bool success = numa_interleaving_init(); 4176 if (!success) UseNUMAInterleaving = false; 4177 } 4178 4179 if (initSock() != JNI_OK) { 4180 return JNI_ERR; 4181 } 4182 4183 return JNI_OK; 4184 } 4185 4186 // Mark the polling page as unreadable 4187 void os::make_polling_page_unreadable(void) { 4188 DWORD old_status; 4189 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), 4190 PAGE_NOACCESS, &old_status)) { 4191 fatal("Could not disable polling page"); 4192 } 4193 } 4194 4195 // Mark the polling page as readable 4196 void os::make_polling_page_readable(void) { 4197 DWORD old_status; 4198 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), 4199 PAGE_READONLY, &old_status)) { 4200 fatal("Could not enable polling page"); 4201 } 4202 } 4203 4204 4205 int os::stat(const char *path, struct stat *sbuf) { 4206 char pathbuf[MAX_PATH]; 4207 if (strlen(path) > MAX_PATH - 1) { 4208 errno = ENAMETOOLONG; 4209 return -1; 4210 } 4211 os::native_path(strcpy(pathbuf, path)); 4212 int ret = ::stat(pathbuf, sbuf); 4213 if (sbuf != NULL && UseUTCFileTimestamp) { 4214 // Fix for 6539723. 
st_mtime returned from stat() is dependent on 4215 // the system timezone and so can return different values for the 4216 // same file if/when daylight savings time changes. This adjustment 4217 // makes sure the same timestamp is returned regardless of the TZ. 4218 // 4219 // See: 4220 // http://msdn.microsoft.com/library/ 4221 // default.asp?url=/library/en-us/sysinfo/base/ 4222 // time_zone_information_str.asp 4223 // and 4224 // http://msdn.microsoft.com/library/default.asp?url= 4225 // /library/en-us/sysinfo/base/settimezoneinformation.asp 4226 // 4227 // NOTE: there is a insidious bug here: If the timezone is changed 4228 // after the call to stat() but before 'GetTimeZoneInformation()', then 4229 // the adjustment we do here will be wrong and we'll return the wrong 4230 // value (which will likely end up creating an invalid class data 4231 // archive). Absent a better API for this, or some time zone locking 4232 // mechanism, we'll have to live with this risk. 4233 TIME_ZONE_INFORMATION tz; 4234 DWORD tzid = GetTimeZoneInformation(&tz); 4235 int daylightBias = 4236 (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias; 4237 sbuf->st_mtime += (tz.Bias + daylightBias) * 60; 4238 } 4239 return ret; 4240 } 4241 4242 4243 #define FT2INT64(ft) \ 4244 ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime)) 4245 4246 4247 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) 4248 // are used by JVM M&M and JVMTI to get user+sys or user CPU time 4249 // of a thread. 4250 // 4251 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns 4252 // the fast estimate available on the platform. 
4253 4254 // current_thread_cpu_time() is not optimized for Windows yet 4255 jlong os::current_thread_cpu_time() { 4256 // return user + sys since the cost is the same 4257 return os::thread_cpu_time(Thread::current(), true /* user+sys */); 4258 } 4259 4260 jlong os::thread_cpu_time(Thread* thread) { 4261 // consistent with what current_thread_cpu_time() returns. 4262 return os::thread_cpu_time(thread, true /* user+sys */); 4263 } 4264 4265 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 4266 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 4267 } 4268 4269 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) { 4270 // This code is copy from clasic VM -> hpi::sysThreadCPUTime 4271 // If this function changes, os::is_thread_cpu_time_supported() should too 4272 if (os::win32::is_nt()) { 4273 FILETIME CreationTime; 4274 FILETIME ExitTime; 4275 FILETIME KernelTime; 4276 FILETIME UserTime; 4277 4278 if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime, 4279 &ExitTime, &KernelTime, &UserTime) == 0) { 4280 return -1; 4281 } else if (user_sys_cpu_time) { 4282 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100; 4283 } else { 4284 return FT2INT64(UserTime) * 100; 4285 } 4286 } else { 4287 return (jlong) timeGetTime() * 1000000; 4288 } 4289 } 4290 4291 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4292 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4293 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4294 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4295 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4296 } 4297 4298 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4299 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4300 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4301 info_ptr->may_skip_forward = false; // GetThreadTimes 
returns absolute time 4302 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4303 } 4304 4305 bool os::is_thread_cpu_time_supported() { 4306 // see os::thread_cpu_time 4307 if (os::win32::is_nt()) { 4308 FILETIME CreationTime; 4309 FILETIME ExitTime; 4310 FILETIME KernelTime; 4311 FILETIME UserTime; 4312 4313 if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime, 4314 &KernelTime, &UserTime) == 0) { 4315 return false; 4316 } else { 4317 return true; 4318 } 4319 } else { 4320 return false; 4321 } 4322 } 4323 4324 // Windows does't provide a loadavg primitive so this is stubbed out for now. 4325 // It does have primitives (PDH API) to get CPU usage and run queue length. 4326 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length" 4327 // If we wanted to implement loadavg on Windows, we have a few options: 4328 // 4329 // a) Query CPU usage and run queue length and "fake" an answer by 4330 // returning the CPU usage if it's under 100%, and the run queue 4331 // length otherwise. It turns out that querying is pretty slow 4332 // on Windows, on the order of 200 microseconds on a fast machine. 4333 // Note that on the Windows the CPU usage value is the % usage 4334 // since the last time the API was called (and the first call 4335 // returns 100%), so we'd have to deal with that as well. 4336 // 4337 // b) Sample the "fake" answer using a sampling thread and store 4338 // the answer in a global variable. The call to loadavg would 4339 // just return the value of the global, avoiding the slow query. 4340 // 4341 // c) Sample a better answer using exponential decay to smooth the 4342 // value. This is basically the algorithm used by UNIX kernels. 4343 // 4344 // Note that sampling thread starvation could affect both (b) and (c). 
// Not implemented on Windows; always returns -1 (see design notes above).
int os::loadavg(double loadavg[], int nelem) {
  return -1;
}


// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
bool os::dont_yield() {
  return DontYieldALot;
}

// This method is a slightly reworked copy of JDK's sysOpen
// from src/windows/hpi/src/sys_api_md.c

// open() wrapper: native-izes the path and forces binary, non-inherited mode.
// Returns the fd, or -1 with errno set (ENAMETOOLONG if path is too long).
int os::open(const char *path, int oflag, int mode) {
  char pathbuf[MAX_PATH];

  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
}

// Wrap an already-open fd in a FILE stream.
FILE* os::open(int fd, const char* mode) {
  return ::_fdopen(fd, mode);
}

// Is a (classpath) directory empty?
// Note: 'path' is expected to be a wildcard pattern for FindFirstFile;
// "empty" here means the pattern matched nothing.
bool os::dir_is_empty(const char* path) {
  WIN32_FIND_DATA fd;
  HANDLE f = FindFirstFile(path, &fd);
  if (f == INVALID_HANDLE_VALUE) {
    return true;
  }
  FindClose(f);
  return false;
}

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
  if (!rewrite_existing) {
    // _O_EXCL makes open() fail if the file already exists.
    oflags |= _O_EXCL;
  }
  return ::open(path, oflags, _S_IREAD | _S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}


// 64-bit-safe lseek for large files.
jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::_lseeki64(fd, offset, whence);
}

// Positioned read: reads up to nBytes from 'fd' at absolute 'offset'
// without moving the fd's file pointer (uses OVERLAPPED ReadFile).
// Returns the number of bytes read, or 0 on failure.
size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  OVERLAPPED ov;
  DWORD nread;
  BOOL result;

  ZeroMemory(&ov, sizeof(ov));
  // Split the 64-bit offset across the two OVERLAPPED fields.
  ov.Offset = (DWORD)offset;
  ov.OffsetHigh = (DWORD)(offset >> 32);

  HANDLE h = (HANDLE)::_get_osfhandle(fd);

  result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);

  return result ? nread : 0;
}


// This method is a slightly reworked copy of JDK's sysNativePath
// from src/windows/hpi/src/path_md.c

// Convert a pathname to native format.  On win32, this involves forcing all
// separators to be '\\' rather than '/' (both are legal inputs, but Win95
// sometimes rejects '/') and removing redundant separators. The input path is
// assumed to have been converted into the character encoding used by the local
// system.  Because this might be a double-byte encoding, care is taken to
// treat double-byte lead characters correctly.
//
// This procedure modifies the given path in place, as the result is never
// longer than the original.  There is no error return; this operation always
// succeeds.
char * os::native_path(char *path) {
  // 'src' scans forward, 'dst' writes the compacted result in place,
  // 'end' tracks where the terminating NUL should go.
  char *src = path, *dst = path, *end = path;
  char *colon = NULL;  // If a drive specifier is found, this will
                       // point to the colon following the drive letter

  // Assumption: '/', '\\', ':', and drive letters are never lead bytes
  assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
          && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");

  // Check for leading separators
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    // Remove leading separators if followed by drive specifier.  This
    // hack is necessary to support file URLs containing drive
    // specifiers (e.g., "file://c:/path").  As a side effect,
    // "/c:/path" can be used as an alternative to "c:/path".
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      // UNC pathname: Retain first separator; leave src pointed at
      // second separator so that further separators will be collapsed
      // into the second separator.  The result will be a pathname
      // beginning with "\\\\" followed (most likely) by a host name.
      src = dst = path + 1;
      path[0] = '\\';  // Force first separator to '\\'
    }
  }

  end = dst;

  // Remove redundant separators from remainder of path, forcing all
  // separators to be '\\' rather than '/'. Also, single byte space
  // characters are removed from the end of the path because those
  // are not legal ending characters on this operating system.
  //
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      // Collapse any run of separators into the single '\\' just written.
      while (isfilesep(*src)) src++;
      if (*src == '\0') {
        // Check for trailing separator
        end = dst;
        if (colon == dst - 2) break;  // "z:\\"
        if (dst == path + 1) break;   // "\\"
        if (dst == path + 2 && isfilesep(path[0])) {
          // "\\\\" is not collapsed to "\\" because "\\\\" marks the
          // beginning of a UNC pathname.  Even though it is not, by
          // itself, a valid UNC pathname, we leave it as is in order
          // to be consistent with the path canonicalizer as well
          // as the win32 APIs, which treat this case as an invalid
          // UNC pathname rather than as an alias for the root
          // directory of the current drive.
          break;
        }
        end = --dst;  // Path does not denote a root directory, so
                      // remove trailing separator
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else {  // Copy a single-byte character
        char c = *src++;
        *dst++ = c;
        // Space is not a legal ending character
        if (c != ' ') end = dst;
      }
    }
  }

  *end = '\0';

  // For "z:", add "." to work around a bug in the C runtime library
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}

// This code is a copy of JDK's sysSetLength
// from src/windows/hpi/src/sys_api_md.c

// Truncate (or extend) 'fd' to 'length' bytes.  Returns 0 on success, -1 on
// failure.  Note: moves the underlying file pointer to 'length' as a side
// effect of SetFilePointer.
int os::ftruncate(int fd, jlong length) {
  HANDLE h = (HANDLE)::_get_osfhandle(fd);
  long high = (long)(length >> 32);
  DWORD ret;

  if (h == (HANDLE)(-1)) {
    return -1;
  }

  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
  // 0xFFFFFFFF may be a legitimate low dword of a 64-bit position,
  // so also consult GetLastError before treating it as failure.
  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
    return -1;
  }

  if (::SetEndOfFile(h) == FALSE) {
    return -1;
  }

  return 0;
}


// This code is a copy of JDK's sysSync
// from src/windows/hpi/src/sys_api_md.c
// except for the legacy workaround for a bug in Win 98

// Flush OS buffers for 'fd' to disk.  Returns 0 on success, -1 on failure.
// ERROR_ACCESS_DENIED is tolerated: FlushFileBuffers fails that way on
// handles without write access (e.g. read-only fds), which is not an error.
int os::fsync(int fd) {
  HANDLE handle = (HANDLE)::_get_osfhandle(fd);

  if ((!::FlushFileBuffers(handle)) &&
      (GetLastError() != ERROR_ACCESS_DENIED)) {
    // from winerror.h
    return -1;
  }
  return 0;
}

static int nonSeekAvailable(int, long *);
static int stdinAvailable(int, long *);

#define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
#define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)

// This code is a copy of JDK's sysAvailable
// from src/windows/hpi/src/sys_api_md.c
// Reports the number of bytes readable from 'fd' without blocking, in
// *bytes.  Returns TRUE on success, FALSE on failure.  Character devices
// and pipes are delegated to stdinAvailable/nonSeekAvailable; regular
// files use seek arithmetic (end - current).
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      int ret;
      long lpbytes;
      if (fd == 0) {
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    // Seekable file: available = end-of-file minus current position,
    // restoring the original position afterwards.
    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
      return FALSE;
    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
      return FALSE;
    }
    *bytes = end - cur;
    return TRUE;
  } else {
    return FALSE;
  }
}

// This code is a copy of JDK's nonSeekAvailable
// from src/windows/hpi/src/sys_api_md.c

static int nonSeekAvailable(int fd, long *pbytes) {
  // This is used for available on non-seekable devices
  // (like both named and anonymous pipes, such as pipes
  //  connected to an exec'd process).
  // Standard Input is a special case.
  HANDLE han;

  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
    return FALSE;
  }

  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
    // PeekNamedPipe fails when at EOF.  In that case we
    // simply make *pbytes = 0 which is consistent with the
    // behavior we get on Solaris when an fd is at EOF.
    // The only alternative is to raise an Exception,
    // which isn't really warranted.
    //
    if (::GetLastError() != ERROR_BROKEN_PIPE) {
      return FALSE;
    }
    *pbytes = 0;
  }
  return TRUE;
}

// lpBuffer must fit into 64K or else PeekConsoleInput fails (see below).
#define MAX_INPUT_EVENTS 2000

// This code is a copy of JDK's stdinAvailable
// from src/windows/hpi/src/sys_api_md.c

// Counts bytes available on the console stdin by peeking pending key-down
// events; falls back to nonSeekAvailable when stdin is redirected (not a
// console).  Returns TRUE/FALSE like os::available.
static int stdinAvailable(int fd, long *pbytes) {
  HANDLE han;
  DWORD numEventsRead = 0;  // Number of events read from buffer
  DWORD numEvents = 0;      // Number of events in buffer
  DWORD i = 0;              // Loop index
  DWORD curLength = 0;      // Position marker
  DWORD actualLength = 0;   // Number of bytes readable
  BOOL error = FALSE;       // Error holder
  INPUT_RECORD *lpBuffer;   // Pointer to records of input events

  if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
    return FALSE;
  }

  // Construct an array of input records in the console buffer
  error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
  if (error == 0) {
    // Not a console (redirected stdin): treat like a pipe.
    return nonSeekAvailable(fd, pbytes);
  }

  // lpBuffer must fit into 64K or else PeekConsoleInput fails
  if (numEvents > MAX_INPUT_EVENTS) {
    numEvents = MAX_INPUT_EVENTS;
  }

  lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
  if (lpBuffer == NULL) {
    return FALSE;
  }

  error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
  if (error == 0) {
    os::free(lpBuffer);
    return FALSE;
  }

  // Examine input records for the number of bytes available
  for (i=0; i<numEvents; i++) {
    if (lpBuffer[i].EventType == KEY_EVENT) {

      KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
                                      &(lpBuffer[i].Event);
      if (keyRecord->bKeyDown == TRUE) {
        CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
        curLength++;
        // Only bytes up to and including the last CR count as "available",
        // mirroring line-buffered console semantics.
        if (*keyPressed == '\r') {
          actualLength = curLength;
        }
      }
    }
  }

  if (lpBuffer != NULL) {
    os::free(lpBuffer);
  }

  *pbytes = (long) actualLength;
  return TRUE;
}

// Map a block of memory.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
  HANDLE hFile;
  char* base;

  hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
                     OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
  // NOTE(review): CreateFile reports failure as INVALID_HANDLE_VALUE, not
  // NULL -- this check looks wrong; confirm against the Win32 docs.
  if (hFile == NULL) {
    if (PrintMiscellaneous && Verbose) {
      DWORD err = GetLastError();
      tty->print_cr("CreateFile() failed: GetLastError->%ld.", err);
    }
    return NULL;
  }

  if (allow_exec) {
    // CreateFileMapping/MapViewOfFileEx can't map executable memory
    // unless it comes from a PE image (which the shared archive is not.)
    // Even VirtualProtect refuses to give execute access to mapped memory
    // that was not previously executable.
    //
    // Instead, stick the executable region in anonymous memory.  Yuck.
    // Penalty is that ~4 pages will not be shareable - in the future
    // we might consider DLLizing the shared archive with a proper PE
    // header so that mapping executable + sharing is possible.

    base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
                                PAGE_READWRITE);
    if (base == NULL) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err);
      }
      CloseHandle(hFile);
      return NULL;
    }

    DWORD bytes_read;
    OVERLAPPED overlapped;
    overlapped.Offset = (DWORD)file_offset;
    overlapped.OffsetHigh = 0;
    overlapped.hEvent = NULL;
    // ReadFile guarantees that if the return value is true, the requested
    // number of bytes were read before returning.
4750 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; 4751 if (!res) { 4752 if (PrintMiscellaneous && Verbose) { 4753 DWORD err = GetLastError(); 4754 tty->print_cr("ReadFile() failed: GetLastError->%ld.", err); 4755 } 4756 release_memory(base, bytes); 4757 CloseHandle(hFile); 4758 return NULL; 4759 } 4760 } else { 4761 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, 4762 NULL /* file_name */); 4763 if (hMap == NULL) { 4764 if (PrintMiscellaneous && Verbose) { 4765 DWORD err = GetLastError(); 4766 tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err); 4767 } 4768 CloseHandle(hFile); 4769 return NULL; 4770 } 4771 4772 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; 4773 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, 4774 (DWORD)bytes, addr); 4775 if (base == NULL) { 4776 if (PrintMiscellaneous && Verbose) { 4777 DWORD err = GetLastError(); 4778 tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err); 4779 } 4780 CloseHandle(hMap); 4781 CloseHandle(hFile); 4782 return NULL; 4783 } 4784 4785 if (CloseHandle(hMap) == 0) { 4786 if (PrintMiscellaneous && Verbose) { 4787 DWORD err = GetLastError(); 4788 tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err); 4789 } 4790 CloseHandle(hFile); 4791 return base; 4792 } 4793 } 4794 4795 if (allow_exec) { 4796 DWORD old_protect; 4797 DWORD exec_access = read_only ? 
PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; 4798 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; 4799 4800 if (!res) { 4801 if (PrintMiscellaneous && Verbose) { 4802 DWORD err = GetLastError(); 4803 tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err); 4804 } 4805 // Don't consider this a hard error, on IA32 even if the 4806 // VirtualProtect fails, we should still be able to execute 4807 CloseHandle(hFile); 4808 return base; 4809 } 4810 } 4811 4812 if (CloseHandle(hFile) == 0) { 4813 if (PrintMiscellaneous && Verbose) { 4814 DWORD err = GetLastError(); 4815 tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err); 4816 } 4817 return base; 4818 } 4819 4820 return base; 4821 } 4822 4823 4824 // Remap a block of memory. 4825 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4826 char *addr, size_t bytes, bool read_only, 4827 bool allow_exec) { 4828 // This OS does not allow existing memory maps to be remapped so we 4829 // have to unmap the memory before we remap it. 4830 if (!os::unmap_memory(addr, bytes)) { 4831 return NULL; 4832 } 4833 4834 // There is a very small theoretical window between the unmap_memory() 4835 // call above and the map_memory() call below where a thread in native 4836 // code may be able to access an address that is no longer mapped. 4837 4838 return os::map_memory(fd, file_name, file_offset, addr, bytes, 4839 read_only, allow_exec); 4840 } 4841 4842 4843 // Unmap a block of memory. 4844 // Returns true=success, otherwise false. 
4845 4846 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4847 BOOL result = UnmapViewOfFile(addr); 4848 if (result == 0) { 4849 if (PrintMiscellaneous && Verbose) { 4850 DWORD err = GetLastError(); 4851 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err); 4852 } 4853 return false; 4854 } 4855 return true; 4856 } 4857 4858 void os::pause() { 4859 char filename[MAX_PATH]; 4860 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4861 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4862 } else { 4863 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4864 } 4865 4866 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4867 if (fd != -1) { 4868 struct stat buf; 4869 ::close(fd); 4870 while (::stat(filename, &buf) == 0) { 4871 Sleep(100); 4872 } 4873 } else { 4874 jio_fprintf(stderr, 4875 "Could not open pause file '%s', continuing immediately.\n", filename); 4876 } 4877 } 4878 4879 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() { 4880 assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread"); 4881 } 4882 4883 // See the caveats for this class in os_windows.hpp 4884 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4885 // into this method and returns false. If no OS EXCEPTION was raised, returns 4886 // true. 4887 // The callback is supposed to provide the method that should be protected. 
//
bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
  assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
  assert(!WatcherThread::watcher_thread()->has_crash_protection(),
         "crash_protection already set?");

  bool success = true;
  __try {
    // Publish the protection object on the WatcherThread so fault
    // handling knows a protected region is active, then run the
    // (possibly faulting) callback under an SEH handler.
    WatcherThread::watcher_thread()->set_crash_protection(this);
    cb.call();
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // only for protection, nothing to do
    success = false;
  }
  // Always clear the protection, whether or not an exception fired.
  WatcherThread::watcher_thread()->set_crash_protection(NULL);
  return success;
}

// An Event wraps a win32 "CreateEvent" kernel handle.
//
// We have a number of choices regarding "CreateEvent" win32 handle leakage:
//
// 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle
//    field, and call CloseHandle() on the win32 event handle.  Unpark() would
//    need to be modified to tolerate finding a NULL (invalid) win32 event handle.
//    In addition, an unpark() operation might fetch the handle field, but the
//    event could recycle between the fetch and the SetEvent() operation.
//    SetEvent() would either fail because the handle was invalid, or inadvertently work,
//    as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
//    on a stale but recycled handle would be harmless, but in practice this might
//    confuse other non-Sun code, so it's not a viable approach.
//
// 2: Once a win32 event handle is associated with an Event, it remains associated
//    with the Event.  The event handle is never closed.  This could be construed
//    as handle leakage, but only up to the maximum # of threads that have been extant
//    at any one time.  This shouldn't be an issue, as windows platforms typically
//    permit a process to have hundreds of thousands of open handles.
//
// 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
//    and release unused handles.
//
// 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
//    It's not clear, however, that we wouldn't be trading one type of leak for another.
//
// 5. Use an RCU-like mechanism (Read-Copy Update).
//    Or perhaps something similar to Maged Michael's "Hazard pointers".
//
// We use (2).
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
// 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
//     to recover from (or at least detect) the dreaded Windows 841176 bug.
// 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
//     into a single win32 CreateEvent() handle.
//
// Assumption:
//    Only one parker can exist on an event, which is why we allocate
//    them per-thread.  Multiple unparkers can coexist.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block; then set _Event to 0 before returning
//
// _Event transitions in unpark()
//    0 => 1 : just return
//    1 => 1 : just return
//   -1 => either 0 or 1; must signal target thread
//         That is, we can safely transition _Event from -1 to either
//         0 or 1.
//
// _Event serves as a restricted-range semaphore.
//   -1 : thread is blocked, i.e. there is a waiter
//    0 : neutral: thread is running or ready,
//        could have been signaled after a wait started
//    1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
//

int os::PlatformEvent::park(jlong Millis) {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL , "Invariant");
  guarantee(Millis > 0          , "Invariant");

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.
  // Consider: use atomic decrement instead of CAS-loop

  // Atomically decrement _Event; v is the pre-decrement value.
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return OS_OK;  // was already signaled - consumed the permit

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;  // set prd = MAX (Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
  }
  v = _Event;
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultaneous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}

void os::PlatformEvent::park() {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Consider: use atomic decrement instead of CAS-loop
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return;  // permit was available - no need to block

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}

void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  if (Atomic::xchg(1, &_Event) >= 0) return;

  // Prior value was -1: a thread is blocked (or is about to block) in
  // WaitForSingleObject() - wake it.
  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

// The Windows implementation of Park is very straightforward: Basic
// operations on Win32 Events turn out to have the right semantics to
// use them directly.  We opportunistically reuse the event inherited
// from Monitor.

// Park the current JavaThread until unpark(), interrupt, or timeout.
// isAbsolute selects whether 'time' is an absolute deadline in millis
// or a relative wait in nanos (0 == wait forever when relative).
// NOTE(review): 'time' is a jlong but WaitForSingleObject takes a DWORD;
// extremely large relative timeouts appear to be truncated - confirm.
void Parker::park(bool isAbsolute, jlong time) {
  guarantee(_ParkEvent != NULL, "invariant");
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  } else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  } else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) { // already elapsed
      return;
    }
  } else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0) { // Wait for the minimal time unit if zero
      time = 1;
    }
  }

  JavaThread* thread = (JavaThread*)(Thread::current());
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    // Consume the pending permit and return immediately.
    ResetEvent(_ParkEvent);
    return;
  } else {
    // Transition to blocked state for the duration of the wait so the
    // VM (safepoints, suspension) treats this thread correctly.
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jt->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (jt->handle_special_suspend_equivalent_condition()) {
      jt->java_suspend_self();
    }
  }
}

// Make a permit available; wakes the parked thread if there is one.
void Parker::unpark() {
  guarantee(_ParkEvent != NULL, "invariant");
  SetEvent(_ParkEvent);
}

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't create a new process).
5145 int os::fork_and_exec(char* cmd) { 5146 STARTUPINFO si; 5147 PROCESS_INFORMATION pi; 5148 5149 memset(&si, 0, sizeof(si)); 5150 si.cb = sizeof(si); 5151 memset(&pi, 0, sizeof(pi)); 5152 BOOL rslt = CreateProcess(NULL, // executable name - use command line 5153 cmd, // command line 5154 NULL, // process security attribute 5155 NULL, // thread security attribute 5156 TRUE, // inherits system handles 5157 0, // no creation flags 5158 NULL, // use parent's environment block 5159 NULL, // use parent's starting directory 5160 &si, // (in) startup information 5161 &pi); // (out) process information 5162 5163 if (rslt) { 5164 // Wait until child process exits. 5165 WaitForSingleObject(pi.hProcess, INFINITE); 5166 5167 DWORD exit_code; 5168 GetExitCodeProcess(pi.hProcess, &exit_code); 5169 5170 // Close process and thread handles. 5171 CloseHandle(pi.hProcess); 5172 CloseHandle(pi.hThread); 5173 5174 return (int)exit_code; 5175 } else { 5176 return -1; 5177 } 5178 } 5179 5180 //-------------------------------------------------------------------------------------------------- 5181 // Non-product code 5182 5183 static int mallocDebugIntervalCounter = 0; 5184 static int mallocDebugCounter = 0; 5185 bool os::check_heap(bool force) { 5186 if (++mallocDebugCounter < MallocVerifyStart && !force) return true; 5187 if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) { 5188 // Note: HeapValidate executes two hardware breakpoints when it finds something 5189 // wrong; at these points, eax contains the address of the offending block (I think). 5190 // To get to the exlicit error message(s) below, just continue twice. 5191 HANDLE heap = GetProcessHeap(); 5192 5193 // If we fail to lock the heap, then gflags.exe has been used 5194 // or some other special heap flag has been set that prevents 5195 // locking. We don't try to walk a heap we can't lock. 
5196 if (HeapLock(heap) != 0) { 5197 PROCESS_HEAP_ENTRY phe; 5198 phe.lpData = NULL; 5199 while (HeapWalk(heap, &phe) != 0) { 5200 if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) && 5201 !HeapValidate(heap, 0, phe.lpData)) { 5202 tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter); 5203 tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData); 5204 fatal("corrupted C heap"); 5205 } 5206 } 5207 DWORD err = GetLastError(); 5208 if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) { 5209 fatal(err_msg("heap walk aborted with error %d", err)); 5210 } 5211 HeapUnlock(heap); 5212 } 5213 mallocDebugIntervalCounter = 0; 5214 } 5215 return true; 5216 } 5217 5218 5219 bool os::find(address addr, outputStream* st) { 5220 // Nothing yet 5221 return false; 5222 } 5223 5224 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) { 5225 DWORD exception_code = e->ExceptionRecord->ExceptionCode; 5226 5227 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 5228 JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow(); 5229 PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord; 5230 address addr = (address) exceptionRecord->ExceptionInformation[1]; 5231 5232 if (os::is_memory_serialize_page(thread, addr)) { 5233 return EXCEPTION_CONTINUE_EXECUTION; 5234 } 5235 } 5236 5237 return EXCEPTION_CONTINUE_SEARCH; 5238 } 5239 5240 // We don't build a headless jre for Windows 5241 bool os::is_headless_jre() { return false; } 5242 5243 static jint initSock() { 5244 WSADATA wsadata; 5245 5246 if (!os::WinSock2Dll::WinSock2Available()) { 5247 jio_fprintf(stderr, "Could not load Winsock (error: %d)\n", 5248 ::GetLastError()); 5249 return JNI_ERR; 5250 } 5251 5252 if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) { 5253 jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n", 5254 ::GetLastError()); 5255 return JNI_ERR; 5256 } 5257 return JNI_OK; 5258 } 5259 5260 
// Resolve a host name through the dynamically loaded WinSock2 library.
struct hostent* os::get_host_by_name(char* name) {
  return (struct hostent*)os::WinSock2Dll::gethostbyname(name);
}

// Thin wrappers over the WinSock socket calls.  Byte counts are
// narrowed to int because the Windows APIs take int lengths.
int os::socket_close(int fd) {
  return ::closesocket(fd);
}

int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}

int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  return ::connect(fd, him, len);
}

int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  return ::recv(fd, buf, (int)nBytes, flags);
}

int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(IA32)
#define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
#define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif

// returns true if thread could be suspended,
// false otherwise
static bool do_suspend(HANDLE* h) {
  if (h != NULL) {
    // SuspendThread returns (DWORD)-1 (~0) on failure.
    if (SuspendThread(*h) != ~0) {
      return true;
    }
  }
  return false;
}

// resume the thread
// calling resume on an active thread is a no-op
static void do_resume(HANDLE* h) {
  if (h != NULL) {
    ResumeThread(*h);
  }
}

// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value.
void get_thread_handle_for_extended_context(HANDLE* h,
                                            OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

// Thread sampling implementation
//
// Suspend the target thread, capture its register context, hand the
// context to the subclass via do_task(), then resume the thread.
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}


// Kernel32 API
// Function-pointer types for Kernel32 entry points that may be missing
// on older Windows versions; they are resolved with GetProcAddress in
// initializeCommon() below, and remain NULL when unavailable.
typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn)(HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn)(PULONG);
typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn)(UCHAR, PULONGLONG);
typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG);

GetLargePageMinimum_Fn os::Kernel32Dll::_GetLargePageMinimum = NULL;
VirtualAllocExNuma_Fn os::Kernel32Dll::_VirtualAllocExNuma = NULL;
GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL;


BOOL os::Kernel32Dll::initialized = FALSE;

// Callers must have checked availability (via the *Available() methods)
// before using the resolved entry points; the asserts enforce that.
SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
  assert(initialized && _GetLargePageMinimum != NULL,
         "GetLargePageMinimumAvailable() not yet called");
  return _GetLargePageMinimum();
}

BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetLargePageMinimum != NULL;
}

BOOL os::Kernel32Dll::NumaCallsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _VirtualAllocExNuma != NULL;
}

LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr,
                                           SIZE_T bytes, DWORD flags,
                                           DWORD prot, DWORD node) {
  assert(initialized && _VirtualAllocExNuma != NULL,
         "NUMACallsAvailable() not yet called");

  return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
}

BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
  assert(initialized && _GetNumaHighestNodeNumber != NULL,
         "NUMACallsAvailable() not yet called");

  return _GetNumaHighestNodeNumber(ptr_highest_node_number);
}

BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node,
                                               PULONGLONG proc_mask) {
  assert(initialized && _GetNumaNodeProcessorMask != NULL,
         "NUMACallsAvailable() not yet called");

  return _GetNumaNodeProcessorMask(node, proc_mask);
}

USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
                                                 ULONG FrameToCapture,
                                                 PVOID* BackTrace,
                                                 PULONG BackTraceHash) {
  if (!initialized) {
    initialize();
  }

  if (_RtlCaptureStackBackTrace != NULL) {
    return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
                                     BackTrace, BackTraceHash);
  } else {
    return 0;
  }
}

// Resolve the optional Kernel32 entry points needed by both the JDK6
// and post-JDK6 configurations.  A NULL result just means the API is
// unavailable on this Windows version.
void os::Kernel32Dll::initializeCommon() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");
    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
    _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
    _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
    _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
    _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace");
    initialized = TRUE;
  }
}



#ifndef JDK6_OR_EARLIER
// Post-JDK6 configuration: these APIs are guaranteed present, so the
// wrappers call them directly instead of going through GetProcAddress.

void os::Kernel32Dll::initialize() {
  initializeCommon();
}


// Kernel32 API
inline BOOL os::Kernel32Dll::SwitchToThread() {
  return ::SwitchToThread();
}

inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  return true;
}

// Help tools
inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
  return true;
}

inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,
                                                        DWORD th32ProcessId) {
  return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,
                                           LPMODULEENTRY32 lpme) {
  return ::Module32First(hSnapshot, lpme);
}

inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,
                                          LPMODULEENTRY32 lpme) {
  return ::Module32Next(hSnapshot, lpme);
}

inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  ::GetNativeSystemInfo(lpSystemInfo);
}

// PSAPI API
inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess,
                                             HMODULE *lpModule, DWORD cb,
                                             LPDWORD lpcbNeeded) {
  return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess,
                                               HMODULE hModule,
                                               LPTSTR lpFilename,
                                               DWORD nSize) {
  return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess,
                                               HMODULE hModule,
                                               LPMODULEINFO lpmodinfo,
                                               DWORD cb) {
  return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

inline BOOL os::PSApiDll::PSApiAvailable() {
  return true;
}


// WinSock2 API
inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested,
                                        LPWSADATA lpWSAData) {
  return ::WSAStartup(wVersionRequested, lpWSAData);
}

inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  return ::gethostbyname(name);
}

inline BOOL os::WinSock2Dll::WinSock2Available() {
  return true;
}

// Advapi API
inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
                                                   BOOL DisableAllPrivileges,
                                                   PTOKEN_PRIVILEGES NewState,
                                                   DWORD BufferLength,
                                                   PTOKEN_PRIVILEGES PreviousState,
                                                   PDWORD ReturnLength) {
  return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
                                 BufferLength, PreviousState, ReturnLength);
}

inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle,
                                              DWORD DesiredAccess,
                                              PHANDLE TokenHandle) {
  return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}

inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName,
                                                  LPCTSTR lpName,
                                                  PLUID lpLuid) {
  return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}

inline BOOL os::Advapi32Dll::AdvapiAvailable() {
  return true;
}

// Handle of the current process image (NULL module).
void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//                             == false if only the base name of the library is passed in
//                                     such as "L"
// Returns a C-heap-allocated name (caller frees), or NULL on failure.
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // +2: one char for the '_' separator and one for the trailing NUL.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      // __stdcall name: splice "_<lib_name>" in before the @XX suffix.
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}

#else
// JDK6-or-earlier configuration: the APIs below may be absent from the
// running Windows version, so they too are resolved dynamically.

// Kernel32 API
typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD, DWORD);
typedef BOOL (WINAPI* Module32First_Fn)(HANDLE, LPMODULEENTRY32);
typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE, LPMODULEENTRY32);
typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);

SwitchToThread_Fn os::Kernel32Dll::_SwitchToThread = NULL;
CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
Module32First_Fn os::Kernel32Dll::_Module32First = NULL;
Module32Next_Fn os::Kernel32Dll::_Module32Next = NULL;
GetNativeSystemInfo_Fn os::Kernel32Dll::_GetNativeSystemInfo = NULL;

void os::Kernel32Dll::initialize() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");

    _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
    _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
                                ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
    _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First");
    _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next");
    _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo");
    initializeCommon();  // resolve the functions that always need resolving

    initialized = TRUE;
  }
}

BOOL os::Kernel32Dll::SwitchToThread() {
  assert(initialized && _SwitchToThread != NULL,
         "SwitchToThreadAvailable() not yet called");
  return _SwitchToThread();
}


BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  if (!initialized) {
    initialize();
  }
  return _SwitchToThread != NULL;
}

// Help tools
BOOL os::Kernel32Dll::HelpToolsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _CreateToolhelp32Snapshot != NULL &&
         _Module32First != NULL &&
         _Module32Next != NULL;
}

HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,
                                                 DWORD th32ProcessId) {
  assert(initialized && _CreateToolhelp32Snapshot != NULL,
         "HelpToolsAvailable() not yet called");

  return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  assert(initialized && _Module32First != NULL,
         "HelpToolsAvailable() not yet called");

  return _Module32First(hSnapshot, lpme);
}

inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,
                                          LPMODULEENTRY32 lpme) {
  assert(initialized && _Module32Next != NULL,
         "HelpToolsAvailable() not yet called");

  return _Module32Next(hSnapshot, lpme);
}


BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetNativeSystemInfo != NULL;
}

void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo)
{ 5707 assert(initialized && _GetNativeSystemInfo != NULL, 5708 "GetNativeSystemInfoAvailable() not yet called"); 5709 5710 _GetNativeSystemInfo(lpSystemInfo); 5711 } 5712 5713 // PSAPI API 5714 5715 5716 typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD); 5717 typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD); 5718 typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD); 5719 5720 EnumProcessModules_Fn os::PSApiDll::_EnumProcessModules = NULL; 5721 GetModuleFileNameEx_Fn os::PSApiDll::_GetModuleFileNameEx = NULL; 5722 GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL; 5723 BOOL os::PSApiDll::initialized = FALSE; 5724 5725 void os::PSApiDll::initialize() { 5726 if (!initialized) { 5727 HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0); 5728 if (handle != NULL) { 5729 _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle, 5730 "EnumProcessModules"); 5731 _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle, 5732 "GetModuleFileNameExA"); 5733 _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle, 5734 "GetModuleInformation"); 5735 } 5736 initialized = TRUE; 5737 } 5738 } 5739 5740 5741 5742 BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, 5743 DWORD cb, LPDWORD lpcbNeeded) { 5744 assert(initialized && _EnumProcessModules != NULL, 5745 "PSApiAvailable() not yet called"); 5746 return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded); 5747 } 5748 5749 DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, 5750 LPTSTR lpFilename, DWORD nSize) { 5751 assert(initialized && _GetModuleFileNameEx != NULL, 5752 "PSApiAvailable() not yet called"); 5753 return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize); 5754 } 5755 5756 BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, 5757 LPMODULEINFO lpmodinfo, DWORD cb) { 5758 
assert(initialized && _GetModuleInformation != NULL, 5759 "PSApiAvailable() not yet called"); 5760 return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb); 5761 } 5762 5763 BOOL os::PSApiDll::PSApiAvailable() { 5764 if (!initialized) { 5765 initialize(); 5766 } 5767 return _EnumProcessModules != NULL && 5768 _GetModuleFileNameEx != NULL && 5769 _GetModuleInformation != NULL; 5770 } 5771 5772 5773 // WinSock2 API 5774 typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA); 5775 typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...); 5776 5777 WSAStartup_Fn os::WinSock2Dll::_WSAStartup = NULL; 5778 gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL; 5779 BOOL os::WinSock2Dll::initialized = FALSE; 5780 5781 void os::WinSock2Dll::initialize() { 5782 if (!initialized) { 5783 HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0); 5784 if (handle != NULL) { 5785 _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup"); 5786 _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname"); 5787 } 5788 initialized = TRUE; 5789 } 5790 } 5791 5792 5793 BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) { 5794 assert(initialized && _WSAStartup != NULL, 5795 "WinSock2Available() not yet called"); 5796 return _WSAStartup(wVersionRequested, lpWSAData); 5797 } 5798 5799 struct hostent* os::WinSock2Dll::gethostbyname(const char *name) { 5800 assert(initialized && _gethostbyname != NULL, 5801 "WinSock2Available() not yet called"); 5802 return _gethostbyname(name); 5803 } 5804 5805 BOOL os::WinSock2Dll::WinSock2Available() { 5806 if (!initialized) { 5807 initialize(); 5808 } 5809 return _WSAStartup != NULL && 5810 _gethostbyname != NULL; 5811 } 5812 5813 typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD); 5814 typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE); 5815 typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, 
LPCTSTR, PLUID); 5816 5817 AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL; 5818 OpenProcessToken_Fn os::Advapi32Dll::_OpenProcessToken = NULL; 5819 LookupPrivilegeValue_Fn os::Advapi32Dll::_LookupPrivilegeValue = NULL; 5820 BOOL os::Advapi32Dll::initialized = FALSE; 5821 5822 void os::Advapi32Dll::initialize() { 5823 if (!initialized) { 5824 HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0); 5825 if (handle != NULL) { 5826 _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle, 5827 "AdjustTokenPrivileges"); 5828 _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle, 5829 "OpenProcessToken"); 5830 _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle, 5831 "LookupPrivilegeValueA"); 5832 } 5833 initialized = TRUE; 5834 } 5835 } 5836 5837 BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle, 5838 BOOL DisableAllPrivileges, 5839 PTOKEN_PRIVILEGES NewState, 5840 DWORD BufferLength, 5841 PTOKEN_PRIVILEGES PreviousState, 5842 PDWORD ReturnLength) { 5843 assert(initialized && _AdjustTokenPrivileges != NULL, 5844 "AdvapiAvailable() not yet called"); 5845 return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState, 5846 BufferLength, PreviousState, ReturnLength); 5847 } 5848 5849 BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, 5850 DWORD DesiredAccess, 5851 PHANDLE TokenHandle) { 5852 assert(initialized && _OpenProcessToken != NULL, 5853 "AdvapiAvailable() not yet called"); 5854 return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle); 5855 } 5856 5857 BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, 5858 LPCTSTR lpName, PLUID lpLuid) { 5859 assert(initialized && _LookupPrivilegeValue != NULL, 5860 "AdvapiAvailable() not yet called"); 5861 return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid); 5862 } 5863 5864 BOOL os::Advapi32Dll::AdvapiAvailable() { 5865 if (!initialized) { 5866 initialize(); 5867 } 5868 return 
_AdjustTokenPrivileges != NULL && 5869 _OpenProcessToken != NULL && 5870 _LookupPrivilegeValue != NULL; 5871 } 5872 5873 #endif 5874 5875 #ifndef PRODUCT 5876 5877 // test the code path in reserve_memory_special() that tries to allocate memory in a single 5878 // contiguous memory block at a particular address. 5879 // The test first tries to find a good approximate address to allocate at by using the same 5880 // method to allocate some memory at any address. The test then tries to allocate memory in 5881 // the vicinity (not directly after it to avoid possible by-chance use of that location) 5882 // This is of course only some dodgy assumption, there is no guarantee that the vicinity of 5883 // the previously allocated memory is available for allocation. The only actual failure 5884 // that is reported is when the test tries to allocate at a particular location but gets a 5885 // different valid one. A NULL return value at this point is not considered an error but may 5886 // be legitimate. 5887 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages. 5888 void TestReserveMemorySpecial_test() { 5889 if (!UseLargePages) { 5890 if (VerboseInternalVMTests) { 5891 gclog_or_tty->print("Skipping test because large pages are disabled"); 5892 } 5893 return; 5894 } 5895 // save current value of globals 5896 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; 5897 bool old_use_numa_interleaving = UseNUMAInterleaving; 5898 5899 // set globals to make sure we hit the correct code path 5900 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; 5901 5902 // do an allocation at an address selected by the OS to get a good one. 
5903 const size_t large_allocation_size = os::large_page_size() * 4; 5904 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); 5905 if (result == NULL) { 5906 if (VerboseInternalVMTests) { 5907 gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". Skipping remainder of test.", 5908 large_allocation_size); 5909 } 5910 } else { 5911 os::release_memory_special(result, large_allocation_size); 5912 5913 // allocate another page within the recently allocated memory area which seems to be a good location. At least 5914 // we managed to get it once. 5915 const size_t expected_allocation_size = os::large_page_size(); 5916 char* expected_location = result + os::large_page_size(); 5917 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); 5918 if (actual_location == NULL) { 5919 if (VerboseInternalVMTests) { 5920 gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.", 5921 expected_location, large_allocation_size); 5922 } 5923 } else { 5924 // release memory 5925 os::release_memory_special(actual_location, expected_allocation_size); 5926 // only now check, after releasing any memory to avoid any leaks. 5927 assert(actual_location == expected_location, 5928 err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead", 5929 expected_location, expected_allocation_size, actual_location)); 5930 } 5931 } 5932 5933 // restore globals 5934 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; 5935 UseNUMAInterleaving = old_use_numa_interleaving; 5936 } 5937 #endif // PRODUCT 5938