1 /* 2 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce 26 #define _WIN32_WINNT 0x0600 27 28 // no precompiled headers 29 #include "classfile/classLoader.hpp" 30 #include "classfile/systemDictionary.hpp" 31 #include "classfile/vmSymbols.hpp" 32 #include "code/icBuffer.hpp" 33 #include "code/vtableStubs.hpp" 34 #include "compiler/compileBroker.hpp" 35 #include "compiler/disassembler.hpp" 36 #include "interpreter/interpreter.hpp" 37 #include "jvm_windows.h" 38 #include "memory/allocation.inline.hpp" 39 #include "memory/filemap.hpp" 40 #include "mutex_windows.inline.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "os_share_windows.hpp" 43 #include "os_windows.inline.hpp" 44 #include "prims/jniFastGetField.hpp" 45 #include "prims/jvm.h" 46 #include "prims/jvm_misc.hpp" 47 #include "runtime/arguments.hpp" 48 #include "runtime/atomic.inline.hpp" 49 #include "runtime/extendedPC.hpp" 50 #include "runtime/globals.hpp" 51 #include "runtime/interfaceSupport.hpp" 52 #include "runtime/java.hpp" 53 #include "runtime/javaCalls.hpp" 54 #include "runtime/mutexLocker.hpp" 55 #include "runtime/objectMonitor.hpp" 56 #include "runtime/orderAccess.inline.hpp" 57 #include "runtime/osThread.hpp" 58 #include "runtime/perfMemory.hpp" 59 #include "runtime/sharedRuntime.hpp" 60 #include "runtime/statSampler.hpp" 61 #include "runtime/stubRoutines.hpp" 62 #include "runtime/thread.inline.hpp" 63 #include "runtime/threadCritical.hpp" 64 #include "runtime/timer.hpp" 65 #include "runtime/vm_version.hpp" 66 #include "services/attachListener.hpp" 67 #include "services/memTracker.hpp" 68 #include "services/runtimeService.hpp" 69 #include "utilities/decoder.hpp" 70 #include "utilities/defaultStream.hpp" 71 #include "utilities/events.hpp" 72 #include "utilities/growableArray.hpp" 73 #include "utilities/vmError.hpp" 74 75 #ifdef _DEBUG 76 #include <crtdbg.h> 77 #endif 78 79 80 #include <windows.h> 81 #include <sys/types.h> 82 #include <sys/stat.h> 83 #include 
<sys/timeb.h> 84 #include <objidl.h> 85 #include <shlobj.h> 86 87 #include <malloc.h> 88 #include <signal.h> 89 #include <direct.h> 90 #include <errno.h> 91 #include <fcntl.h> 92 #include <io.h> 93 #include <process.h> // For _beginthreadex(), _endthreadex() 94 #include <imagehlp.h> // For os::dll_address_to_function_name 95 // for enumerating dll libraries 96 #include <vdmdbg.h> 97 98 // for timer info max values which include all bits 99 #define ALL_64_BITS CONST64(-1) 100 101 // For DLL loading/load error detection 102 // Values of PE COFF 103 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c 104 #define IMAGE_FILE_SIGNATURE_LENGTH 4 105 106 static HANDLE main_process; 107 static HANDLE main_thread; 108 static int main_thread_id; 109 110 static FILETIME process_creation_time; 111 static FILETIME process_exit_time; 112 static FILETIME process_user_time; 113 static FILETIME process_kernel_time; 114 115 #ifdef _M_IA64 116 #define __CPU__ ia64 117 #elif _M_AMD64 118 #define __CPU__ amd64 119 #else 120 #define __CPU__ i486 121 #endif 122 123 // save DLL module handle, used by GetModuleFileName 124 125 HINSTANCE vm_lib_handle; 126 127 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) { 128 switch (reason) { 129 case DLL_PROCESS_ATTACH: 130 vm_lib_handle = hinst; 131 if (ForceTimeHighResolution) { 132 timeBeginPeriod(1L); 133 } 134 break; 135 case DLL_PROCESS_DETACH: 136 if (ForceTimeHighResolution) { 137 timeEndPeriod(1L); 138 } 139 break; 140 default: 141 break; 142 } 143 return true; 144 } 145 146 static inline double fileTimeAsDouble(FILETIME* time) { 147 const double high = (double) ((unsigned int) ~0); 148 const double split = 10000000.0; 149 double result = (time->dwLowDateTime / split) + 150 time->dwHighDateTime * (high/split); 151 return result; 152 } 153 154 // Implementation of os 155 156 bool os::unsetenv(const char* name) { 157 assert(name != NULL, "Null pointer"); 158 return (SetEnvironmentVariable(name, NULL) == TRUE); 159 } 160 161 // No setuid 
programs under Windows. 162 bool os::have_special_privileges() { 163 return false; 164 } 165 166 167 // This method is a periodic task to check for misbehaving JNI applications 168 // under CheckJNI, we can add any periodic checks here. 169 // For Windows at the moment does nothing 170 void os::run_periodic_checks() { 171 return; 172 } 173 174 // previous UnhandledExceptionFilter, if there is one 175 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL; 176 177 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo); 178 179 void os::init_system_properties_values() { 180 // sysclasspath, java_home, dll_dir 181 { 182 char *home_path; 183 char *dll_path; 184 char *pslash; 185 char *bin = "\\bin"; 186 char home_dir[MAX_PATH]; 187 188 if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) { 189 os::jvm_path(home_dir, sizeof(home_dir)); 190 // Found the full path to jvm.dll. 191 // Now cut the path to <java_home>/jre if we can. 192 *(strrchr(home_dir, '\\')) = '\0'; // get rid of \jvm.dll 193 pslash = strrchr(home_dir, '\\'); 194 if (pslash != NULL) { 195 *pslash = '\0'; // get rid of \{client|server} 196 pslash = strrchr(home_dir, '\\'); 197 if (pslash != NULL) { 198 *pslash = '\0'; // get rid of \bin 199 } 200 } 201 } 202 203 home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal); 204 if (home_path == NULL) { 205 return; 206 } 207 strcpy(home_path, home_dir); 208 Arguments::set_java_home(home_path); 209 FREE_C_HEAP_ARRAY(char, home_path); 210 211 dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, 212 mtInternal); 213 if (dll_path == NULL) { 214 return; 215 } 216 strcpy(dll_path, home_dir); 217 strcat(dll_path, bin); 218 Arguments::set_dll_dir(dll_path); 219 FREE_C_HEAP_ARRAY(char, dll_path); 220 221 if (!set_boot_path('\\', ';')) { 222 return; 223 } 224 } 225 226 // library_path 227 #define EXT_DIR "\\lib\\ext" 228 #define BIN_DIR "\\bin" 229 #define PACKAGE_DIR "\\Sun\\Java" 230 { 231 // Win32 library search 
order (See the documentation for LoadLibrary): 232 // 233 // 1. The directory from which application is loaded. 234 // 2. The system wide Java Extensions directory (Java only) 235 // 3. System directory (GetSystemDirectory) 236 // 4. Windows directory (GetWindowsDirectory) 237 // 5. The PATH environment variable 238 // 6. The current directory 239 240 char *library_path; 241 char tmp[MAX_PATH]; 242 char *path_str = ::getenv("PATH"); 243 244 library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) + 245 sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal); 246 247 library_path[0] = '\0'; 248 249 GetModuleFileName(NULL, tmp, sizeof(tmp)); 250 *(strrchr(tmp, '\\')) = '\0'; 251 strcat(library_path, tmp); 252 253 GetWindowsDirectory(tmp, sizeof(tmp)); 254 strcat(library_path, ";"); 255 strcat(library_path, tmp); 256 strcat(library_path, PACKAGE_DIR BIN_DIR); 257 258 GetSystemDirectory(tmp, sizeof(tmp)); 259 strcat(library_path, ";"); 260 strcat(library_path, tmp); 261 262 GetWindowsDirectory(tmp, sizeof(tmp)); 263 strcat(library_path, ";"); 264 strcat(library_path, tmp); 265 266 if (path_str) { 267 strcat(library_path, ";"); 268 strcat(library_path, path_str); 269 } 270 271 strcat(library_path, ";."); 272 273 Arguments::set_library_path(library_path); 274 FREE_C_HEAP_ARRAY(char, library_path); 275 } 276 277 // Default extensions directory 278 { 279 char path[MAX_PATH]; 280 char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1]; 281 GetWindowsDirectory(path, MAX_PATH); 282 sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR, 283 path, PACKAGE_DIR, EXT_DIR); 284 Arguments::set_ext_dirs(buf); 285 } 286 #undef EXT_DIR 287 #undef BIN_DIR 288 #undef PACKAGE_DIR 289 290 #ifndef _WIN64 291 // set our UnhandledExceptionFilter and save any previous one 292 prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception); 293 #endif 294 295 // Done 296 return; 297 } 298 299 void os::breakpoint() { 300 DebugBreak(); 
301 } 302 303 // Invoked from the BREAKPOINT Macro 304 extern "C" void breakpoint() { 305 os::breakpoint(); 306 } 307 308 // RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP. 309 // So far, this method is only used by Native Memory Tracking, which is 310 // only supported on Windows XP or later. 311 // 312 int os::get_native_stack(address* stack, int frames, int toSkip) { 313 #ifdef _NMT_NOINLINE_ 314 toSkip++; 315 #endif 316 int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames, 317 (PVOID*)stack, NULL); 318 for (int index = captured; index < frames; index ++) { 319 stack[index] = NULL; 320 } 321 return captured; 322 } 323 324 325 // os::current_stack_base() 326 // 327 // Returns the base of the stack, which is the stack's 328 // starting address. This function must be called 329 // while running on the stack of the thread being queried. 330 331 address os::current_stack_base() { 332 MEMORY_BASIC_INFORMATION minfo; 333 address stack_bottom; 334 size_t stack_size; 335 336 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 337 stack_bottom = (address)minfo.AllocationBase; 338 stack_size = minfo.RegionSize; 339 340 // Add up the sizes of all the regions with the same 341 // AllocationBase. 342 while (1) { 343 VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo)); 344 if (stack_bottom == (address)minfo.AllocationBase) { 345 stack_size += minfo.RegionSize; 346 } else { 347 break; 348 } 349 } 350 351 #ifdef _M_IA64 352 // IA64 has memory and register stacks 353 // 354 // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit 355 // at thread creation (1MB backing store growing upwards, 1MB memory stack 356 // growing downwards, 2MB summed up) 357 // 358 // ... 
359 // ------- top of stack (high address) ----- 360 // | 361 // | 1MB 362 // | Backing Store (Register Stack) 363 // | 364 // | / \ 365 // | | 366 // | | 367 // | | 368 // ------------------------ stack base ----- 369 // | 1MB 370 // | Memory Stack 371 // | 372 // | | 373 // | | 374 // | | 375 // | \ / 376 // | 377 // ----- bottom of stack (low address) ----- 378 // ... 379 380 stack_size = stack_size / 2; 381 #endif 382 return stack_bottom + stack_size; 383 } 384 385 size_t os::current_stack_size() { 386 size_t sz; 387 MEMORY_BASIC_INFORMATION minfo; 388 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 389 sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase; 390 return sz; 391 } 392 393 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 394 const struct tm* time_struct_ptr = localtime(clock); 395 if (time_struct_ptr != NULL) { 396 *res = *time_struct_ptr; 397 return res; 398 } 399 return NULL; 400 } 401 402 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo); 403 404 // Thread start routine for all new Java threads 405 static unsigned __stdcall java_start(Thread* thread) { 406 // Try to randomize the cache line index of hot stack frames. 407 // This helps when threads of the same stack traces evict each other's 408 // cache lines. The threads can be either from the same JVM instance, or 409 // from different JVM instances. The benefit is especially true for 410 // processors with hyperthreading technology. 
411 static int counter = 0; 412 int pid = os::current_process_id(); 413 _alloca(((pid ^ counter++) & 7) * 128); 414 415 OSThread* osthr = thread->osthread(); 416 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 417 418 if (UseNUMA) { 419 int lgrp_id = os::numa_get_group_id(); 420 if (lgrp_id != -1) { 421 thread->set_lgrp_id(lgrp_id); 422 } 423 } 424 425 // Diagnostic code to investigate JDK-6573254 426 int res = 30115; // non-java thread 427 if (thread->is_Java_thread()) { 428 res = 20115; // java thread 429 } 430 431 // Install a win32 structured exception handler around every thread created 432 // by VM, so VM can generate error dump when an exception occurred in non- 433 // Java thread (e.g. VM thread). 434 __try { 435 thread->run(); 436 } __except(topLevelExceptionFilter( 437 (_EXCEPTION_POINTERS*)_exception_info())) { 438 // Nothing to do. 439 } 440 441 // One less thread is executing 442 // When the VMThread gets here, the main thread may have already exited 443 // which frees the CodeHeap containing the Atomic::add code 444 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) { 445 Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count); 446 } 447 448 // Thread must not return from exit_process_or_thread(), but if it does, 449 // let it proceed to exit normally 450 return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res); 451 } 452 453 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, 454 int thread_id) { 455 // Allocate the OSThread object 456 OSThread* osthread = new OSThread(NULL, NULL); 457 if (osthread == NULL) return NULL; 458 459 // Initialize support for Java interrupts 460 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 461 if (interrupt_event == NULL) { 462 delete osthread; 463 return NULL; 464 } 465 osthread->set_interrupt_event(interrupt_event); 466 467 // Store info on the Win32 thread into the OSThread 468 osthread->set_thread_handle(thread_handle); 
469 osthread->set_thread_id(thread_id); 470 471 if (UseNUMA) { 472 int lgrp_id = os::numa_get_group_id(); 473 if (lgrp_id != -1) { 474 thread->set_lgrp_id(lgrp_id); 475 } 476 } 477 478 // Initial thread state is INITIALIZED, not SUSPENDED 479 osthread->set_state(INITIALIZED); 480 481 return osthread; 482 } 483 484 485 bool os::create_attached_thread(JavaThread* thread) { 486 #ifdef ASSERT 487 thread->verify_not_published(); 488 #endif 489 HANDLE thread_h; 490 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(), 491 &thread_h, THREAD_ALL_ACCESS, false, 0)) { 492 fatal("DuplicateHandle failed\n"); 493 } 494 OSThread* osthread = create_os_thread(thread, thread_h, 495 (int)current_thread_id()); 496 if (osthread == NULL) { 497 return false; 498 } 499 500 // Initial thread state is RUNNABLE 501 osthread->set_state(RUNNABLE); 502 503 thread->set_osthread(osthread); 504 return true; 505 } 506 507 bool os::create_main_thread(JavaThread* thread) { 508 #ifdef ASSERT 509 thread->verify_not_published(); 510 #endif 511 if (_starting_thread == NULL) { 512 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 513 if (_starting_thread == NULL) { 514 return false; 515 } 516 } 517 518 // The primordial thread is runnable from the start) 519 _starting_thread->set_state(RUNNABLE); 520 521 thread->set_osthread(_starting_thread); 522 return true; 523 } 524 525 // Allocate and initialize a new OSThread 526 bool os::create_thread(Thread* thread, ThreadType thr_type, 527 size_t stack_size) { 528 unsigned thread_id; 529 530 // Allocate the OSThread object 531 OSThread* osthread = new OSThread(NULL, NULL); 532 if (osthread == NULL) { 533 return false; 534 } 535 536 // Initialize support for Java interrupts 537 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 538 if (interrupt_event == NULL) { 539 delete osthread; 540 return NULL; 541 } 542 osthread->set_interrupt_event(interrupt_event); 543 osthread->set_interrupted(false); 544 545 
thread->set_osthread(osthread); 546 547 if (stack_size == 0) { 548 switch (thr_type) { 549 case os::java_thread: 550 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 551 if (JavaThread::stack_size_at_create() > 0) { 552 stack_size = JavaThread::stack_size_at_create(); 553 } 554 break; 555 case os::compiler_thread: 556 if (CompilerThreadStackSize > 0) { 557 stack_size = (size_t)(CompilerThreadStackSize * K); 558 break; 559 } // else fall through: 560 // use VMThreadStackSize if CompilerThreadStackSize is not defined 561 case os::vm_thread: 562 case os::pgc_thread: 563 case os::cgc_thread: 564 case os::watcher_thread: 565 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 566 break; 567 } 568 } 569 570 // Create the Win32 thread 571 // 572 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() 573 // does not specify stack size. Instead, it specifies the size of 574 // initially committed space. The stack size is determined by 575 // PE header in the executable. If the committed "stack_size" is larger 576 // than default value in the PE header, the stack is rounded up to the 577 // nearest multiple of 1MB. For example if the launcher has default 578 // stack size of 320k, specifying any size less than 320k does not 579 // affect the actual stack size at all, it only affects the initial 580 // commitment. On the other hand, specifying 'stack_size' larger than 581 // default value may cause significant increase in memory usage, because 582 // not only the stack space will be rounded up to MB, but also the 583 // entire space is committed upfront. 584 // 585 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' 586 // for CreateThread() that can treat 'stack_size' as stack size. However we 587 // are not supposed to call CreateThread() directly according to MSDN 588 // document because JVM uses C runtime library. 
The good news is that the 589 // flag appears to work with _beginthredex() as well. 590 591 #ifndef STACK_SIZE_PARAM_IS_A_RESERVATION 592 #define STACK_SIZE_PARAM_IS_A_RESERVATION (0x10000) 593 #endif 594 595 HANDLE thread_handle = 596 (HANDLE)_beginthreadex(NULL, 597 (unsigned)stack_size, 598 (unsigned (__stdcall *)(void*)) java_start, 599 thread, 600 CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 601 &thread_id); 602 if (thread_handle == NULL) { 603 // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again 604 // without the flag. 605 thread_handle = 606 (HANDLE)_beginthreadex(NULL, 607 (unsigned)stack_size, 608 (unsigned (__stdcall *)(void*)) java_start, 609 thread, 610 CREATE_SUSPENDED, 611 &thread_id); 612 } 613 if (thread_handle == NULL) { 614 // Need to clean up stuff we've allocated so far 615 CloseHandle(osthread->interrupt_event()); 616 thread->set_osthread(NULL); 617 delete osthread; 618 return NULL; 619 } 620 621 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count); 622 623 // Store info on the Win32 thread into the OSThread 624 osthread->set_thread_handle(thread_handle); 625 osthread->set_thread_id(thread_id); 626 627 // Initial thread state is INITIALIZED, not SUSPENDED 628 osthread->set_state(INITIALIZED); 629 630 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 631 return true; 632 } 633 634 635 // Free Win32 resources related to the OSThread 636 void os::free_thread(OSThread* osthread) { 637 assert(osthread != NULL, "osthread not set"); 638 CloseHandle(osthread->thread_handle()); 639 CloseHandle(osthread->interrupt_event()); 640 delete osthread; 641 } 642 643 static jlong first_filetime; 644 static jlong initial_performance_count; 645 static jlong performance_frequency; 646 647 648 jlong as_long(LARGE_INTEGER x) { 649 jlong result = 0; // initialization to avoid warning 650 set_high(&result, x.HighPart); 651 set_low(&result, x.LowPart); 652 return result; 653 } 654 655 
656 jlong os::elapsed_counter() { 657 LARGE_INTEGER count; 658 if (win32::_has_performance_count) { 659 QueryPerformanceCounter(&count); 660 return as_long(count) - initial_performance_count; 661 } else { 662 FILETIME wt; 663 GetSystemTimeAsFileTime(&wt); 664 return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime); 665 } 666 } 667 668 669 jlong os::elapsed_frequency() { 670 if (win32::_has_performance_count) { 671 return performance_frequency; 672 } else { 673 // the FILETIME time is the number of 100-nanosecond intervals since January 1,1601. 674 return 10000000; 675 } 676 } 677 678 679 julong os::available_memory() { 680 return win32::available_memory(); 681 } 682 683 julong os::win32::available_memory() { 684 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 685 // value if total memory is larger than 4GB 686 MEMORYSTATUSEX ms; 687 ms.dwLength = sizeof(ms); 688 GlobalMemoryStatusEx(&ms); 689 690 return (julong)ms.ullAvailPhys; 691 } 692 693 julong os::physical_memory() { 694 return win32::physical_memory(); 695 } 696 697 bool os::has_allocatable_memory_limit(julong* limit) { 698 MEMORYSTATUSEX ms; 699 ms.dwLength = sizeof(ms); 700 GlobalMemoryStatusEx(&ms); 701 #ifdef _LP64 702 *limit = (julong)ms.ullAvailVirtual; 703 return true; 704 #else 705 // Limit to 1400m because of the 2gb address space wall 706 *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual); 707 return true; 708 #endif 709 } 710 711 // VC6 lacks DWORD_PTR 712 #if _MSC_VER < 1300 713 typedef UINT_PTR DWORD_PTR; 714 #endif 715 716 int os::active_processor_count() { 717 DWORD_PTR lpProcessAffinityMask = 0; 718 DWORD_PTR lpSystemAffinityMask = 0; 719 int proc_count = processor_count(); 720 if (proc_count <= sizeof(UINT_PTR) * BitsPerByte && 721 GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) { 722 // Nof active processors is number of bits in process affinity mask 723 int bitcount = 0; 724 while 
(lpProcessAffinityMask != 0) { 725 lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1); 726 bitcount++; 727 } 728 return bitcount; 729 } else { 730 return proc_count; 731 } 732 } 733 734 void os::set_native_thread_name(const char *name) { 735 736 // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx 737 // 738 // Note that unfortunately this only works if the process 739 // is already attached to a debugger; debugger must observe 740 // the exception below to show the correct name. 741 742 const DWORD MS_VC_EXCEPTION = 0x406D1388; 743 struct { 744 DWORD dwType; // must be 0x1000 745 LPCSTR szName; // pointer to name (in user addr space) 746 DWORD dwThreadID; // thread ID (-1=caller thread) 747 DWORD dwFlags; // reserved for future use, must be zero 748 } info; 749 750 info.dwType = 0x1000; 751 info.szName = name; 752 info.dwThreadID = -1; 753 info.dwFlags = 0; 754 755 __try { 756 RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info ); 757 } __except(EXCEPTION_CONTINUE_EXECUTION) {} 758 } 759 760 bool os::distribute_processes(uint length, uint* distribution) { 761 // Not yet implemented. 762 return false; 763 } 764 765 bool os::bind_to_processor(uint processor_id) { 766 // Not yet implemented. 
767 return false; 768 } 769 770 void os::win32::initialize_performance_counter() { 771 LARGE_INTEGER count; 772 if (QueryPerformanceFrequency(&count)) { 773 win32::_has_performance_count = 1; 774 performance_frequency = as_long(count); 775 QueryPerformanceCounter(&count); 776 initial_performance_count = as_long(count); 777 } else { 778 win32::_has_performance_count = 0; 779 FILETIME wt; 780 GetSystemTimeAsFileTime(&wt); 781 first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 782 } 783 } 784 785 786 double os::elapsedTime() { 787 return (double) elapsed_counter() / (double) elapsed_frequency(); 788 } 789 790 791 // Windows format: 792 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601. 793 // Java format: 794 // Java standards require the number of milliseconds since 1/1/1970 795 796 // Constant offset - calculated using offset() 797 static jlong _offset = 116444736000000000; 798 // Fake time counter for reproducible results when debugging 799 static jlong fake_time = 0; 800 801 #ifdef ASSERT 802 // Just to be safe, recalculate the offset in debug mode 803 static jlong _calculated_offset = 0; 804 static int _has_calculated_offset = 0; 805 806 jlong offset() { 807 if (_has_calculated_offset) return _calculated_offset; 808 SYSTEMTIME java_origin; 809 java_origin.wYear = 1970; 810 java_origin.wMonth = 1; 811 java_origin.wDayOfWeek = 0; // ignored 812 java_origin.wDay = 1; 813 java_origin.wHour = 0; 814 java_origin.wMinute = 0; 815 java_origin.wSecond = 0; 816 java_origin.wMilliseconds = 0; 817 FILETIME jot; 818 if (!SystemTimeToFileTime(&java_origin, &jot)) { 819 fatal(err_msg("Error = %d\nWindows error", GetLastError())); 820 } 821 _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime); 822 _has_calculated_offset = 1; 823 assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal"); 824 return _calculated_offset; 825 } 826 #else 827 jlong 
offset() { 828 return _offset; 829 } 830 #endif 831 832 jlong windows_to_java_time(FILETIME wt) { 833 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 834 return (a - offset()) / 10000; 835 } 836 837 // Returns time ticks in (10th of micro seconds) 838 jlong windows_to_time_ticks(FILETIME wt) { 839 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 840 return (a - offset()); 841 } 842 843 FILETIME java_to_windows_time(jlong l) { 844 jlong a = (l * 10000) + offset(); 845 FILETIME result; 846 result.dwHighDateTime = high(a); 847 result.dwLowDateTime = low(a); 848 return result; 849 } 850 851 bool os::supports_vtime() { return true; } 852 bool os::enable_vtime() { return false; } 853 bool os::vtime_enabled() { return false; } 854 855 double os::elapsedVTime() { 856 FILETIME created; 857 FILETIME exited; 858 FILETIME kernel; 859 FILETIME user; 860 if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) { 861 // the resolution of windows_to_java_time() should be sufficient (ms) 862 return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS; 863 } else { 864 return elapsedTime(); 865 } 866 } 867 868 jlong os::javaTimeMillis() { 869 if (UseFakeTimers) { 870 return fake_time++; 871 } else { 872 FILETIME wt; 873 GetSystemTimeAsFileTime(&wt); 874 return windows_to_java_time(wt); 875 } 876 } 877 878 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) { 879 FILETIME wt; 880 GetSystemTimeAsFileTime(&wt); 881 jlong ticks = windows_to_time_ticks(wt); // 10th of micros 882 jlong secs = jlong(ticks / 10000000); // 10000 * 1000 883 seconds = secs; 884 nanos = jlong(ticks - (secs*10000000)) * 100; 885 } 886 887 jlong os::javaTimeNanos() { 888 if (!win32::_has_performance_count) { 889 return javaTimeMillis() * NANOSECS_PER_MILLISEC; // the best we can do. 
890 } else { 891 LARGE_INTEGER current_count; 892 QueryPerformanceCounter(¤t_count); 893 double current = as_long(current_count); 894 double freq = performance_frequency; 895 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC); 896 return time; 897 } 898 } 899 900 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 901 if (!win32::_has_performance_count) { 902 // javaTimeMillis() doesn't have much percision, 903 // but it is not going to wrap -- so all 64 bits 904 info_ptr->max_value = ALL_64_BITS; 905 906 // this is a wall clock timer, so may skip 907 info_ptr->may_skip_backward = true; 908 info_ptr->may_skip_forward = true; 909 } else { 910 jlong freq = performance_frequency; 911 if (freq < NANOSECS_PER_SEC) { 912 // the performance counter is 64 bits and we will 913 // be multiplying it -- so no wrap in 64 bits 914 info_ptr->max_value = ALL_64_BITS; 915 } else if (freq > NANOSECS_PER_SEC) { 916 // use the max value the counter can reach to 917 // determine the max value which could be returned 918 julong max_counter = (julong)ALL_64_BITS; 919 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC)); 920 } else { 921 // the performance counter is 64 bits and we will 922 // be using it directly -- so no wrap in 64 bits 923 info_ptr->max_value = ALL_64_BITS; 924 } 925 926 // using a counter, so no skipping 927 info_ptr->may_skip_backward = false; 928 info_ptr->may_skip_forward = false; 929 } 930 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time 931 } 932 933 char* os::local_time_string(char *buf, size_t buflen) { 934 SYSTEMTIME st; 935 GetLocalTime(&st); 936 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 937 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); 938 return buf; 939 } 940 941 bool os::getTimesSecs(double* process_real_time, 942 double* process_user_time, 943 double* process_system_time) { 944 HANDLE h_process = GetCurrentProcess(); 945 FILETIME create_time, exit_time, kernel_time, user_time; 946 
BOOL result = GetProcessTimes(h_process,
                              &create_time,
                              &exit_time,
                              &kernel_time,
                              &user_time);
  if (result != 0) {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    // The jlong values are milliseconds (they are divided by MILLIUNITS
    // below to produce seconds) — presumably windows_to_java_time()
    // converts FILETIME to Java epoch millis; TODO confirm against its
    // definition elsewhere in this file.
    jlong rtc_millis = windows_to_java_time(wt);
    jlong user_millis = windows_to_java_time(user_time);
    jlong system_millis = windows_to_java_time(kernel_time);
    *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
    *process_user_time = ((double) user_millis) / ((double) MILLIUNITS);
    *process_system_time = ((double) system_millis) / ((double) MILLIUNITS);
    return true;
  } else {
    // GetProcessTimes failed; output parameters are left untouched.
    return false;
  }
}

// Orderly VM shutdown: release PerfMemory's persistent resources, flush
// and close log streams, then run the user-registered abort hook if one
// was supplied via Arguments.
void os::shutdown() {
  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}


// Pointer to dbghelp.dll's MiniDumpWriteDump(), resolved lazily in
// os::check_or_create_dump() via GetProcAddress.
static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
                                         PMINIDUMP_EXCEPTION_INFORMATION,
                                         PMINIDUMP_USER_STREAM_INFORMATION,
                                         PMINIDUMP_CALLBACK_INFORMATION);

// Write a minidump (hs_err_pid<pid>.mdmp in the current directory) for the
// crash described by exceptionRecord/contextRecord (either may be NULL, in
// which case no exception information is embedded in the dump).  'buffer'
// is scratch space for the dump path / status message; the outcome is
// reported through VMError::report_coredump_status() in every path.
void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) {
  HINSTANCE dbghelp;
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  HANDLE dumpFile;
  MINIDUMP_TYPE dumpType;
  static const char* cwd;

  // Default is to always create dump for debug builds, on product builds only dump on server versions of Windows.
#ifndef ASSERT
  // If running on a client version of Windows and user has not explicitly enabled dumping
  if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false);
    return;
    // If running on a server version of Windows and user has explictly disabled dumping
  } else if (os::win32::is_windows_server() && !FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#else
  if (!FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#endif

  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);

  if (dbghelp == NULL) {
    VMError::report_coredump_status("Failed to load dbghelp.dll", false);
    return;
  }

  _MiniDumpWriteDump =
      CAST_TO_FN_PTR(BOOL(WINAPI *)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
                                    PMINIDUMP_EXCEPTION_INFORMATION,
                                    PMINIDUMP_USER_STREAM_INFORMATION,
                                    PMINIDUMP_CALLBACK_INFORMATION),
                     GetProcAddress(dbghelp,
                                    "MiniDumpWriteDump"));

  if (_MiniDumpWriteDump == NULL) {
    VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false);
    return;
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData);

  // Older versions of dbghelp.h doesn't contain all the dumptypes we want, dbghelp.h with
  // API_VERSION_NUMBER 11 or higher contains the ones we want though
#if API_VERSION_NUMBER >= 11
  dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo |
                             MiniDumpWithUnloadedModules);
#endif

  cwd = get_current_directory(NULL, 0);
  jio_snprintf(buffer, bufferSize, "%s\\hs_err_pid%u.mdmp", cwd, current_process_id());
  dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);

  if (dumpFile == INVALID_HANDLE_VALUE) {
    VMError::report_coredump_status("Failed to create file for dumping", false);
    return;
  }
  // Only embed exception info when both records were supplied by the caller.
  if (exceptionRecord != NULL && contextRecord != NULL) {
    ep.ContextRecord = (PCONTEXT) contextRecord;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }


  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
    DWORD error = GetLastError();
    LPTSTR msgbuf = NULL;

    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) {

      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
      LocalFree(msgbuf);
    } else {
      // Call to FormatMessage failed, just include the result from GetLastError
      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
    }
    VMError::report_coredump_status(buffer, false);
  } else {
    // Success: 'buffer' still holds the dump path written above.
    VMError::report_coredump_status(buffer, true);
  }

  CloseHandle(dumpFile);
}


// Abnormal VM termination: run the orderly shutdown steps, then exit the
// process with status 1.  The dump_core argument is ignored on Windows.
void os::abort(bool dump_core) {
  os::shutdown();
  // no core dump on Windows
win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
// * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

// Caller must have already run dirname through JVM_NativePath, which removes
// duplicate slashes and converts all instances of '/' into '\\'.

// Open a directory stream.  Returns 0 with errno set (ENOMEM/ENOENT/
// ENOTDIR/EACCES) on failure.  The returned DIR (and its path buffer)
// must be released with os::closedir().
DIR * os::opendir(const char *dirname) {
  assert(dirname != NULL, "just checking");   // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
  DWORD fattr;                                // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == 0) {
    errno = ENOMEM;
    return 0;
  }

  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
  // as a directory in FindFirstFile().  We detect this case here and
  // prepend the current drive name.
  //
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    alt_dirname[0] = _getdrive() + 'A' - 1;
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  // +5 leaves room for the "\\*.*" wildcard suffix appended below plus NUL.
  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dirp->path == 0) {
    free(dirp);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dirp->path, dirname);

  fattr = GetFileAttributes(dirp->path);
  if (fattr == 0xffffffff) {
    free(dirp->path);
    free(dirp);
    errno = ENOENT;
    return 0;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path);
    free(dirp);
    errno = ENOTDIR;
    return 0;
  }

  // Append "*.*", or possibly "\\*.*", to path
  if (dirp->path[1] == ':' &&
      (dirp->path[2] == '\0' ||
       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    // No '\\' needed for cases like "Z:" or "Z:\"
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path);
      free(dirp);
      errno = EACCES;
      return 0;
    }
    // ERROR_FILE_NOT_FOUND (empty directory): return a valid DIR whose
    // handle is INVALID_HANDLE_VALUE; os::readdir() then yields no entries.
  }
  return dirp;
}

// parameter dbuf unused on Windows
// Return the entry fetched by the previous FindFirstFile/FindNextFile call
// and pre-fetch the next one; returns 0 at end of stream (or EBADF).
struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    return 0;
  }

  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return 0;
    }
    // No more entries: close the find handle now; the final cached entry
    // is still returned below.
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}

// Close a directory stream opened by os::opendir(), releasing the find
// handle and all memory.  Returns 0 on success, -1 with errno=EBADF if
// FindClose fails.
int os::closedir(DIR *dirp) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path);
  free(dirp);
  return 0;
}

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
// Note: result is a function-local static buffer, so the returned pointer
// is process-wide shared state.
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf) > 0) {
    return path_buf;
  } else {
    path_buf[0] = '\0';
    return path_buf;
  }
}

// True if 'filename' names any existing file-system object (file or
// directory); NULL/empty names are treated as non-existent.
static bool file_exists(const char* filename) {
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
}

// Build "<pname>\\<fname>.dll" into 'buffer'.  'pname' may be empty, end
// in ':' or '\\', or be a path-separator-delimited list (each element is
// probed with file_exists()).  Returns false on buffer overflow or when no
// list element yielded an existing file.
bool os::dll_build_name(char *buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  const size_t pnamelen = pname ? strlen(pname) : 0;
  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    jio_snprintf(buffer, buflen, "%s.dll", fname);
    retval = true;
  } else if (c == ':' || c == '\\') {
    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      char* path = pelements[i];
      // Really shouldn't be NULL, but check can't hurt
      size_t plen = (path == NULL) ?
0 : strlen(path);
      if (plen == 0) {
        continue; // skip the empty path values
      }
      const char lastchar = path[plen - 1];
      if (lastchar == ':' || lastchar == '\\') {
        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
      } else {
        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
      }
      // Stop at the first candidate that actually exists on disk.
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
    retval = true;
  }
  return retval;
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
const char* os::get_current_directory(char *buf, size_t buflen) {
  // _getcwd takes an int length; clamp buflen so the narrowing cast is safe.
  int n = static_cast<int>(buflen);
  if (buflen > INT_MAX) n = INT_MAX;
  return _getcwd(buf, n);
}

//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if address in
// within the NTDLL address space.
//
static bool _addr_in_ntdll(address addr) {
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if (hmod == NULL) return false;
  if (!os::PSApiDll::GetModuleInformation(GetCurrentProcess(), hmod,
                                          &minfo, sizeof(MODULEINFO))) {
    return false;
  }

  // In range [base, base + SizeOfImage)?
  if ((addr >= minfo.lpBaseOfDll) &&
      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
    return true;
  } else {
    return false;
  }
}
#endif

// In/out parameter bundle for _locate_module_by_addr (below).
struct _modinfo {
  address addr;       // in: address to locate
  char*   full_path;  // point to a char buffer
  int     buflen;     // size of the buffer
  address base_addr;  // out: base of the module containing 'addr'
};

// get_loaded_modules_info callback: returns 1 (stop iteration) when the
// module [base_addr, top_address) contains pmod->addr, filling in the
// module path and base address.
static int _locate_module_by_addr(const char * mod_fname, address base_addr,
                                  address top_address, void * param) {
  struct _modinfo *pmod = (struct _modinfo *)param;
  if (!pmod) return -1;

  if (base_addr <= pmod->addr &&
      top_address > pmod->addr) {
    // if a buffer is provided, copy path name to the buffer
    if (pmod->full_path) {
      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
    }
    pmod->base_addr = base_addr;
    return 1;
  }
  return 0;
}

// Find which loaded module contains 'addr'; on success 'buf' holds the
// module path and *offset the byte offset from the module base.  On
// failure buf is emptied and *offset set to -1.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
  //       return the full path to the DLL file, sometimes it returns path
  //       to the corresponding PDB file (debug info); sometimes it only
  //       returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr      = addr;
  mi.full_path = buf;
  mi.buflen    = buflen;
  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

// Symbolize 'addr' via the Decoder; on failure buf is emptied and
// *offset (if given) set to -1.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset)) {
    return true;
  }
  if (offset != NULL) *offset = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
// (identifies jvm.dll as the module containing this function's own code).
static int _locate_jvm_dll(const char* mod_fname, address base_addr,
                           address top_address, void * param) {
  if (!param) return -1;

  if (base_addr <= (address)_locate_jvm_dll &&
      top_address > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = top_address;
    return 1;
  }
  return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
bool os::address_is_in_vm(address addr) {
  // Lazily resolve jvm.dll's address range on first use.
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(const char* fname, address base_address,
                         address top_address, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
return 0;
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
  void * result = LoadLibrary(name);
  if (result != NULL) {
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
    ebuf[ebuflen - 1] = '\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen - 1] = '\0';
  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
  if (fd < 0) {
    return NULL;
  }

  // Walk the PE header by hand: e_lfanew at offset 0x3c points to the
  // "PE\0\0" signature, followed by the COFF header whose first field is
  // the target machine code.
  uint32_t signature_offset;
  uint16_t lib_arch = 0;
  bool failed_to_get_lib_arch =
      ( // Go to position 3c in the dll
       (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
       ||
       // Read location of signature
       (sizeof(signature_offset) !=
       (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
       ||
       // Go to COFF File Header in dll
       // that is located after "signature" (4 bytes long)
       (os::seek_to_file_offset(fd,
       signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
       ||
       // Read field that contains code of architecture
       // that dll was built for
       (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
      );

  ::close(fd);
  if (failed_to_get_lib_arch) {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  typedef struct {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[] = {
    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
  };
#if (defined _M_IA64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_IA64;
#elif (defined _M_AMD64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
         is defined :_M_IA64,_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str = NULL, *lib_arch_str = NULL;
  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (lib_arch == arch_array[i].arch_code) {
      lib_arch_str = arch_array[i].arch_name;
    }
    if (running_arch == arch_array[i].arch_code) {
      running_arch_str = arch_array[i].arch_name;
    }
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch) {
    return NULL;
  }

  if (lib_arch_str != NULL) {
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str, running_arch_str);
  } else {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch, running_arch_str);
  }

  return NULL;
}

// Print all loaded modules to 'st', one "base - top  path" line per module.
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  get_loaded_modules_info(_print_module, (void *)st);
}

// Enumerate this process's loaded modules via PSAPI and invoke 'callback'
// for each one until it returns non-zero; that value is returned (0 if the
// enumeration completed or PSAPI is unavailable).
// NOTE(review): 'filename' is a function-local static buffer, so this
// routine is not reentrant/thread-safe — confirm all callers serialize.
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  HANDLE   hProcess;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[MAX_PATH];
  int         result = 0;

  if (!os::PSApiDll::PSApiAvailable()) {
    return 0;
  }

  int pid = os::current_process_id();
  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
                                        sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  // MIN2 guards against num_modules exceeding our fixed 128-entry array.
  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
                                           filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
                                            &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = callback(filename, (address)modinfo.lpBaseOfDll,
(address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
}

void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}

// Print host name (debug builds only) and the Windows version line.
void os::print_os_info(outputStream* st) {
#ifdef ASSERT
  char buffer[1024];
  DWORD size = sizeof(buffer);
  st->print(" HostName: ");
  if (GetComputerNameEx(ComputerNameDnsHostname, buffer, &size)) {
    st->print("%s", buffer);
  } else {
    st->print("N/A");
  }
#endif
  st->print(" OS:");
  os::win32::print_windows_version(st);
}

// Determine and print the Windows product name/version.  The version is
// derived from kernel32.dll's file-version resource rather than
// GetVersionEx (see comment below); GetVersionEx is only used to tell
// workstation from server editions.
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  VS_FIXEDFILEINFO *file_info;
  TCHAR kernel32_path[MAX_PATH];
  UINT len, ret;

  // Use the GetVersionEx information to see if we're on a server or
  // workstation edition of Windows. Starting with Windows 8.1 we can't
  // trust the OS version information returned by this API.
  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("Call to GetVersionEx failed");
    return;
  }
  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);

  // Get the full path to \Windows\System32\kernel32.dll and use that for
  // determining what version of Windows we're running on.
  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
  ret = GetSystemDirectory(kernel32_path, len);
  if (ret == 0 || ret > len) {
    st->print_cr("Call to GetSystemDirectory failed");
    return;
  }
  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);

  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
  if (version_size == 0) {
    st->print_cr("Call to GetFileVersionInfoSize failed");
    return;
  }

  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
  if (version_info == NULL) {
    st->print_cr("Failed to allocate version_info");
    return;
  }

  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
    os::free(version_info);
    st->print_cr("Call to GetFileVersionInfo failed");
    return;
  }

  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
    os::free(version_info);
    st->print_cr("Call to VerQueryValue failed");
    return;
  }

  int major_version = HIWORD(file_info->dwProductVersionMS);
  int minor_version = LOWORD(file_info->dwProductVersionMS);
  int build_number = HIWORD(file_info->dwProductVersionLS);
  int build_minor = LOWORD(file_info->dwProductVersionLS);
  // Encode major.minor as a single comparable integer, e.g. 6.3 -> 6003.
  int os_vers = major_version * 1000 + minor_version;
  os::free(version_info);

  st->print(" Windows ");
  switch (os_vers) {

  case 6000:
    if (is_workstation) {
      st->print("Vista");
    } else {
      st->print("Server 2008");
    }
    break;

  case 6001:
    if (is_workstation) {
      st->print("7");
    } else {
      st->print("Server 2008 R2");
    }
    break;

  case 6002:
    if (is_workstation) {
      st->print("8");
    } else {
      st->print("Server 2012");
    }
    break;

  case 6003:
    if (is_workstation) {
      st->print("8.1");
    } else {
      st->print("Server 2012 R2");
    }
    break;

  case 10000:
    if (is_workstation) {
      st->print("10");
    } else {
      // The server version name of Windows 10 is not known at this time
      st->print("%d.%d", major_version, minor_version);
    }
    break;

  default:
    // Unrecognized windows, print out its major and minor versions
    st->print("%d.%d", major_version, minor_version);
    break;
  }

  // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
  // find out whether we are running on 64 bit processor or not
  SYSTEM_INFO si;
  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  os::Kernel32Dll::GetNativeSystemInfo(&si);
  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
    st->print(" , 64 bit");
  }

  st->print(" Build %d", build_number);
  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
  st->cr();
}

void os::pd_print_cpu_info(outputStream* st) {
  // Nothing to do for now.
}

// Print page size plus physical/swap totals and free amounts (in KB).
void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);

  // NOTE(review): "%u" is passed 64-bit values here (ullTotalPageFile /
  // ullAvailPageFile are DWORDLONG, and physical_memory() is likely 64-bit
  // too) — a varargs size mismatch that can garble output on Win64.
  // Consider UINT64_FORMAT-style specifiers; left as-is pending review.
  st->print(", physical %uk", os::physical_memory() >> 10);
  st->print("(%uk free)", os::available_memory() >> 10);

  st->print(", swap %uk", ms.ullTotalPageFile >> 10);
  st->print("(%uk free)", ms.ullAvailPageFile >> 10);
  st->cr();
}

// Print a human-readable summary of the Windows EXCEPTION_RECORD passed in
// 'siginfo': exception code plus its parameters, with special handling for
// access violations and CDS-archive page-in errors.
void os::print_siginfo(outputStream *st, void *siginfo) {
  EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo;
  st->print("siginfo:");
  st->print(" ExceptionCode=0x%x", er->ExceptionCode);

  if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      er->NumberParameters >= 2) {
    // ExceptionInformation[0]: 0 = read fault, 1 = write fault;
    // ExceptionInformation[1]: faulting address.
    switch (er->ExceptionInformation[0]) {
    case 0:
st->print(", reading address"); break;
    case 1: st->print(", writing address"); break;
    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                       er->ExceptionInformation[0]);
    }
    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR &&
             er->NumberParameters >= 2 && UseSharedSpaces) {
    // Page-in failure inside the class data sharing archive mapping:
    // give a more actionable message than the raw exception code.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) {
      st->print("\n\nError accessing class data sharing archive." \
                " Mapped file inaccessible during execution, " \
                " possible disk/network problem.");
    }
  } else {
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}

// Cached result of os::jvm_path(); filled on first call.
static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like jvm.dll is installed there (append a fake suffix
    // hotspot/jvm.dll).
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {
      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      if (0 != _access(buf, 0)) {
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  // Fallback (and the normal case): ask the OS for this module's path.
  if (buf[0] == '\0') {
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  strncpy(saved_jvm_path, buf, MAX_PATH);
  saved_jvm_path[MAX_PATH - 1] = '\0';
}


// 32-bit Windows stdcall name decoration: leading underscore...
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}


// ...and trailing "@<bytes of arguments>". Both are empty on Win64.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("@%d", args_size * sizeof(int));
#endif
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c
//
// Copy a text description of the last Win32 (or, failing that, C runtime)
// error into 'buf'; returns the message length, or 0 if neither
// GetLastError() nor errno reports an error.
size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
                                     FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
                                     NULL,
                                     errval,
                                     0,
                                     buf,
                                     (DWORD)len,
                                     NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;
    strncpy(buf, s, n);
    buf[n] = '\0';
os::die();
    }

    os::signal_raise(SIGINT);
    return TRUE;
    break;
  case CTRL_BREAK_EVENT:
    if (sigbreakHandler != NULL) {
      (*sigbreakHandler)(SIGBREAK);
    }
    return TRUE;
    break;
  case CTRL_LOGOFF_EVENT: {
    // Don't terminate JVM if it is running in a non-interactive session,
    // such as a service process.
    USEROBJECTFLAGS flags;
    HANDLE handle = GetProcessWindowStation();
    if (handle != NULL &&
        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
                                 sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let next handler to deal
      // with it.
      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
        return FALSE;
      }
    }
  }
  // Intentional fall-through: an interactive logoff is treated the same
  // as close/shutdown and raises SIGTERM.
  case CTRL_CLOSE_EVENT:
  case CTRL_SHUTDOWN_EVENT:
    os::signal_raise(SIGTERM);
    return TRUE;
    break;
  default:
    break;
  }
  return FALSE;
}

// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Semaphore released once per pending signal; the signal thread waits on it.
static HANDLE sig_sem = NULL;

// Platform-dependent signal-subsystem init: zero the pending-signal
// counters, create the wakeup semaphore, and (unless -Xrs) install the
// console control handler.
void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.  For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified.  This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case.  See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}

// Record one occurrence of 'signal_number' and wake the signal thread.
void os::signal_notify(int signal_number) {
  BOOL ret;
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[signal_number]);
    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
    assert(ret != 0, "ReleaseSemaphore() failed");
  }
}

// Scan pending_signals for a raised signal, atomically consuming one
// occurrence.  If none is pending: return -1 when !wait_for_signal,
// otherwise block on sig_sem (cooperating with the thread-suspension
// protocol) and rescan.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // CAS-decrement so concurrent consumers never double-count.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
        assert(ret != 0, "ReleaseSemaphore() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

int os::signal_lookup() {
  return check_pending_signals(false);
}

int os::signal_wait() {
  return check_pending_signals(true);
}

// Implicit OS exception handling

// Redirect execution to 'handler' by rewriting the PC in the captured
// exception context, saving the original faulting PC in the current
// JavaThread first (per-architecture register names differ below).
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
                      address handler) {
  JavaThread* thread = JavaThread::current();
  // Save pc in thread
#ifdef _M_IA64
  // Do not blow up if no thread info available.
  if (thread) {
    // Saving PRECISE pc (with slot information) in thread.
    uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
    // Convert precise PC into "Unix" format
    precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
    thread->set_saved_exception_pc((address)precise_pc);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
  // Clear out psr.ri (= Restart Instruction) in order to continue
  // at the beginning of the target bundle.
  exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
  assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
#elif _M_AMD64
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
#else
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
#endif

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}


// Used for PostMortemDump
extern "C" void safepoints();
extern "C" void find(int x);
extern "C" void events();

// According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real world experience shows that occasionnaly
// the execution of an illegal instruction can generate the exception code 0xC000001E. This
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).

#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E

// From "Execution Protection in the Windows Operating System" draft 0.35
// Once a system header becomes available, the "real" define should be
// included or copied here.
#define EXCEPTION_INFO_EXEC_VIOLATION 0x08

// Handle NAT Bit consumption on IA64.
#ifdef _M_IA64
  #define EXCEPTION_REG_NAT_CONSUMPTION STATUS_REG_NAT_CONSUMPTION
#endif

// Windows Vista/2008 heap corruption check
#define EXCEPTION_HEAP_CORRUPTION 0xC0000374

// Expand to a "name string, value" pair for the siglabel table below.
#define def_excpt(val) #val, val

// Maps a human-readable exception name to its numeric code.
struct siglabel {
  char *name;
  int   number;
};

// All Visual C++ exceptions thrown from code generated by the Microsoft Visual
// C++ compiler contain this error code.
Because this is a compiler-generated 2155 // error, the code is not listed in the Win32 API header files. 2156 // The code is actually a cryptic mnemonic device, with the initial "E" 2157 // standing for "exception" and the final 3 bytes (0x6D7363) representing the 2158 // ASCII values of "msc". 2159 2160 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363 2161 2162 2163 struct siglabel exceptlabels[] = { 2164 def_excpt(EXCEPTION_ACCESS_VIOLATION), 2165 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), 2166 def_excpt(EXCEPTION_BREAKPOINT), 2167 def_excpt(EXCEPTION_SINGLE_STEP), 2168 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), 2169 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND), 2170 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO), 2171 def_excpt(EXCEPTION_FLT_INEXACT_RESULT), 2172 def_excpt(EXCEPTION_FLT_INVALID_OPERATION), 2173 def_excpt(EXCEPTION_FLT_OVERFLOW), 2174 def_excpt(EXCEPTION_FLT_STACK_CHECK), 2175 def_excpt(EXCEPTION_FLT_UNDERFLOW), 2176 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO), 2177 def_excpt(EXCEPTION_INT_OVERFLOW), 2178 def_excpt(EXCEPTION_PRIV_INSTRUCTION), 2179 def_excpt(EXCEPTION_IN_PAGE_ERROR), 2180 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION), 2181 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2), 2182 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION), 2183 def_excpt(EXCEPTION_STACK_OVERFLOW), 2184 def_excpt(EXCEPTION_INVALID_DISPOSITION), 2185 def_excpt(EXCEPTION_GUARD_PAGE), 2186 def_excpt(EXCEPTION_INVALID_HANDLE), 2187 def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION), 2188 def_excpt(EXCEPTION_HEAP_CORRUPTION), 2189 #ifdef _M_IA64 2190 def_excpt(EXCEPTION_REG_NAT_CONSUMPTION), 2191 #endif 2192 NULL, 0 2193 }; 2194 2195 const char* os::exception_name(int exception_code, char *buf, size_t size) { 2196 for (int i = 0; exceptlabels[i].name != NULL; i++) { 2197 if (exceptlabels[i].number == exception_code) { 2198 jio_snprintf(buf, size, "%s", exceptlabels[i].name); 2199 return buf; 2200 } 2201 } 2202 2203 return NULL; 2204 } 2205 2206 
//----------------------------------------------------------------------------- 2207 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2208 // handle exception caused by idiv; should only happen for -MinInt/-1 2209 // (division by zero is handled explicitly) 2210 #ifdef _M_IA64 2211 assert(0, "Fix Handle_IDiv_Exception"); 2212 #elif _M_AMD64 2213 PCONTEXT ctx = exceptionInfo->ContextRecord; 2214 address pc = (address)ctx->Rip; 2215 assert(pc[0] == 0xF7, "not an idiv opcode"); 2216 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2217 assert(ctx->Rax == min_jint, "unexpected idiv exception"); 2218 // set correct result values and continue after idiv instruction 2219 ctx->Rip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2220 ctx->Rax = (DWORD)min_jint; // result 2221 ctx->Rdx = (DWORD)0; // remainder 2222 // Continue the execution 2223 #else 2224 PCONTEXT ctx = exceptionInfo->ContextRecord; 2225 address pc = (address)ctx->Eip; 2226 assert(pc[0] == 0xF7, "not an idiv opcode"); 2227 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2228 assert(ctx->Eax == min_jint, "unexpected idiv exception"); 2229 // set correct result values and continue after idiv instruction 2230 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2231 ctx->Eax = (DWORD)min_jint; // result 2232 ctx->Edx = (DWORD)0; // remainder 2233 // Continue the execution 2234 #endif 2235 return EXCEPTION_CONTINUE_EXECUTION; 2236 } 2237 2238 //----------------------------------------------------------------------------- 2239 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2240 PCONTEXT ctx = exceptionInfo->ContextRecord; 2241 #ifndef _WIN64 2242 // handle exception caused by native method modifying control word 2243 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2244 2245 switch (exception_code) { 2246 case EXCEPTION_FLT_DENORMAL_OPERAND: 2247 case EXCEPTION_FLT_DIVIDE_BY_ZERO: 2248 case 
EXCEPTION_FLT_INEXACT_RESULT: 2249 case EXCEPTION_FLT_INVALID_OPERATION: 2250 case EXCEPTION_FLT_OVERFLOW: 2251 case EXCEPTION_FLT_STACK_CHECK: 2252 case EXCEPTION_FLT_UNDERFLOW: 2253 jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std()); 2254 if (fp_control_word != ctx->FloatSave.ControlWord) { 2255 // Restore FPCW and mask out FLT exceptions 2256 ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0; 2257 // Mask out pending FLT exceptions 2258 ctx->FloatSave.StatusWord &= 0xffffff00; 2259 return EXCEPTION_CONTINUE_EXECUTION; 2260 } 2261 } 2262 2263 if (prev_uef_handler != NULL) { 2264 // We didn't handle this exception so pass it to the previous 2265 // UnhandledExceptionFilter. 2266 return (prev_uef_handler)(exceptionInfo); 2267 } 2268 #else // !_WIN64 2269 // On Windows, the mxcsr control bits are non-volatile across calls 2270 // See also CR 6192333 2271 // 2272 jint MxCsr = INITIAL_MXCSR; 2273 // we can't use StubRoutines::addr_mxcsr_std() 2274 // because in Win64 mxcsr is not saved there 2275 if (MxCsr != ctx->MxCsr) { 2276 ctx->MxCsr = MxCsr; 2277 return EXCEPTION_CONTINUE_EXECUTION; 2278 } 2279 #endif // !_WIN64 2280 2281 return EXCEPTION_CONTINUE_SEARCH; 2282 } 2283 2284 static inline void report_error(Thread* t, DWORD exception_code, 2285 address addr, void* siginfo, void* context) { 2286 VMError err(t, exception_code, addr, siginfo, context); 2287 err.report_and_die(); 2288 2289 // If UseOsErrorReporting, this will return here and save the error file 2290 // somewhere where we can find it in the minidump. 
2291 } 2292 2293 //----------------------------------------------------------------------------- 2294 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { 2295 if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH; 2296 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2297 #ifdef _M_IA64 2298 // On Itanium, we need the "precise pc", which has the slot number coded 2299 // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format). 2300 address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress; 2301 // Convert the pc to "Unix format", which has the slot number coded 2302 // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2 2303 // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction" 2304 // information is saved in the Unix format. 2305 address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2)); 2306 #elif _M_AMD64 2307 address pc = (address) exceptionInfo->ContextRecord->Rip; 2308 #else 2309 address pc = (address) exceptionInfo->ContextRecord->Eip; 2310 #endif 2311 Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady 2312 2313 // Handle SafeFetch32 and SafeFetchN exceptions. 2314 if (StubRoutines::is_safefetch_fault(pc)) { 2315 return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc)); 2316 } 2317 2318 #ifndef _WIN64 2319 // Execution protection violation - win32 running on AMD64 only 2320 // Handled first to avoid misdiagnosis as a "normal" access violation; 2321 // This is safe to do because we have a new/unique ExceptionInformation 2322 // code for this condition. 
2323 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2324 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2325 int exception_subcode = (int) exceptionRecord->ExceptionInformation[0]; 2326 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2327 2328 if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) { 2329 int page_size = os::vm_page_size(); 2330 2331 // Make sure the pc and the faulting address are sane. 2332 // 2333 // If an instruction spans a page boundary, and the page containing 2334 // the beginning of the instruction is executable but the following 2335 // page is not, the pc and the faulting address might be slightly 2336 // different - we still want to unguard the 2nd page in this case. 2337 // 2338 // 15 bytes seems to be a (very) safe value for max instruction size. 2339 bool pc_is_near_addr = 2340 (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15); 2341 bool instr_spans_page_boundary = 2342 (align_size_down((intptr_t) pc ^ (intptr_t) addr, 2343 (intptr_t) page_size) > 0); 2344 2345 if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) { 2346 static volatile address last_addr = 2347 (address) os::non_memory_address_word(); 2348 2349 // In conservative mode, don't unguard unless the address is in the VM 2350 if (UnguardOnExecutionViolation > 0 && addr != last_addr && 2351 (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) { 2352 2353 // Set memory to RWX and retry 2354 address page_start = 2355 (address) align_size_down((intptr_t) addr, (intptr_t) page_size); 2356 bool res = os::protect_memory((char*) page_start, page_size, 2357 os::MEM_PROT_RWX); 2358 2359 if (PrintMiscellaneous && Verbose) { 2360 char buf[256]; 2361 jio_snprintf(buf, sizeof(buf), "Execution protection violation " 2362 "at " INTPTR_FORMAT 2363 ", unguarding " INTPTR_FORMAT ": %s", addr, 2364 page_start, (res ? 
"success" : strerror(errno))); 2365 tty->print_raw_cr(buf); 2366 } 2367 2368 // Set last_addr so if we fault again at the same address, we don't 2369 // end up in an endless loop. 2370 // 2371 // There are two potential complications here. Two threads trapping 2372 // at the same address at the same time could cause one of the 2373 // threads to think it already unguarded, and abort the VM. Likely 2374 // very rare. 2375 // 2376 // The other race involves two threads alternately trapping at 2377 // different addresses and failing to unguard the page, resulting in 2378 // an endless loop. This condition is probably even more unlikely 2379 // than the first. 2380 // 2381 // Although both cases could be avoided by using locks or thread 2382 // local last_addr, these solutions are unnecessary complication: 2383 // this handler is a best-effort safety net, not a complete solution. 2384 // It is disabled by default and should only be used as a workaround 2385 // in case we missed any no-execute-unsafe VM code. 2386 2387 last_addr = addr; 2388 2389 return EXCEPTION_CONTINUE_EXECUTION; 2390 } 2391 } 2392 2393 // Last unguard failed or not unguarding 2394 tty->print_raw_cr("Execution protection violation"); 2395 report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord, 2396 exceptionInfo->ContextRecord); 2397 return EXCEPTION_CONTINUE_SEARCH; 2398 } 2399 } 2400 #endif // _WIN64 2401 2402 // Check to see if we caught the safepoint code in the 2403 // process of write protecting the memory serialization page. 2404 // It write enables the page immediately after protecting it 2405 // so just return. 
2406 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2407 JavaThread* thread = (JavaThread*) t; 2408 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2409 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2410 if (os::is_memory_serialize_page(thread, addr)) { 2411 // Block current thread until the memory serialize page permission restored. 2412 os::block_on_serialize_page_trap(); 2413 return EXCEPTION_CONTINUE_EXECUTION; 2414 } 2415 } 2416 2417 if ((exception_code == EXCEPTION_ACCESS_VIOLATION) && 2418 VM_Version::is_cpuinfo_segv_addr(pc)) { 2419 // Verify that OS save/restore AVX registers. 2420 return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr()); 2421 } 2422 2423 if (t != NULL && t->is_Java_thread()) { 2424 JavaThread* thread = (JavaThread*) t; 2425 bool in_java = thread->thread_state() == _thread_in_Java; 2426 2427 // Handle potential stack overflows up front. 2428 if (exception_code == EXCEPTION_STACK_OVERFLOW) { 2429 if (os::uses_stack_guard_pages()) { 2430 #ifdef _M_IA64 2431 // Use guard page for register stack. 2432 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2433 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2434 // Check for a register stack overflow on Itanium 2435 if (thread->addr_inside_register_stack_red_zone(addr)) { 2436 // Fatal red zone violation happens if the Java program 2437 // catches a StackOverflow error and does so much processing 2438 // that it runs beyond the unprotected yellow guard zone. As 2439 // a result, we are out of here. 2440 fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit."); 2441 } else if(thread->addr_inside_register_stack(addr)) { 2442 // Disable the yellow zone which sets the state that 2443 // we've got a stack overflow problem. 2444 if (thread->stack_yellow_zone_enabled()) { 2445 thread->disable_stack_yellow_zone(); 2446 } 2447 // Give us some room to process the exception. 
2448 thread->disable_register_stack_guard(); 2449 // Tracing with +Verbose. 2450 if (Verbose) { 2451 tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc); 2452 tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr); 2453 tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base()); 2454 tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]", 2455 thread->register_stack_base(), 2456 thread->register_stack_base() + thread->stack_size()); 2457 } 2458 2459 // Reguard the permanent register stack red zone just to be sure. 2460 // We saw Windows silently disabling this without telling us. 2461 thread->enable_register_stack_red_zone(); 2462 2463 return Handle_Exception(exceptionInfo, 2464 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2465 } 2466 #endif 2467 if (thread->stack_yellow_zone_enabled()) { 2468 // Yellow zone violation. The o/s has unprotected the first yellow 2469 // zone page for us. Note: must call disable_stack_yellow_zone to 2470 // update the enabled status, even if the zone contains only one page. 2471 thread->disable_stack_yellow_zone(); 2472 // If not in java code, return and hope for the best. 2473 return in_java 2474 ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)) 2475 : EXCEPTION_CONTINUE_EXECUTION; 2476 } else { 2477 // Fatal red zone violation. 2478 thread->disable_stack_red_zone(); 2479 tty->print_raw_cr("An unrecoverable stack overflow has occurred."); 2480 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2481 exceptionInfo->ContextRecord); 2482 return EXCEPTION_CONTINUE_SEARCH; 2483 } 2484 } else if (in_java) { 2485 // JVM-managed guard pages cannot be used on win95/98. The o/s provides 2486 // a one-time-only guard page, which it has released to us. 
The next 2487 // stack overflow on this thread will result in an ACCESS_VIOLATION. 2488 return Handle_Exception(exceptionInfo, 2489 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2490 } else { 2491 // Can only return and hope for the best. Further stack growth will 2492 // result in an ACCESS_VIOLATION. 2493 return EXCEPTION_CONTINUE_EXECUTION; 2494 } 2495 } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2496 // Either stack overflow or null pointer exception. 2497 if (in_java) { 2498 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2499 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2500 address stack_end = thread->stack_base() - thread->stack_size(); 2501 if (addr < stack_end && addr >= stack_end - os::vm_page_size()) { 2502 // Stack overflow. 2503 assert(!os::uses_stack_guard_pages(), 2504 "should be caught by red zone code above."); 2505 return Handle_Exception(exceptionInfo, 2506 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2507 } 2508 // Check for safepoint polling and implicit null 2509 // We only expect null pointers in the stubs (vtable) 2510 // the rest are checked explicitly now. 
2511 CodeBlob* cb = CodeCache::find_blob(pc); 2512 if (cb != NULL) { 2513 if (os::is_poll_address(addr)) { 2514 address stub = SharedRuntime::get_poll_stub(pc); 2515 return Handle_Exception(exceptionInfo, stub); 2516 } 2517 } 2518 { 2519 #ifdef _WIN64 2520 // If it's a legal stack address map the entire region in 2521 // 2522 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2523 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2524 if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base()) { 2525 addr = (address)((uintptr_t)addr & 2526 (~((uintptr_t)os::vm_page_size() - (uintptr_t)1))); 2527 os::commit_memory((char *)addr, thread->stack_base() - addr, 2528 !ExecMem); 2529 return EXCEPTION_CONTINUE_EXECUTION; 2530 } else 2531 #endif 2532 { 2533 // Null pointer exception. 2534 #ifdef _M_IA64 2535 // Process implicit null checks in compiled code. Note: Implicit null checks 2536 // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs. 2537 if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) { 2538 CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format); 2539 // Handle implicit null check in UEP method entry 2540 if (cb && (cb->is_frame_complete_at(pc) || 2541 (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) { 2542 if (Verbose) { 2543 intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0); 2544 tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format); 2545 tty->print_cr(" to addr " INTPTR_FORMAT, addr); 2546 tty->print_cr(" bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)", 2547 *(bundle_start + 1), *bundle_start); 2548 } 2549 return Handle_Exception(exceptionInfo, 2550 SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL)); 2551 } 2552 } 2553 2554 // Implicit null checks were processed above. 
Hence, we should not reach 2555 // here in the usual case => die! 2556 if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception"); 2557 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2558 exceptionInfo->ContextRecord); 2559 return EXCEPTION_CONTINUE_SEARCH; 2560 2561 #else // !IA64 2562 2563 // Windows 98 reports faulting addresses incorrectly 2564 if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) || 2565 !os::win32::is_nt()) { 2566 address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); 2567 if (stub != NULL) return Handle_Exception(exceptionInfo, stub); 2568 } 2569 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2570 exceptionInfo->ContextRecord); 2571 return EXCEPTION_CONTINUE_SEARCH; 2572 #endif 2573 } 2574 } 2575 } 2576 2577 #ifdef _WIN64 2578 // Special care for fast JNI field accessors. 2579 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks 2580 // in and the heap gets shrunk before the field access. 2581 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2582 address addr = JNI_FastGetField::find_slowcase_pc(pc); 2583 if (addr != (address)-1) { 2584 return Handle_Exception(exceptionInfo, addr); 2585 } 2586 } 2587 #endif 2588 2589 // Stack overflow or null pointer exception in native code. 2590 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2591 exceptionInfo->ContextRecord); 2592 return EXCEPTION_CONTINUE_SEARCH; 2593 } // /EXCEPTION_ACCESS_VIOLATION 2594 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 2595 #if defined _M_IA64 2596 else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION || 2597 exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) { 2598 M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0); 2599 2600 // Compiled method patched to be non entrant? Following conditions must apply: 2601 // 1. 
must be first instruction in bundle 2602 // 2. must be a break instruction with appropriate code 2603 if ((((uint64_t) pc & 0x0F) == 0) && 2604 (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) { 2605 return Handle_Exception(exceptionInfo, 2606 (address)SharedRuntime::get_handle_wrong_method_stub()); 2607 } 2608 } // /EXCEPTION_ILLEGAL_INSTRUCTION 2609 #endif 2610 2611 2612 if (in_java) { 2613 switch (exception_code) { 2614 case EXCEPTION_INT_DIVIDE_BY_ZERO: 2615 return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO)); 2616 2617 case EXCEPTION_INT_OVERFLOW: 2618 return Handle_IDiv_Exception(exceptionInfo); 2619 2620 } // switch 2621 } 2622 if (((thread->thread_state() == _thread_in_Java) || 2623 (thread->thread_state() == _thread_in_native)) && 2624 exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) { 2625 LONG result=Handle_FLT_Exception(exceptionInfo); 2626 if (result==EXCEPTION_CONTINUE_EXECUTION) return result; 2627 } 2628 } 2629 2630 if (exception_code != EXCEPTION_BREAKPOINT) { 2631 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2632 exceptionInfo->ContextRecord); 2633 } 2634 return EXCEPTION_CONTINUE_SEARCH; 2635 } 2636 2637 #ifndef _WIN64 2638 // Special care for fast JNI accessors. 2639 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and 2640 // the heap gets shrunk before the field access. 2641 // Need to install our own structured exception handler since native code may 2642 // install its own. 
// SEH filter used by the fast JNI accessor wrappers below: an access
// violation inside a fast Get<Primitive>Field is redirected to the
// accessor's slow-case continuation; anything else continues the search.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Generates a __try/__except wrapper around one generated fast JNI accessor
// so a trap during the unprotected field load is recoverable. Returns 0 only
// on the (unreachable in practice) fall-through after the filter ran.
#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
  Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
                                                     jobject obj,           \
                                                     jfieldID fieldID) {    \
    __try {                                                                 \
      return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
                                                                 obj,       \
                                                                 fieldID);  \
    } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
                                              _exception_info())) {         \
    }                                                                       \
    return 0;                                                               \
  }

DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
DEFINE_FAST_GETFIELD(jchar,    char,   Char)
DEFINE_FAST_GETFIELD(jshort,   short,  Short)
DEFINE_FAST_GETFIELD(jint,     int,    Int)
DEFINE_FAST_GETFIELD(jlong,    long,   Long)
DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
DEFINE_FAST_GETFIELD(jdouble,  double, Double)

// Maps a Java basic type to the corresponding wrapper generated above.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
  case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
  case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
  case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
  case T_INT:     return (address)jni_fast_GetIntField_wrapper;
  case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
  case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
  case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
  default:        ShouldNotReachHere();
  }
  return (address)-1;
}
#endif

void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
  // Install a win32 structured exception handler around the test
  // function call so the VM can generate an error dump if needed.
  __try {
    (*funcPtr)();
  } __except(topLevelExceptionFilter(
                                     (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }
}

// Virtual Memory

int os::vm_page_size() { return os::win32::vm_page_size(); }
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}

// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
//   + select Control Panel -> Administrative Tools -> Local Security Policy
//   + select Local Policies -> User Rights Assignment
//   + double click "Lock pages in memory", add users and/or groups
//   + reboot
// Note the above steps are needed for administrator as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment it only uses large page if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future, if so the code below needs to be revisited.

#ifndef MEM_LARGE_PAGES
#define MEM_LARGE_PAGES 0x20000000
#endif

// Process handle and token used while acquiring SeLockMemoryPrivilege;
// both are released by cleanup_after_large_page_init().
static HANDLE _hProcess;
static HANDLE _hToken;

// Container for NUMA node list info
class NUMANodeListHolder {
 private:
  int *_numa_used_node_list;  // allocated below
  int _numa_used_node_count;

  void free_node_list() {
    if (_numa_used_node_list != NULL) {
      FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
    }
  }

 public:
  NUMANodeListHolder() {
    _numa_used_node_count = 0;
    _numa_used_node_list = NULL;
    // do rest of initialization in build routine (after function pointers are set up)
  }

  ~NUMANodeListHolder() {
    free_node_list();
  }

  // Populates the list with the NUMA nodes whose processors intersect this
  // process's affinity mask. Returns true only if more than one node is
  // used (interleaving is pointless otherwise).
  // NOTE(review): _numa_used_node_count is not reset here, so a second call
  // to build() would keep appending past the old count — appears to be
  // called only once; confirm before reusing.
  bool build() {
    DWORD_PTR proc_aff_mask;
    DWORD_PTR sys_aff_mask;
    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
    ULONG highest_node_number;
    if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false;
    free_node_list();
    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
    for (unsigned int i = 0; i <= highest_node_number; i++) {
      ULONGLONG proc_mask_numa_node;
      if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
      if ((proc_aff_mask & proc_mask_numa_node)!=0) {
        _numa_used_node_list[_numa_used_node_count++] = i;
      }
    }
    return (_numa_used_node_count > 1);
  }

  int get_count() { return _numa_used_node_count; }
  int get_node_list_entry(int n) {
    // for indexes out of range, returns -1
    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
  }

} numa_node_list_holder;



static size_t _large_page_size = 0;

// True if the OS/DLL entry points needed for large-page setup are present.
static bool resolve_functions_for_large_page_init() {
  return os::Kernel32Dll::GetLargePageMinimumAvailable() &&
         os::Advapi32Dll::AdvapiAvailable();
}

// Tries to enable SeLockMemoryPrivilege (required for MEM_LARGE_PAGES) on
// this process's token. Returns true on success; _hProcess/_hToken remain
// open either way until cleanup_after_large_page_init().
static bool request_lock_memory_privilege() {
  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
                          os::current_process_id());

  LUID luid;
  if (_hProcess != NULL &&
      os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
      os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

    TOKEN_PRIVILEGES tp;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Luid = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See MSDN document.
    if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
        (GetLastError() == ERROR_SUCCESS)) {
      return true;
    }
  }

  return false;
}

// Releases the process/token handles opened by request_lock_memory_privilege().
static void cleanup_after_large_page_init() {
  if (_hProcess) CloseHandle(_hProcess);
  _hProcess = NULL;
  if (_hToken) CloseHandle(_hToken);
  _hToken = NULL;
}

// Decides whether NUMA interleaving can be used: rounds the interleave
// granularity up to a legal allocation unit and builds the used-node list.
// Returns true only if the process spans multiple NUMA nodes; warns (only
// when the flag was explicitly set) on each failure path.
static bool numa_interleaving_init() {
  bool success = false;
  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

  // print a warning if UseNUMAInterleaving flag is specified on command line
  bool warn_on_failure = use_numa_interleaving_specified;
#define WARN(msg) if (warn_on_failure) { warning(msg); }

  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);

  if (os::Kernel32Dll::NumaCallsAvailable()) {
    if (numa_node_list_holder.build()) {
      if (PrintMiscellaneous && Verbose) {
        tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
        for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
          tty->print("%d ", numa_node_list_holder.get_node_list_entry(i));
        }
        tty->print("\n");
      }
      success = true;
    } else {
      WARN("Process does not cover multiple NUMA nodes.");
    }
  } else {
    WARN("NUMA Interleaving is not supported by the operating system.");
  }
  if (!success) {
    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
  }
  return success;
#undef WARN
}

// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
//  * UseNUMAInterleaving requires a separate node for each piece
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
                                         DWORD prot,
                                         bool should_inject_error = false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) VirtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = (char *) align_size_up((size_t)p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t bytes_remaining = bytes;
  // An overflow of align_size_up() would have been caught above
  // in the calculation of size_of_reserve.
2905 char * next_alloc_addr = p_buf; 2906 HANDLE hProc = GetCurrentProcess(); 2907 2908 #ifdef ASSERT 2909 // Variable for the failure injection 2910 long ran_num = os::random(); 2911 size_t fail_after = ran_num % bytes; 2912 #endif 2913 2914 int count=0; 2915 while (bytes_remaining) { 2916 // select bytes_to_rq to get to the next chunk_size boundary 2917 2918 size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size)); 2919 // Note allocate and commit 2920 char * p_new; 2921 2922 #ifdef ASSERT 2923 bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after); 2924 #else 2925 const bool inject_error_now = false; 2926 #endif 2927 2928 if (inject_error_now) { 2929 p_new = NULL; 2930 } else { 2931 if (!UseNUMAInterleaving) { 2932 p_new = (char *) VirtualAlloc(next_alloc_addr, 2933 bytes_to_rq, 2934 flags, 2935 prot); 2936 } else { 2937 // get the next node to use from the used_node_list 2938 assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected"); 2939 DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count()); 2940 p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc, 2941 next_alloc_addr, 2942 bytes_to_rq, 2943 flags, 2944 prot, 2945 node); 2946 } 2947 } 2948 2949 if (p_new == NULL) { 2950 // Free any allocated pages 2951 if (next_alloc_addr > p_buf) { 2952 // Some memory was committed so release it. 2953 size_t bytes_to_release = bytes - bytes_remaining; 2954 // NMT has yet to record any individual blocks, so it 2955 // need to create a dummy 'reserve' record to match 2956 // the release. 
2957 MemTracker::record_virtual_memory_reserve((address)p_buf, 2958 bytes_to_release, CALLER_PC); 2959 os::release_memory(p_buf, bytes_to_release); 2960 } 2961 #ifdef ASSERT 2962 if (should_inject_error) { 2963 if (TracePageSizes && Verbose) { 2964 tty->print_cr("Reserving pages individually failed."); 2965 } 2966 } 2967 #endif 2968 return NULL; 2969 } 2970 2971 bytes_remaining -= bytes_to_rq; 2972 next_alloc_addr += bytes_to_rq; 2973 count++; 2974 } 2975 // Although the memory is allocated individually, it is returned as one. 2976 // NMT records it as one block. 2977 if ((flags & MEM_COMMIT) != 0) { 2978 MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC); 2979 } else { 2980 MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC); 2981 } 2982 2983 // made it this far, success 2984 return p_buf; 2985 } 2986 2987 2988 2989 void os::large_page_init() { 2990 if (!UseLargePages) return; 2991 2992 // print a warning if any large page related flag is specified on command line 2993 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) || 2994 !FLAG_IS_DEFAULT(LargePageSizeInBytes); 2995 bool success = false; 2996 2997 #define WARN(msg) if (warn_on_failure) { warning(msg); } 2998 if (resolve_functions_for_large_page_init()) { 2999 if (request_lock_memory_privilege()) { 3000 size_t s = os::Kernel32Dll::GetLargePageMinimum(); 3001 if (s) { 3002 #if defined(IA32) || defined(AMD64) 3003 if (s > 4*M || LargePageSizeInBytes > 4*M) { 3004 WARN("JVM cannot use large pages bigger than 4mb."); 3005 } else { 3006 #endif 3007 if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) { 3008 _large_page_size = LargePageSizeInBytes; 3009 } else { 3010 _large_page_size = s; 3011 } 3012 success = true; 3013 #if defined(IA32) || defined(AMD64) 3014 } 3015 #endif 3016 } else { 3017 WARN("Large page is not supported by the processor."); 3018 } 3019 } else { 3020 WARN("JVM cannot use large page memory because it does not have enough 
privilege to lock pages in memory."); 3021 } 3022 } else { 3023 WARN("Large page is not supported by the operating system."); 3024 } 3025 #undef WARN 3026 3027 const size_t default_page_size = (size_t) vm_page_size(); 3028 if (success && _large_page_size > default_page_size) { 3029 _page_sizes[0] = _large_page_size; 3030 _page_sizes[1] = default_page_size; 3031 _page_sizes[2] = 0; 3032 } 3033 3034 cleanup_after_large_page_init(); 3035 UseLargePages = success; 3036 } 3037 3038 // On win32, one cannot release just a part of reserved memory, it's an 3039 // all or nothing deal. When we split a reservation, we must break the 3040 // reservation into two reservations. 3041 void os::pd_split_reserved_memory(char *base, size_t size, size_t split, 3042 bool realloc) { 3043 if (size > 0) { 3044 release_memory(base, size); 3045 if (realloc) { 3046 reserve_memory(split, base); 3047 } 3048 if (size != split) { 3049 reserve_memory(size - split, base + split); 3050 } 3051 } 3052 } 3053 3054 // Multiple threads can race in this code but it's not possible to unmap small sections of 3055 // virtual space to get requested alignment, like posix-like os's. 3056 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe. 
// Reserve 'size' bytes aligned to 'alignment'.  Over-reserves, releases,
// then re-reserves at the computed aligned address; another thread may
// steal the address in the gap, in which case we simply retry.
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);

    os::release_memory(extra_base, extra_size);

    // re-reserve exactly the aligned range; NULL here means we lost a race
    // and the loop tries again from scratch
    aligned_base = os::reserve_memory(size, aligned_base);

  } while (aligned_base == NULL);

  return aligned_base;
}

char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_page_size() == 0, "reserve page size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if (Verbose && PrintMiscellaneous) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if (Verbose && PrintMiscellaneous) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available.
  return reserve_memory(bytes, requested_addr);
}

size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

bool os::can_execute_large_page_memory() {
  return true;
}

// Reserve + commit 'bytes' using large pages; returns NULL to signal the
// caller to fall back to small pages.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
                                 bool exec) {
  assert(UseLargePages, "only for large pages");

  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
    return NULL; // Fallback to small pages.
  }

  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;

  // with large pages, there are two cases where we need to use Individual Allocation
  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages individually.");
    }
    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
    if (p_buf == NULL) {
      // give an appropriate warning message
      if (UseNUMAInterleaving) {
        warning("NUMA large page allocation failed, UseLargePages flag ignored");
      }
      if (UseLargePagesIndividualAllocation) {
        warning("Individually allocated large pages failed, "
                "use -XX:-UseLargePagesIndividualAllocation to turn off");
      }
      return NULL;
    }

    return p_buf;

  } else {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages in a single large chunk.");
    }
    // normal policy just allocate it all at once
    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
    if (res != NULL) {
      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
    }

    return res;
  }
}

bool os::release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  return release_memory(base, bytes);
}

void os::print_statistics() {
}

// Print a diagnostic (non-fatal) message when a commit fails; includes the
// OS error text from the last Win32 error.
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}

bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  }
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
  return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
}

bool os::pd_release_memory(char* addr, size_t bytes) {
  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}

// Set protections specified
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
  case MEM_PROT_READ: p = PAGE_READONLY; break;
  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
  default:
    ShouldNotReachHere();
  }

  DWORD old_status;

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed) {
    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
                          "cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard page
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
  //
  // Pages in the region become guard pages. Any attempt to access a guard page
  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
  // the guard page status. Guard pages thus act as a one-time access alarm.
  return VirtualProtect(addr, bytes, p, &old_status) != 0;
}

bool os::guard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}

bool os::unguard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}

// NUMA-related hooks: mostly no-ops on Windows; placement is handled at
// reservation time via allocate_pages_individually().
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
bool os::numa_topology_changed() { return false; }
size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id() { return 0; }
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // Provide an answer for UMA systems
    ids[0] = 0;
    return 1;
  } else {
    // check for size bigger than actual groups_num
    size = MIN2(size, numa_get_groups_num());
    for (int i = 0; i < (int)size; i++) {
      ids[i] = numa_node_list_holder.get_node_list_entry(i);
    }
    return size;
  }
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected,
                     page_info* page_found) {
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}

#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL

void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}

class HighResolutionInterval : public CHeapObj<mtThread> {
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
 private:
  jlong resolution;
 public:
  HighResolutionInterval(jlong ms) {
    resolution = ms % 10L;
    if (resolution != 0) {
      MMRESULT result = timeBeginPeriod(1L);
    }
  }
  ~HighResolutionInterval() {
    if (resolution != 0) {
      MMRESULT result = timeEndPeriod(1L);
    }
    resolution = 0L;
  }
};

// Sleep for 'ms' milliseconds.  Interruptable sleeps wait on the thread's
// interrupt event and return OS_INTRPT if it fires; otherwise OS_TIMEOUT.
// Intervals longer than MAXDWORD are handled by recursing in chunks.
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  while (ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) {
      return res;
    }
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    HighResolutionInterval *phri=NULL;
    if (!ForceTimeHighResolution) {
      phri = new HighResolutionInterval(ms);
    }
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}

// Short sleep, direct OS call.
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    Sleep(100000);  // ... 100 seconds at a time
  }
}

typedef BOOL (WINAPI * STTSignature)(void);

void os::naked_yield() {
  // Use either SwitchToThread() or Sleep(0)
  // Consider passing back the return value from SwitchToThread().
  if (os::Kernel32Dll::SwitchToThreadAvailable()) {
    SwitchToThread();
  } else {
    Sleep(0);
  }
}

// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.

int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};

// Alternate mapping used when ThreadPriorityPolicy == 1: spreads the Java
// priorities over a wider range, up to TIME_CRITICAL.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};

static int prio_init() {
  // If ThreadPriorityPolicy is 1, switch tables
  if (ThreadPriorityPolicy == 1) {
    int i;
    for (i = 0; i < CriticalPriority + 1; i++) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  if (UseCriticalJavaThreadPriority) {
    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
  }
  return 0;
}

OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) return OS_OK;
  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
  return ret ? OS_OK : OS_ERR;
}

OSReturn os::get_native_priority(const Thread* const thread,
                                 int* priority_ptr) {
  if (!UseThreadPriorities) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread ||
         Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread()) {
    ((JavaThread*)thread)->parker()->unpark();
  }

  ParkEvent * ev = thread->_ParkEvent;
  if (ev != NULL) ev->unpark();
}


bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. By checking thread interrupt event to see
  // if the thread gets real interrupt thus prevent spurious wakeup.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}

// Gets a pc (hint) for a running thread. Currently used only for profiling.
ExtendedPC os::get_thread_pc(Thread* thread) {
  CONTEXT context;
  context.ContextFlags = CONTEXT_CONTROL;
  HANDLE handle = thread->osthread()->thread_handle();
#ifdef _M_IA64
  assert(0, "Fix get_thread_pc");
  return ExtendedPC(NULL);
#else
  if (GetThreadContext(handle, &context)) {
#ifdef _M_AMD64
    return ExtendedPC((address) context.Rip);
#else
    return ExtendedPC((address) context.Eip);
#endif
  } else {
    return ExtendedPC(NULL);
  }
#endif
}

// GetCurrentThreadId() returns DWORD
intx os::current_thread_id()  { return GetCurrentThreadId(); }

static int _initial_pid = 0;

int os::current_process_id() {
  return (_initial_pid ? _initial_pid : _getpid());
}

// Static os::win32 state, filled in by initialize_system_info() below.
int    os::win32::_vm_page_size              = 0;
int    os::win32::_vm_allocation_granularity = 0;
int    os::win32::_processor_type            = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int    os::win32::_processor_level           = 0;
julong os::win32::_physical_memory           = 0;
size_t os::win32::_default_stack_size        = 0;

intx          os::win32::_os_thread_limit    = 0;
volatile intx os::win32::_os_thread_count    = 0;

bool   os::win32::_is_nt                     = false;
bool   os::win32::_is_windows_2003           = false;
bool   os::win32::_is_windows_server         = false;

// 6573254
// Currently, the bug is observed across all the supported Windows releases,
// including the latest one (as of this writing - Windows Server 2012 R2)
bool   os::win32::_has_exit_bug              = true;
bool   os::win32::_has_performance_count     = 0;

// Queries the OS for page size, allocation granularity, processor info,
// physical memory, OS version/flavor, and the default stack size.
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size    = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type  = si.dwProcessorType;
  _processor_level = si.wProcessorLevel;
  set_processor_count(si.dwNumberOfProcessors);

  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);

  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatusEx(&ms);
  _physical_memory = ms.ullTotalPhys;

  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch (oi.dwPlatformId) {
  case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
  case VER_PLATFORM_WIN32_NT:
    _is_nt = true;
    {
      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
      if (os_vers == 5002) {
        _is_windows_2003 = true;
      }
      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
          oi.wProductType == VER_NT_SERVER) {
        _is_windows_server = true;
      }
    }
    break;
  default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
         "stack size not a multiple of page size");

  initialize_performance_counter();

  // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is
  // known to deadlock the system, if the VM issues to thread operations with
  // a too high frequency, e.g., such as changing the priorities.
  // The 6000 seems to work well - no deadlocks have been noticed on the test
  // programs that we have seen experience this problem.
  if (!os::win32::is_nt()) {
    StarvationMonitorInterval = 6000;
  }
}


// Loads a DLL by bare name from the system directory, then the Windows
// directory; path components in 'name' are rejected to avoid DLL
// preloading (search-path) attacks.  Fills 'ebuf' with a message on failure.
HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
                                      int ebuflen) {
  char path[MAX_PATH];
  DWORD size;
  DWORD pathLen = (DWORD)sizeof(path);
  HINSTANCE result = NULL;

  // only allow library name without path component
  assert(strchr(name, '\\') == NULL, "path not allowed");
  assert(strchr(name, ':') == NULL, "path not allowed");
  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
    jio_snprintf(ebuf, ebuflen,
                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
    return NULL;
  }

  // search system directory
  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // try Windows directory
  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  jio_snprintf(ebuf, ebuflen,
               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
  return NULL;
}

#define EXIT_TIMEOUT PRODUCT_ONLY(1000) NOT_PRODUCT(4000) /* 1 sec in product, 4 sec in debug */

// One-time initializer for the critical section used by
// exit_process_or_thread(); invoked via InitOnceExecuteOnce.
static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
  InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
  return TRUE;
}

int os::win32::exit_process_or_thread(Ept what, int exit_code) {
  // Basic
approach: 3790 // - Each exiting thread registers its intent to exit and then does so. 3791 // - A thread trying to terminate the process must wait for all 3792 // threads currently exiting to complete their exit. 3793 3794 if (os::win32::has_exit_bug()) { 3795 // The array holds handles of the threads that have started exiting by calling 3796 // _endthreadex(). 3797 // Should be large enough to avoid blocking the exiting thread due to lack of 3798 // a free slot. 3799 static HANDLE handles[MAXIMUM_WAIT_OBJECTS]; 3800 static int handle_count = 0; 3801 3802 static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT; 3803 static CRITICAL_SECTION crit_sect; 3804 static volatile jint process_exiting = 0; 3805 int i, j; 3806 DWORD res; 3807 HANDLE hproc, hthr; 3808 3809 // The first thread that reached this point, initializes the critical section. 3810 if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) { 3811 warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__); 3812 } else if (OrderAccess::load_acquire(&process_exiting) == 0) { 3813 EnterCriticalSection(&crit_sect); 3814 3815 if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) { 3816 // Remove from the array those handles of the threads that have completed exiting. 3817 for (i = 0, j = 0; i < handle_count; ++i) { 3818 res = WaitForSingleObject(handles[i], 0 /* don't wait */); 3819 if (res == WAIT_TIMEOUT) { 3820 handles[j++] = handles[i]; 3821 } else { 3822 if (res == WAIT_FAILED) { 3823 warning("WaitForSingleObject failed (%u) in %s: %d\n", 3824 GetLastError(), __FILE__, __LINE__); 3825 } 3826 // Don't keep the handle, if we failed waiting for it. 3827 CloseHandle(handles[i]); 3828 } 3829 } 3830 3831 // If there's no free slot in the array of the kept handles, we'll have to 3832 // wait until at least one thread completes exiting. 
3833 if ((handle_count = j) == MAXIMUM_WAIT_OBJECTS) { 3834 // Raise the priority of the oldest exiting thread to increase its chances 3835 // to complete sooner. 3836 SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL); 3837 res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT); 3838 if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) { 3839 i = (res - WAIT_OBJECT_0); 3840 handle_count = MAXIMUM_WAIT_OBJECTS - 1; 3841 for (; i < handle_count; ++i) { 3842 handles[i] = handles[i + 1]; 3843 } 3844 } else { 3845 warning("WaitForMultipleObjects %s (%u) in %s: %d\n", 3846 (res == WAIT_FAILED ? "failed" : "timed out"), 3847 GetLastError(), __FILE__, __LINE__); 3848 // Don't keep handles, if we failed waiting for them. 3849 for (i = 0; i < MAXIMUM_WAIT_OBJECTS; ++i) { 3850 CloseHandle(handles[i]); 3851 } 3852 handle_count = 0; 3853 } 3854 } 3855 3856 // Store a duplicate of the current thread handle in the array of handles. 3857 hproc = GetCurrentProcess(); 3858 hthr = GetCurrentThread(); 3859 if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count], 3860 0, FALSE, DUPLICATE_SAME_ACCESS)) { 3861 warning("DuplicateHandle failed (%u) in %s: %d\n", 3862 GetLastError(), __FILE__, __LINE__); 3863 } else { 3864 ++handle_count; 3865 } 3866 3867 // The current exiting thread has stored its handle in the array, and now 3868 // should leave the critical section before calling _endthreadex(). 3869 3870 } else if (what != EPT_THREAD) { 3871 if (handle_count > 0) { 3872 // Before ending the process, make sure all the threads that had called 3873 // _endthreadex() completed. 3874 3875 // Set the priority level of the current thread to the same value as 3876 // the priority level of exiting threads. 3877 // This is to ensure it will be given a fair chance to execute if 3878 // the timeout expires. 
3879 hthr = GetCurrentThread(); 3880 SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL); 3881 for (i = 0; i < handle_count; ++i) { 3882 SetThreadPriority(handles[i], THREAD_PRIORITY_ABOVE_NORMAL); 3883 } 3884 res = WaitForMultipleObjects(handle_count, handles, TRUE, EXIT_TIMEOUT); 3885 if (res == WAIT_FAILED || res == WAIT_TIMEOUT) { 3886 warning("WaitForMultipleObjects %s (%u) in %s: %d\n", 3887 (res == WAIT_FAILED ? "failed" : "timed out"), 3888 GetLastError(), __FILE__, __LINE__); 3889 } 3890 for (i = 0; i < handle_count; ++i) { 3891 CloseHandle(handles[i]); 3892 } 3893 handle_count = 0; 3894 } 3895 3896 OrderAccess::release_store(&process_exiting, 1); 3897 } 3898 3899 LeaveCriticalSection(&crit_sect); 3900 } 3901 3902 if (what == EPT_THREAD) { 3903 while (OrderAccess::load_acquire(&process_exiting) != 0) { 3904 // Some other thread is about to call exit(), so we 3905 // don't let the current thread proceed to _endthreadex() 3906 SuspendThread(GetCurrentThread()); 3907 // Avoid busy-wait loop, if SuspendThread() failed. 3908 Sleep(EXIT_TIMEOUT); 3909 } 3910 } 3911 } 3912 3913 // We are here if either 3914 // - there's no 'race at exit' bug on this OS release; 3915 // - initialization of the critical section failed (unlikely); 3916 // - the current thread has stored its handle and left the critical section; 3917 // - the process-exiting thread has raised the flag and left the critical section. 3918 if (what == EPT_THREAD) { 3919 _endthreadex((unsigned)exit_code); 3920 } else if (what == EPT_PROCESS) { 3921 ::exit(exit_code); 3922 } else { 3923 _exit(exit_code); 3924 } 3925 3926 // Should not reach here 3927 return exit_code; 3928 } 3929 3930 #undef EXIT_TIMEOUT 3931 3932 void os::win32::setmode_streams() { 3933 _setmode(_fileno(stdin), _O_BINARY); 3934 _setmode(_fileno(stdout), _O_BINARY); 3935 _setmode(_fileno(stderr), _O_BINARY); 3936 } 3937 3938 3939 bool os::is_debugger_attached() { 3940 return IsDebuggerPresent() ? 
true : false; 3941 } 3942 3943 3944 void os::wait_for_keypress_at_exit(void) { 3945 if (PauseAtExit) { 3946 fprintf(stderr, "Press any key to continue...\n"); 3947 fgetc(stdin); 3948 } 3949 } 3950 3951 3952 int os::message_box(const char* title, const char* message) { 3953 int result = MessageBox(NULL, message, title, 3954 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY); 3955 return result == IDYES; 3956 } 3957 3958 int os::allocate_thread_local_storage() { 3959 return TlsAlloc(); 3960 } 3961 3962 3963 void os::free_thread_local_storage(int index) { 3964 TlsFree(index); 3965 } 3966 3967 3968 void os::thread_local_storage_at_put(int index, void* value) { 3969 TlsSetValue(index, value); 3970 assert(thread_local_storage_at(index) == value, "Just checking"); 3971 } 3972 3973 3974 void* os::thread_local_storage_at(int index) { 3975 return TlsGetValue(index); 3976 } 3977 3978 3979 #ifndef PRODUCT 3980 #ifndef _WIN64 3981 // Helpers to check whether NX protection is enabled 3982 int nx_exception_filter(_EXCEPTION_POINTERS *pex) { 3983 if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 3984 pex->ExceptionRecord->NumberParameters > 0 && 3985 pex->ExceptionRecord->ExceptionInformation[0] == 3986 EXCEPTION_INFO_EXEC_VIOLATION) { 3987 return EXCEPTION_EXECUTE_HANDLER; 3988 } 3989 return EXCEPTION_CONTINUE_SEARCH; 3990 } 3991 3992 void nx_check_protection() { 3993 // If NX is enabled we'll get an exception calling into code on the stack 3994 char code[] = { (char)0xC3 }; // ret 3995 void *code_ptr = (void *)code; 3996 __try { 3997 __asm call code_ptr 3998 } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) { 3999 tty->print_raw_cr("NX protection detected."); 4000 } 4001 } 4002 #endif // _WIN64 4003 #endif // PRODUCT 4004 4005 // this is called _before_ the global arguments have been parsed 4006 void os::init(void) { 4007 _initial_pid = _getpid(); 4008 4009 init_random(1234567); 4010 4011 
win32::initialize_system_info(); 4012 win32::setmode_streams(); 4013 init_page_sizes((size_t) win32::vm_page_size()); 4014 4015 // This may be overridden later when argument processing is done. 4016 FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, 4017 os::win32::is_windows_2003()); 4018 4019 // Initialize main_process and main_thread 4020 main_process = GetCurrentProcess(); // Remember main_process is a pseudo handle 4021 if (!DuplicateHandle(main_process, GetCurrentThread(), main_process, 4022 &main_thread, THREAD_ALL_ACCESS, false, 0)) { 4023 fatal("DuplicateHandle failed\n"); 4024 } 4025 main_thread_id = (int) GetCurrentThreadId(); 4026 } 4027 4028 // To install functions for atexit processing 4029 extern "C" { 4030 static void perfMemory_exit_helper() { 4031 perfMemory_exit(); 4032 } 4033 } 4034 4035 static jint initSock(); 4036 4037 // this is called _after_ the global arguments have been parsed 4038 jint os::init_2(void) { 4039 // Allocate a single page and mark it as readable for safepoint polling 4040 address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY); 4041 guarantee(polling_page != NULL, "Reserve Failed for polling page"); 4042 4043 address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY); 4044 guarantee(return_page != NULL, "Commit Failed for polling page"); 4045 4046 os::set_polling_page(polling_page); 4047 4048 #ifndef PRODUCT 4049 if (Verbose && PrintMiscellaneous) { 4050 tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", 4051 (intptr_t)polling_page); 4052 } 4053 #endif 4054 4055 if (!UseMembar) { 4056 address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE); 4057 guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page"); 4058 4059 return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE); 4060 guarantee(return_page != 
NULL, "Commit Failed for memory serialize page"); 4061 4062 os::set_memory_serialize_page(mem_serialize_page); 4063 4064 #ifndef PRODUCT 4065 if (Verbose && PrintMiscellaneous) { 4066 tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", 4067 (intptr_t)mem_serialize_page); 4068 } 4069 #endif 4070 } 4071 4072 // Setup Windows Exceptions 4073 4074 // for debugging float code generation bugs 4075 if (ForceFloatExceptions) { 4076 #ifndef _WIN64 4077 static long fp_control_word = 0; 4078 __asm { fstcw fp_control_word } 4079 // see Intel PPro Manual, Vol. 2, p 7-16 4080 const long precision = 0x20; 4081 const long underflow = 0x10; 4082 const long overflow = 0x08; 4083 const long zero_div = 0x04; 4084 const long denorm = 0x02; 4085 const long invalid = 0x01; 4086 fp_control_word |= invalid; 4087 __asm { fldcw fp_control_word } 4088 #endif 4089 } 4090 4091 // If stack_commit_size is 0, windows will reserve the default size, 4092 // but only commit a small portion of it. 4093 size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size()); 4094 size_t default_reserve_size = os::win32::default_stack_size(); 4095 size_t actual_reserve_size = stack_commit_size; 4096 if (stack_commit_size < default_reserve_size) { 4097 // If stack_commit_size == 0, we want this too 4098 actual_reserve_size = default_reserve_size; 4099 } 4100 4101 // Check minimum allowable stack size for thread creation and to initialize 4102 // the java system classes, including StackOverflowError - depends on page 4103 // size. Add a page for compiler2 recursion in main thread. 4104 // Add in 2*BytesPerWord times page size to account for VM stack during 4105 // class initialization depending on 32 or 64 bit VM. 
4106 size_t min_stack_allowed = 4107 (size_t)(StackYellowPages+StackRedPages+StackShadowPages+ 4108 2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size(); 4109 if (actual_reserve_size < min_stack_allowed) { 4110 tty->print_cr("\nThe stack size specified is too small, " 4111 "Specify at least %dk", 4112 min_stack_allowed / K); 4113 return JNI_ERR; 4114 } 4115 4116 JavaThread::set_stack_size_at_create(stack_commit_size); 4117 4118 // Calculate theoretical max. size of Threads to guard gainst artifical 4119 // out-of-memory situations, where all available address-space has been 4120 // reserved by thread stacks. 4121 assert(actual_reserve_size != 0, "Must have a stack"); 4122 4123 // Calculate the thread limit when we should start doing Virtual Memory 4124 // banging. Currently when the threads will have used all but 200Mb of space. 4125 // 4126 // TODO: consider performing a similar calculation for commit size instead 4127 // as reserve size, since on a 64-bit platform we'll run into that more 4128 // often than running out of virtual memory space. We can use the 4129 // lower value of the two calculations as the os_thread_limit. 4130 size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K); 4131 win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size); 4132 4133 // at exit methods are called in the reverse order of their registration. 4134 // there is no limit to the number of functions registered. atexit does 4135 // not set errno. 4136 4137 if (PerfAllowAtExitRegistration) { 4138 // only register atexit functions if PerfAllowAtExitRegistration is set. 4139 // atexit functions can be delayed until process exit time, which 4140 // can be problematic for embedded VM situations. Embedded VMs should 4141 // call DestroyJavaVM() to assure that VM resources are released. 
4142 4143 // note: perfMemory_exit_helper atexit function may be removed in 4144 // the future if the appropriate cleanup code can be added to the 4145 // VM_Exit VMOperation's doit method. 4146 if (atexit(perfMemory_exit_helper) != 0) { 4147 warning("os::init_2 atexit(perfMemory_exit_helper) failed"); 4148 } 4149 } 4150 4151 #ifndef _WIN64 4152 // Print something if NX is enabled (win32 on AMD64) 4153 NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection()); 4154 #endif 4155 4156 // initialize thread priority policy 4157 prio_init(); 4158 4159 if (UseNUMA && !ForceNUMA) { 4160 UseNUMA = false; // We don't fully support this yet 4161 } 4162 4163 if (UseNUMAInterleaving) { 4164 // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag 4165 bool success = numa_interleaving_init(); 4166 if (!success) UseNUMAInterleaving = false; 4167 } 4168 4169 if (initSock() != JNI_OK) { 4170 return JNI_ERR; 4171 } 4172 4173 return JNI_OK; 4174 } 4175 4176 // Mark the polling page as unreadable 4177 void os::make_polling_page_unreadable(void) { 4178 DWORD old_status; 4179 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), 4180 PAGE_NOACCESS, &old_status)) { 4181 fatal("Could not disable polling page"); 4182 } 4183 } 4184 4185 // Mark the polling page as readable 4186 void os::make_polling_page_readable(void) { 4187 DWORD old_status; 4188 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), 4189 PAGE_READONLY, &old_status)) { 4190 fatal("Could not enable polling page"); 4191 } 4192 } 4193 4194 4195 int os::stat(const char *path, struct stat *sbuf) { 4196 char pathbuf[MAX_PATH]; 4197 if (strlen(path) > MAX_PATH - 1) { 4198 errno = ENAMETOOLONG; 4199 return -1; 4200 } 4201 os::native_path(strcpy(pathbuf, path)); 4202 int ret = ::stat(pathbuf, sbuf); 4203 if (sbuf != NULL && UseUTCFileTimestamp) { 4204 // Fix for 6539723. 
st_mtime returned from stat() is dependent on 4205 // the system timezone and so can return different values for the 4206 // same file if/when daylight savings time changes. This adjustment 4207 // makes sure the same timestamp is returned regardless of the TZ. 4208 // 4209 // See: 4210 // http://msdn.microsoft.com/library/ 4211 // default.asp?url=/library/en-us/sysinfo/base/ 4212 // time_zone_information_str.asp 4213 // and 4214 // http://msdn.microsoft.com/library/default.asp?url= 4215 // /library/en-us/sysinfo/base/settimezoneinformation.asp 4216 // 4217 // NOTE: there is a insidious bug here: If the timezone is changed 4218 // after the call to stat() but before 'GetTimeZoneInformation()', then 4219 // the adjustment we do here will be wrong and we'll return the wrong 4220 // value (which will likely end up creating an invalid class data 4221 // archive). Absent a better API for this, or some time zone locking 4222 // mechanism, we'll have to live with this risk. 4223 TIME_ZONE_INFORMATION tz; 4224 DWORD tzid = GetTimeZoneInformation(&tz); 4225 int daylightBias = 4226 (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias; 4227 sbuf->st_mtime += (tz.Bias + daylightBias) * 60; 4228 } 4229 return ret; 4230 } 4231 4232 4233 #define FT2INT64(ft) \ 4234 ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime)) 4235 4236 4237 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) 4238 // are used by JVM M&M and JVMTI to get user+sys or user CPU time 4239 // of a thread. 4240 // 4241 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns 4242 // the fast estimate available on the platform. 
4243 4244 // current_thread_cpu_time() is not optimized for Windows yet 4245 jlong os::current_thread_cpu_time() { 4246 // return user + sys since the cost is the same 4247 return os::thread_cpu_time(Thread::current(), true /* user+sys */); 4248 } 4249 4250 jlong os::thread_cpu_time(Thread* thread) { 4251 // consistent with what current_thread_cpu_time() returns. 4252 return os::thread_cpu_time(thread, true /* user+sys */); 4253 } 4254 4255 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 4256 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 4257 } 4258 4259 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) { 4260 // This code is copy from clasic VM -> hpi::sysThreadCPUTime 4261 // If this function changes, os::is_thread_cpu_time_supported() should too 4262 if (os::win32::is_nt()) { 4263 FILETIME CreationTime; 4264 FILETIME ExitTime; 4265 FILETIME KernelTime; 4266 FILETIME UserTime; 4267 4268 if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime, 4269 &ExitTime, &KernelTime, &UserTime) == 0) { 4270 return -1; 4271 } else if (user_sys_cpu_time) { 4272 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100; 4273 } else { 4274 return FT2INT64(UserTime) * 100; 4275 } 4276 } else { 4277 return (jlong) timeGetTime() * 1000000; 4278 } 4279 } 4280 4281 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4282 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4283 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4284 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4285 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4286 } 4287 4288 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4289 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4290 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4291 info_ptr->may_skip_forward = false; // GetThreadTimes 
returns absolute time 4292 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4293 } 4294 4295 bool os::is_thread_cpu_time_supported() { 4296 // see os::thread_cpu_time 4297 if (os::win32::is_nt()) { 4298 FILETIME CreationTime; 4299 FILETIME ExitTime; 4300 FILETIME KernelTime; 4301 FILETIME UserTime; 4302 4303 if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime, 4304 &KernelTime, &UserTime) == 0) { 4305 return false; 4306 } else { 4307 return true; 4308 } 4309 } else { 4310 return false; 4311 } 4312 } 4313 4314 // Windows does't provide a loadavg primitive so this is stubbed out for now. 4315 // It does have primitives (PDH API) to get CPU usage and run queue length. 4316 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length" 4317 // If we wanted to implement loadavg on Windows, we have a few options: 4318 // 4319 // a) Query CPU usage and run queue length and "fake" an answer by 4320 // returning the CPU usage if it's under 100%, and the run queue 4321 // length otherwise. It turns out that querying is pretty slow 4322 // on Windows, on the order of 200 microseconds on a fast machine. 4323 // Note that on the Windows the CPU usage value is the % usage 4324 // since the last time the API was called (and the first call 4325 // returns 100%), so we'd have to deal with that as well. 4326 // 4327 // b) Sample the "fake" answer using a sampling thread and store 4328 // the answer in a global variable. The call to loadavg would 4329 // just return the value of the global, avoiding the slow query. 4330 // 4331 // c) Sample a better answer using exponential decay to smooth the 4332 // value. This is basically the algorithm used by UNIX kernels. 4333 // 4334 // Note that sampling thread starvation could affect both (b) and (c). 
4335 int os::loadavg(double loadavg[], int nelem) { 4336 return -1; 4337 } 4338 4339 4340 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield() 4341 bool os::dont_yield() { 4342 return DontYieldALot; 4343 } 4344 4345 // This method is a slightly reworked copy of JDK's sysOpen 4346 // from src/windows/hpi/src/sys_api_md.c 4347 4348 int os::open(const char *path, int oflag, int mode) { 4349 char pathbuf[MAX_PATH]; 4350 4351 if (strlen(path) > MAX_PATH - 1) { 4352 errno = ENAMETOOLONG; 4353 return -1; 4354 } 4355 os::native_path(strcpy(pathbuf, path)); 4356 return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); 4357 } 4358 4359 FILE* os::open(int fd, const char* mode) { 4360 return ::_fdopen(fd, mode); 4361 } 4362 4363 // Is a (classpath) directory empty? 4364 bool os::dir_is_empty(const char* path) { 4365 WIN32_FIND_DATA fd; 4366 HANDLE f = FindFirstFile(path, &fd); 4367 if (f == INVALID_HANDLE_VALUE) { 4368 return true; 4369 } 4370 FindClose(f); 4371 return false; 4372 } 4373 4374 // create binary file, rewriting existing file if required 4375 int os::create_binary_file(const char* path, bool rewrite_existing) { 4376 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY; 4377 if (!rewrite_existing) { 4378 oflags |= _O_EXCL; 4379 } 4380 return ::open(path, oflags, _S_IREAD | _S_IWRITE); 4381 } 4382 4383 // return current position of file pointer 4384 jlong os::current_file_offset(int fd) { 4385 return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR); 4386 } 4387 4388 // move file pointer to the specified offset 4389 jlong os::seek_to_file_offset(int fd, jlong offset) { 4390 return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET); 4391 } 4392 4393 4394 jlong os::lseek(int fd, jlong offset, int whence) { 4395 return (jlong) ::_lseeki64(fd, offset, whence); 4396 } 4397 4398 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) { 4399 OVERLAPPED ov; 4400 DWORD nread; 4401 BOOL result; 4402 4403 ZeroMemory(&ov, 
sizeof(ov)); 4404 ov.Offset = (DWORD)offset; 4405 ov.OffsetHigh = (DWORD)(offset >> 32); 4406 4407 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4408 4409 result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov); 4410 4411 return result ? nread : 0; 4412 } 4413 4414 4415 // This method is a slightly reworked copy of JDK's sysNativePath 4416 // from src/windows/hpi/src/path_md.c 4417 4418 // Convert a pathname to native format. On win32, this involves forcing all 4419 // separators to be '\\' rather than '/' (both are legal inputs, but Win95 4420 // sometimes rejects '/') and removing redundant separators. The input path is 4421 // assumed to have been converted into the character encoding used by the local 4422 // system. Because this might be a double-byte encoding, care is taken to 4423 // treat double-byte lead characters correctly. 4424 // 4425 // This procedure modifies the given path in place, as the result is never 4426 // longer than the original. There is no error return; this operation always 4427 // succeeds. 4428 char * os::native_path(char *path) { 4429 char *src = path, *dst = path, *end = path; 4430 char *colon = NULL; // If a drive specifier is found, this will 4431 // point to the colon following the drive letter 4432 4433 // Assumption: '/', '\\', ':', and drive letters are never lead bytes 4434 assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\')) 4435 && (!::IsDBCSLeadByte(':'))), "Illegal lead byte"); 4436 4437 // Check for leading separators 4438 #define isfilesep(c) ((c) == '/' || (c) == '\\') 4439 while (isfilesep(*src)) { 4440 src++; 4441 } 4442 4443 if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') { 4444 // Remove leading separators if followed by drive specifier. This 4445 // hack is necessary to support file URLs containing drive 4446 // specifiers (e.g., "file://c:/path"). As a side effect, 4447 // "/c:/path" can be used as an alternative to "c:/path". 
4448 *dst++ = *src++; 4449 colon = dst; 4450 *dst++ = ':'; 4451 src++; 4452 } else { 4453 src = path; 4454 if (isfilesep(src[0]) && isfilesep(src[1])) { 4455 // UNC pathname: Retain first separator; leave src pointed at 4456 // second separator so that further separators will be collapsed 4457 // into the second separator. The result will be a pathname 4458 // beginning with "\\\\" followed (most likely) by a host name. 4459 src = dst = path + 1; 4460 path[0] = '\\'; // Force first separator to '\\' 4461 } 4462 } 4463 4464 end = dst; 4465 4466 // Remove redundant separators from remainder of path, forcing all 4467 // separators to be '\\' rather than '/'. Also, single byte space 4468 // characters are removed from the end of the path because those 4469 // are not legal ending characters on this operating system. 4470 // 4471 while (*src != '\0') { 4472 if (isfilesep(*src)) { 4473 *dst++ = '\\'; src++; 4474 while (isfilesep(*src)) src++; 4475 if (*src == '\0') { 4476 // Check for trailing separator 4477 end = dst; 4478 if (colon == dst - 2) break; // "z:\\" 4479 if (dst == path + 1) break; // "\\" 4480 if (dst == path + 2 && isfilesep(path[0])) { 4481 // "\\\\" is not collapsed to "\\" because "\\\\" marks the 4482 // beginning of a UNC pathname. Even though it is not, by 4483 // itself, a valid UNC pathname, we leave it as is in order 4484 // to be consistent with the path canonicalizer as well 4485 // as the win32 APIs, which treat this case as an invalid 4486 // UNC pathname rather than as an alias for the root 4487 // directory of the current drive. 
4488 break; 4489 } 4490 end = --dst; // Path does not denote a root directory, so 4491 // remove trailing separator 4492 break; 4493 } 4494 end = dst; 4495 } else { 4496 if (::IsDBCSLeadByte(*src)) { // Copy a double-byte character 4497 *dst++ = *src++; 4498 if (*src) *dst++ = *src++; 4499 end = dst; 4500 } else { // Copy a single-byte character 4501 char c = *src++; 4502 *dst++ = c; 4503 // Space is not a legal ending character 4504 if (c != ' ') end = dst; 4505 } 4506 } 4507 } 4508 4509 *end = '\0'; 4510 4511 // For "z:", add "." to work around a bug in the C runtime library 4512 if (colon == dst - 1) { 4513 path[2] = '.'; 4514 path[3] = '\0'; 4515 } 4516 4517 return path; 4518 } 4519 4520 // This code is a copy of JDK's sysSetLength 4521 // from src/windows/hpi/src/sys_api_md.c 4522 4523 int os::ftruncate(int fd, jlong length) { 4524 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4525 long high = (long)(length >> 32); 4526 DWORD ret; 4527 4528 if (h == (HANDLE)(-1)) { 4529 return -1; 4530 } 4531 4532 ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN); 4533 if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) { 4534 return -1; 4535 } 4536 4537 if (::SetEndOfFile(h) == FALSE) { 4538 return -1; 4539 } 4540 4541 return 0; 4542 } 4543 4544 4545 // This code is a copy of JDK's sysSync 4546 // from src/windows/hpi/src/sys_api_md.c 4547 // except for the legacy workaround for a bug in Win 98 4548 4549 int os::fsync(int fd) { 4550 HANDLE handle = (HANDLE)::_get_osfhandle(fd); 4551 4552 if ((!::FlushFileBuffers(handle)) && 4553 (GetLastError() != ERROR_ACCESS_DENIED)) { 4554 // from winerror.h 4555 return -1; 4556 } 4557 return 0; 4558 } 4559 4560 static int nonSeekAvailable(int, long *); 4561 static int stdinAvailable(int, long *); 4562 4563 #define S_ISCHR(mode) (((mode) & _S_IFCHR) == _S_IFCHR) 4564 #define S_ISFIFO(mode) (((mode) & _S_IFIFO) == _S_IFIFO) 4565 4566 // This code is a copy of JDK's sysAvailable 4567 // from src/windows/hpi/src/sys_api_md.c 4568 
// Report how many bytes can be read from 'fd' without blocking.
// Returns TRUE with *bytes set on success, FALSE on failure.
// Character devices and pipes are handled by peeking; regular files by
// comparing the current offset against the end of file.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      // Non-seekable: fd 0 is the console (special-cased), anything else
      // is treated as a pipe.
      int ret;
      long lpbytes;
      if (fd == 0) {
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    // Seekable file: available = (end of file) - (current position).
    // The third seek restores the original position.
    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
      return FALSE;
    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
      return FALSE;
    }
    *bytes = end - cur;
    return TRUE;
  } else {
    return FALSE;
  }
}

// This code is a copy of JDK's nonSeekAvailable
// from src/windows/hpi/src/sys_api_md.c

static int nonSeekAvailable(int fd, long *pbytes) {
  // This is used for available on non-seekable devices
  // (like both named and anonymous pipes, such as pipes
  //  connected to an exec'd process).
  // Standard Input is a special case.
  HANDLE han;

  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
    return FALSE;
  }

  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
    // PeekNamedPipe fails when at EOF.  In that case we
    // simply make *pbytes = 0 which is consistent with the
    // behavior we get on Solaris when an fd is at EOF.
    // The only alternative is to raise an Exception,
    // which isn't really warranted.
    //
    if (::GetLastError() != ERROR_BROKEN_PIPE) {
      return FALSE;
    }
    *pbytes = 0;
  }
  return TRUE;
}

// Cap on console input records examined per call; lpBuffer must fit into
// 64K or else PeekConsoleInput fails (see below).
#define MAX_INPUT_EVENTS 2000

// This code is a copy of JDK's stdinAvailable
// from src/windows/hpi/src/sys_api_md.c

// Count the bytes of completed (CR-terminated) keyboard input waiting on
// the console. Falls back to nonSeekAvailable() when stdin is redirected
// (i.e. not a console).
static int stdinAvailable(int fd, long *pbytes) {
  HANDLE han;
  DWORD numEventsRead = 0;  // Number of events read from buffer
  DWORD numEvents = 0;      // Number of events in buffer
  DWORD i = 0;              // Loop index
  DWORD curLength = 0;      // Position marker
  DWORD actualLength = 0;   // Number of bytes readable
  BOOL error = FALSE;       // Error holder
  INPUT_RECORD *lpBuffer;   // Pointer to records of input events

  if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
    return FALSE;
  }

  // Construct an array of input records in the console buffer
  error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
  if (error == 0) {
    // Not a console (stdin redirected) -- treat like a pipe.
    return nonSeekAvailable(fd, pbytes);
  }

  // lpBuffer must fit into 64K or else PeekConsoleInput fails
  if (numEvents > MAX_INPUT_EVENTS) {
    numEvents = MAX_INPUT_EVENTS;
  }

  lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
  if (lpBuffer == NULL) {
    return FALSE;
  }

  // Peek without consuming the events.
  error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
  if (error == 0) {
    os::free(lpBuffer);
    return FALSE;
  }

  // Examine input records for the number of bytes available.
  // Only key-down events count; input is "available" only up to the most
  // recent carriage return (a completed line).
  for (i=0; i<numEvents; i++) {
    if (lpBuffer[i].EventType == KEY_EVENT) {

      KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
                                    &(lpBuffer[i].Event);
      if (keyRecord->bKeyDown == TRUE) {
        CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
        curLength++;
        if (*keyPressed == '\r') {
          actualLength = curLength;
        }
      }
    }
  }

  if (lpBuffer != NULL) {
    os::free(lpBuffer);
  }
4689 4690 *pbytes = (long) actualLength; 4691 return TRUE; 4692 } 4693 4694 // Map a block of memory. 4695 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 4696 char *addr, size_t bytes, bool read_only, 4697 bool allow_exec) { 4698 HANDLE hFile; 4699 char* base; 4700 4701 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL, 4702 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); 4703 if (hFile == NULL) { 4704 if (PrintMiscellaneous && Verbose) { 4705 DWORD err = GetLastError(); 4706 tty->print_cr("CreateFile() failed: GetLastError->%ld.", err); 4707 } 4708 return NULL; 4709 } 4710 4711 if (allow_exec) { 4712 // CreateFileMapping/MapViewOfFileEx can't map executable memory 4713 // unless it comes from a PE image (which the shared archive is not.) 4714 // Even VirtualProtect refuses to give execute access to mapped memory 4715 // that was not previously executable. 4716 // 4717 // Instead, stick the executable region in anonymous memory. Yuck. 4718 // Penalty is that ~4 pages will not be shareable - in the future 4719 // we might consider DLLizing the shared archive with a proper PE 4720 // header so that mapping executable + sharing is possible. 4721 4722 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE, 4723 PAGE_READWRITE); 4724 if (base == NULL) { 4725 if (PrintMiscellaneous && Verbose) { 4726 DWORD err = GetLastError(); 4727 tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err); 4728 } 4729 CloseHandle(hFile); 4730 return NULL; 4731 } 4732 4733 DWORD bytes_read; 4734 OVERLAPPED overlapped; 4735 overlapped.Offset = (DWORD)file_offset; 4736 overlapped.OffsetHigh = 0; 4737 overlapped.hEvent = NULL; 4738 // ReadFile guarantees that if the return value is true, the requested 4739 // number of bytes were read before returning. 
4740 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; 4741 if (!res) { 4742 if (PrintMiscellaneous && Verbose) { 4743 DWORD err = GetLastError(); 4744 tty->print_cr("ReadFile() failed: GetLastError->%ld.", err); 4745 } 4746 release_memory(base, bytes); 4747 CloseHandle(hFile); 4748 return NULL; 4749 } 4750 } else { 4751 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, 4752 NULL /* file_name */); 4753 if (hMap == NULL) { 4754 if (PrintMiscellaneous && Verbose) { 4755 DWORD err = GetLastError(); 4756 tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err); 4757 } 4758 CloseHandle(hFile); 4759 return NULL; 4760 } 4761 4762 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; 4763 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, 4764 (DWORD)bytes, addr); 4765 if (base == NULL) { 4766 if (PrintMiscellaneous && Verbose) { 4767 DWORD err = GetLastError(); 4768 tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err); 4769 } 4770 CloseHandle(hMap); 4771 CloseHandle(hFile); 4772 return NULL; 4773 } 4774 4775 if (CloseHandle(hMap) == 0) { 4776 if (PrintMiscellaneous && Verbose) { 4777 DWORD err = GetLastError(); 4778 tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err); 4779 } 4780 CloseHandle(hFile); 4781 return base; 4782 } 4783 } 4784 4785 if (allow_exec) { 4786 DWORD old_protect; 4787 DWORD exec_access = read_only ? 
PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; 4788 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; 4789 4790 if (!res) { 4791 if (PrintMiscellaneous && Verbose) { 4792 DWORD err = GetLastError(); 4793 tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err); 4794 } 4795 // Don't consider this a hard error, on IA32 even if the 4796 // VirtualProtect fails, we should still be able to execute 4797 CloseHandle(hFile); 4798 return base; 4799 } 4800 } 4801 4802 if (CloseHandle(hFile) == 0) { 4803 if (PrintMiscellaneous && Verbose) { 4804 DWORD err = GetLastError(); 4805 tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err); 4806 } 4807 return base; 4808 } 4809 4810 return base; 4811 } 4812 4813 4814 // Remap a block of memory. 4815 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4816 char *addr, size_t bytes, bool read_only, 4817 bool allow_exec) { 4818 // This OS does not allow existing memory maps to be remapped so we 4819 // have to unmap the memory before we remap it. 4820 if (!os::unmap_memory(addr, bytes)) { 4821 return NULL; 4822 } 4823 4824 // There is a very small theoretical window between the unmap_memory() 4825 // call above and the map_memory() call below where a thread in native 4826 // code may be able to access an address that is no longer mapped. 4827 4828 return os::map_memory(fd, file_name, file_offset, addr, bytes, 4829 read_only, allow_exec); 4830 } 4831 4832 4833 // Unmap a block of memory. 4834 // Returns true=success, otherwise false. 
4835 4836 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4837 BOOL result = UnmapViewOfFile(addr); 4838 if (result == 0) { 4839 if (PrintMiscellaneous && Verbose) { 4840 DWORD err = GetLastError(); 4841 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err); 4842 } 4843 return false; 4844 } 4845 return true; 4846 } 4847 4848 void os::pause() { 4849 char filename[MAX_PATH]; 4850 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4851 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4852 } else { 4853 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4854 } 4855 4856 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4857 if (fd != -1) { 4858 struct stat buf; 4859 ::close(fd); 4860 while (::stat(filename, &buf) == 0) { 4861 Sleep(100); 4862 } 4863 } else { 4864 jio_fprintf(stderr, 4865 "Could not open pause file '%s', continuing immediately.\n", filename); 4866 } 4867 } 4868 4869 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() { 4870 assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread"); 4871 } 4872 4873 // See the caveats for this class in os_windows.hpp 4874 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4875 // into this method and returns false. If no OS EXCEPTION was raised, returns 4876 // true. 4877 // The callback is supposed to provide the method that should be protected. 
4878 // 4879 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) { 4880 assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread"); 4881 assert(!WatcherThread::watcher_thread()->has_crash_protection(), 4882 "crash_protection already set?"); 4883 4884 bool success = true; 4885 __try { 4886 WatcherThread::watcher_thread()->set_crash_protection(this); 4887 cb.call(); 4888 } __except(EXCEPTION_EXECUTE_HANDLER) { 4889 // only for protection, nothing to do 4890 success = false; 4891 } 4892 WatcherThread::watcher_thread()->set_crash_protection(NULL); 4893 return success; 4894 } 4895 4896 // An Event wraps a win32 "CreateEvent" kernel handle. 4897 // 4898 // We have a number of choices regarding "CreateEvent" win32 handle leakage: 4899 // 4900 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle 4901 // field, and call CloseHandle() on the win32 event handle. Unpark() would 4902 // need to be modified to tolerate finding a NULL (invalid) win32 event handle. 4903 // In addition, an unpark() operation might fetch the handle field, but the 4904 // event could recycle between the fetch and the SetEvent() operation. 4905 // SetEvent() would either fail because the handle was invalid, or inadvertently work, 4906 // as the win32 handle value had been recycled. In an ideal world calling SetEvent() 4907 // on an stale but recycled handle would be harmless, but in practice this might 4908 // confuse other non-Sun code, so it's not a viable approach. 4909 // 4910 // 2: Once a win32 event handle is associated with an Event, it remains associated 4911 // with the Event. The event handle is never closed. This could be construed 4912 // as handle leakage, but only up to the maximum # of threads that have been extant 4913 // at any one time. This shouldn't be an issue, as windows platforms typically 4914 // permit a process to have hundreds of thousands of open handles. 
4915 // 4916 // 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList 4917 // and release unused handles. 4918 // 4919 // 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle. 4920 // It's not clear, however, that we wouldn't be trading one type of leak for another. 4921 // 4922 // 5. Use an RCU-like mechanism (Read-Copy Update). 4923 // Or perhaps something similar to Maged Michael's "Hazard pointers". 4924 // 4925 // We use (2). 4926 // 4927 // TODO-FIXME: 4928 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation. 4929 // 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks 4930 // to recover from (or at least detect) the dreaded Windows 841176 bug. 4931 // 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent 4932 // into a single win32 CreateEvent() handle. 4933 // 4934 // Assumption: 4935 // Only one parker can exist on an event, which is why we allocate 4936 // them per-thread. Multiple unparkers can coexist. 4937 // 4938 // _Event transitions in park() 4939 // -1 => -1 : illegal 4940 // 1 => 0 : pass - return immediately 4941 // 0 => -1 : block; then set _Event to 0 before returning 4942 // 4943 // _Event transitions in unpark() 4944 // 0 => 1 : just return 4945 // 1 => 1 : just return 4946 // -1 => either 0 or 1; must signal target thread 4947 // That is, we can safely transition _Event from -1 to either 4948 // 0 or 1. 4949 // 4950 // _Event serves as a restricted-range semaphore. 4951 // -1 : thread is blocked, i.e. there is a waiter 4952 // 0 : neutral: thread is running or ready, 4953 // could have been signaled after a wait started 4954 // 1 : signaled - thread is running or ready 4955 // 4956 // Another possible encoding of _Event would be with 4957 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits. 
//

int os::PlatformEvent::park(jlong Millis) {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL , "Invariant");
  guarantee(Millis > 0          , "Invariant");

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.
  // Consider: use atomic decrement instead of CAS-loop

  // Atomically decrement _Event (1 -> 0 means consume a pending signal,
  // 0 -> -1 means become the waiter).
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return OS_OK;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;     // set prd = min(Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
  }
  v = _Event;
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultaneous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}

void os::PlatformEvent::park() {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Consider: use atomic decrement instead of CAS-loop
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  // Loop guards against spurious wakeups: keep waiting until unpark()
  // has moved _Event out of the "waiter present" (-1) state.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}

void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // xchg returns the previous value: >= 0 means no waiter, nothing to signal.
  if (Atomic::xchg(1, &_Event) >= 0) return;

  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

// The Windows implementation of Park is very straightforward: Basic
// operations on Win32 Events turn out to have the right semantics to
// use them directly. We opportunistically reuse the event inherited
// from Monitor.

void Parker::park(bool isAbsolute, jlong time) {
  guarantee(_ParkEvent != NULL, "invariant");
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  } else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  } else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) { // already elapsed
      return;
    }
  } else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0) { // Wait for the minimal time unit if zero
      time = 1;
    }
  }

  JavaThread* thread = (JavaThread*)(Thread::current());
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    ResetEvent(_ParkEvent);
    return;
  } else {
    // Transition to the blocked state for the duration of the wait so the
    // VM (e.g. safepoints) knows this thread is not running Java code.
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jt->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (jt->handle_special_suspend_equivalent_condition()) {
      jt->java_suspend_self();
    }
  }
}

void Parker::unpark() {
  guarantee(_ParkEvent != NULL, "invariant");
  SetEvent(_ParkEvent);
}

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't create a new process).
int os::fork_and_exec(char* cmd) {
  STARTUPINFO si;
  PROCESS_INFORMATION pi;

  memset(&si, 0, sizeof(si));
  si.cb = sizeof(si);
  memset(&pi, 0, sizeof(pi));
  // Launch 'cmd' as a child process; CreateProcess parses the command line
  // itself (first argument NULL).
  BOOL rslt = CreateProcess(NULL,   // executable name - use command line
                            cmd,    // command line
                            NULL,   // process security attribute
                            NULL,   // thread security attribute
                            TRUE,   // inherits system handles
                            0,      // no creation flags
                            NULL,   // use parent's environment block
                            NULL,   // use parent's starting directory
                            &si,    // (in) startup information
                            &pi);   // (out) process information

  if (rslt) {
    // Wait until child process exits.
    WaitForSingleObject(pi.hProcess, INFINITE);

    DWORD exit_code;
    GetExitCodeProcess(pi.hProcess, &exit_code);

    // Close process and thread handles.
    CloseHandle(pi.hProcess);
    CloseHandle(pi.hThread);

    return (int)exit_code;
  } else {
    return -1;
  }
}

//--------------------------------------------------------------------------------------------------
// Non-product code

static int mallocDebugIntervalCounter = 0;
static int mallocDebugCounter = 0;

// Verify the C heap, governed by MallocVerifyStart/MallocVerifyInterval
// (or unconditionally when 'force' is set). Calls fatal() on corruption;
// otherwise returns true.
bool os::check_heap(bool force) {
  if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
  if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
    // Note: HeapValidate executes two hardware breakpoints when it finds something
    // wrong; at these points, eax contains the address of the offending block (I think).
    // To get to the explicit error message(s) below, just continue twice.
    HANDLE heap = GetProcessHeap();

    // If we fail to lock the heap, then gflags.exe has been used
    // or some other special heap flag has been set that prevents
    // locking. We don't try to walk a heap we can't lock.
    if (HeapLock(heap) != 0) {
      PROCESS_HEAP_ENTRY phe;
      phe.lpData = NULL;   // NULL lpData starts the walk at the first entry
      while (HeapWalk(heap, &phe) != 0) {
        if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
            !HeapValidate(heap, 0, phe.lpData)) {
          tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
          tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData);
          // NOTE(review): %#x truncates the pointer on 64-bit — consider
          // PTR_FORMAT; verify against the project's format macros.
          fatal("corrupted C heap");
        }
      }
      // HeapWalk ends with ERROR_NO_MORE_ITEMS on a clean full traversal.
      DWORD err = GetLastError();
      if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
        fatal(err_msg("heap walk aborted with error %d", err));
      }
      HeapUnlock(heap);
    }
    mallocDebugIntervalCounter = 0;
  }
  return true;
}


// Not implemented on Windows; always reports failure.
bool os::find(address addr, outputStream* st) {
  // Nothing yet
  return false;
}

// Vectored/SEH filter for faults on the memory-serialize page: such an
// access violation is expected and execution simply continues; any other
// exception is passed on to the next handler.
LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
  DWORD exception_code = e->ExceptionRecord->ExceptionCode;

  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow();
    PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
    // ExceptionInformation[1] holds the faulting address for access violations.
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (os::is_memory_serialize_page(thread, addr)) {
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  return EXCEPTION_CONTINUE_SEARCH;
}

// We don't build a headless jre for Windows
bool os::is_headless_jre() { return false; }

// Initialize Winsock 2.2. Returns JNI_OK on success, JNI_ERR otherwise.
static jint initSock() {
  WSADATA wsadata;

  if (!os::WinSock2Dll::WinSock2Available()) {
    jio_fprintf(stderr, "Could not load Winsock (error: %d)\n",
                ::GetLastError());
    return JNI_ERR;
  }

  if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
    jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
                ::GetLastError());
    return JNI_ERR;
  }
  return JNI_OK;
}

// Thin wrappers over the Winsock API; gethostbyname goes through the
// lazily-resolved WinSock2Dll table.
struct hostent* os::get_host_by_name(char* name) {
  return (struct hostent*)os::WinSock2Dll::gethostbyname(name);
}

int os::socket_close(int fd) {
  return ::closesocket(fd);
}

int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}

int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  return ::connect(fd, him, len);
}

int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  return ::recv(fd, buf, (int)nBytes, flags);
}

int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

// Same as send() on Windows; kept as a distinct entry point for the
// platform-independent interface.
int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(IA32)
  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif

// returns true if thread could be suspended,
// false otherwise
static bool do_suspend(HANDLE* h) {
  if (h != NULL) {
    // SuspendThread returns (DWORD)-1 on failure.
    if (SuspendThread(*h) != ~0) {
      return true;
    }
  }
  return false;
}

// resume the thread
// calling resume on an active thread is a no-op
static void do_resume(HANDLE* h) {
  if (h != NULL) {
    ResumeThread(*h);
  }
}

// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value.
void get_thread_handle_for_extended_context(HANDLE* h,
                                            OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

// Thread sampling implementation
//
// Suspend the target thread, capture its register context, hand it to
// do_task(), then resume the thread and close the handle.
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}


// Kernel32 API
// Function-pointer types for Kernel32 entry points that may be absent on
// older Windows versions and are therefore resolved at runtime.
typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn)(HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn)(PULONG);
typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn)(UCHAR, PULONGLONG);
typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG);

GetLargePageMinimum_Fn      os::Kernel32Dll::_GetLargePageMinimum = NULL;
VirtualAllocExNuma_Fn       os::Kernel32Dll::_VirtualAllocExNuma = NULL;
GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL;


BOOL os::Kernel32Dll::initialized = FALSE;

SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
  assert(initialized && _GetLargePageMinimum != NULL,
         "GetLargePageMinimumAvailable() not yet called");
  return _GetLargePageMinimum();
}

BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetLargePageMinimum != NULL;
}

BOOL os::Kernel32Dll::NumaCallsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _VirtualAllocExNuma != NULL;
}

LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr,
                                           SIZE_T bytes, DWORD flags,
                                           DWORD prot, DWORD node) {
  assert(initialized && _VirtualAllocExNuma != NULL,
         "NUMACallsAvailable() not yet called");

  return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
}

BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
  assert(initialized && _GetNumaHighestNodeNumber != NULL,
         "NUMACallsAvailable() not yet called");

  return _GetNumaHighestNodeNumber(ptr_highest_node_number);
}

BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node,
                                               PULONGLONG proc_mask) {
  assert(initialized && _GetNumaNodeProcessorMask != NULL,
         "NUMACallsAvailable() not yet called");

  return _GetNumaNodeProcessorMask(node, proc_mask);
}

// Returns 0 when RtlCaptureStackBackTrace is not available on this
// Windows version.
USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
                                                 ULONG FrameToCapture,
                                                 PVOID* BackTrace,
                                                 PULONG BackTraceHash) {
  if (!initialized) {
    initialize();
  }

  if (_RtlCaptureStackBackTrace != NULL) {
    return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
                                     BackTrace, BackTraceHash);
  } else {
    return 0;
  }
}

// Resolve the optional Kernel32 entry points shared by the JDK6 and
// JDK7+ build variants. GetProcAddress returns NULL for symbols that the
// running Windows version does not export; callers check for NULL.
void os::Kernel32Dll::initializeCommon() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");
    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
    _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
    _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
    _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
    _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace");
    initialized = TRUE;
  }
}



#ifndef JDK6_OR_EARLIER

void os::Kernel32Dll::initialize() {
  initializeCommon();
}


// Kernel32 API
// On JDK7+ the minimum supported Windows version exports these directly,
// so the wrappers call them statically instead of via GetProcAddress.
inline BOOL os::Kernel32Dll::SwitchToThread() {
  return ::SwitchToThread();
}

inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  return true;
}

// Help tools
inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
  return true;
}

inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,
                                                        DWORD th32ProcessId) {
  return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,
                                           LPMODULEENTRY32 lpme) {
  return ::Module32First(hSnapshot, lpme);
}

inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,
                                          LPMODULEENTRY32 lpme) {
  return ::Module32Next(hSnapshot, lpme);
}

inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  ::GetNativeSystemInfo(lpSystemInfo);
}

// PSAPI API
inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess,
                                             HMODULE *lpModule, DWORD cb,
                                             LPDWORD lpcbNeeded) {
  return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess,
                                               HMODULE hModule,
                                               LPTSTR lpFilename,
                                               DWORD nSize) {
  return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess,
                                               HMODULE hModule,
                                               LPMODULEINFO lpmodinfo,
                                               DWORD cb) {
  return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

inline BOOL os::PSApiDll::PSApiAvailable() {
  return true;
}


// WinSock2 API
inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested,
                                        LPWSADATA lpWSAData) {
  return ::WSAStartup(wVersionRequested, lpWSAData);
}

inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  return ::gethostbyname(name);
}

inline BOOL os::WinSock2Dll::WinSock2Available() {
  return true;
}

// Advapi API
inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
                                                   BOOL DisableAllPrivileges,
                                                   PTOKEN_PRIVILEGES NewState,
                                                   DWORD BufferLength,
                                                   PTOKEN_PRIVILEGES PreviousState,
                                                   PDWORD ReturnLength) {
  return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
                                 BufferLength, PreviousState, ReturnLength);
}

inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle,
                                              DWORD DesiredAccess,
                                              PHANDLE TokenHandle) {
  return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}

inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName,
                                                  LPCTSTR lpName,
                                                  PLUID lpLuid) {
  return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}

inline BOOL os::Advapi32Dll::AdvapiAvailable() {
  return true;
}

void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//                             == false if only the base name of the library is passed in
//                                     such as "L"
// Returns a C-heap-allocated string the caller must free, or NULL on
// allocation failure or if the stripped library name would be empty.
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // +2: one for the '_' separator, one for the terminating NUL.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    // __stdcall decoration: keep any '@XX' argument-size suffix at the end.
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}

#else
// Kernel32 API
// JDK6-and-earlier build: these entry points may be missing on ancient
// Windows versions, so they are resolved dynamically like the NUMA calls.
typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD, DWORD);
typedef BOOL (WINAPI* Module32First_Fn)(HANDLE, LPMODULEENTRY32);
typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE, LPMODULEENTRY32);
typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);

SwitchToThread_Fn           os::Kernel32Dll::_SwitchToThread = NULL;
CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
Module32First_Fn            os::Kernel32Dll::_Module32First = NULL;
Module32Next_Fn             os::Kernel32Dll::_Module32Next = NULL;
GetNativeSystemInfo_Fn      os::Kernel32Dll::_GetNativeSystemInfo = NULL;

void os::Kernel32Dll::initialize() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");

    _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
    _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
      ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
    _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First");
    _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next");
    _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo");
    initializeCommon();  // resolve the functions that always need resolving

    initialized = TRUE;
  }
}

BOOL os::Kernel32Dll::SwitchToThread() {
  assert(initialized && _SwitchToThread != NULL,
         "SwitchToThreadAvailable() not yet called");
  return _SwitchToThread();
}


BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  if (!initialized) {
    initialize();
  }
  return _SwitchToThread != NULL;
}

// Help tools
BOOL os::Kernel32Dll::HelpToolsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _CreateToolhelp32Snapshot != NULL &&
         _Module32First != NULL &&
         _Module32Next != NULL;
}

HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,
                                                 DWORD th32ProcessId) {
  assert(initialized && _CreateToolhelp32Snapshot != NULL,
         "HelpToolsAvailable() not yet called");

  return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  assert(initialized && _Module32First != NULL,
         "HelpToolsAvailable() not yet called");

  return _Module32First(hSnapshot, lpme);
}

// NOTE(review): 'inline' here is inconsistent with the sibling
// out-of-line definitions above — presumably an oversight; confirm
// before changing since it affects linkage.
inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,
                                          LPMODULEENTRY32 lpme) {
  assert(initialized && _Module32Next != NULL,
         "HelpToolsAvailable() not yet called");

  return _Module32Next(hSnapshot, lpme);
}


BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetNativeSystemInfo != NULL;
}

void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo
{ 5697 assert(initialized && _GetNativeSystemInfo != NULL, 5698 "GetNativeSystemInfoAvailable() not yet called"); 5699 5700 _GetNativeSystemInfo(lpSystemInfo); 5701 } 5702 5703 // PSAPI API 5704 5705 5706 typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD); 5707 typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD); 5708 typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD); 5709 5710 EnumProcessModules_Fn os::PSApiDll::_EnumProcessModules = NULL; 5711 GetModuleFileNameEx_Fn os::PSApiDll::_GetModuleFileNameEx = NULL; 5712 GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL; 5713 BOOL os::PSApiDll::initialized = FALSE; 5714 5715 void os::PSApiDll::initialize() { 5716 if (!initialized) { 5717 HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0); 5718 if (handle != NULL) { 5719 _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle, 5720 "EnumProcessModules"); 5721 _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle, 5722 "GetModuleFileNameExA"); 5723 _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle, 5724 "GetModuleInformation"); 5725 } 5726 initialized = TRUE; 5727 } 5728 } 5729 5730 5731 5732 BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, 5733 DWORD cb, LPDWORD lpcbNeeded) { 5734 assert(initialized && _EnumProcessModules != NULL, 5735 "PSApiAvailable() not yet called"); 5736 return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded); 5737 } 5738 5739 DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, 5740 LPTSTR lpFilename, DWORD nSize) { 5741 assert(initialized && _GetModuleFileNameEx != NULL, 5742 "PSApiAvailable() not yet called"); 5743 return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize); 5744 } 5745 5746 BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, 5747 LPMODULEINFO lpmodinfo, DWORD cb) { 5748 
assert(initialized && _GetModuleInformation != NULL, 5749 "PSApiAvailable() not yet called"); 5750 return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb); 5751 } 5752 5753 BOOL os::PSApiDll::PSApiAvailable() { 5754 if (!initialized) { 5755 initialize(); 5756 } 5757 return _EnumProcessModules != NULL && 5758 _GetModuleFileNameEx != NULL && 5759 _GetModuleInformation != NULL; 5760 } 5761 5762 5763 // WinSock2 API 5764 typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA); 5765 typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...); 5766 5767 WSAStartup_Fn os::WinSock2Dll::_WSAStartup = NULL; 5768 gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL; 5769 BOOL os::WinSock2Dll::initialized = FALSE; 5770 5771 void os::WinSock2Dll::initialize() { 5772 if (!initialized) { 5773 HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0); 5774 if (handle != NULL) { 5775 _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup"); 5776 _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname"); 5777 } 5778 initialized = TRUE; 5779 } 5780 } 5781 5782 5783 BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) { 5784 assert(initialized && _WSAStartup != NULL, 5785 "WinSock2Available() not yet called"); 5786 return _WSAStartup(wVersionRequested, lpWSAData); 5787 } 5788 5789 struct hostent* os::WinSock2Dll::gethostbyname(const char *name) { 5790 assert(initialized && _gethostbyname != NULL, 5791 "WinSock2Available() not yet called"); 5792 return _gethostbyname(name); 5793 } 5794 5795 BOOL os::WinSock2Dll::WinSock2Available() { 5796 if (!initialized) { 5797 initialize(); 5798 } 5799 return _WSAStartup != NULL && 5800 _gethostbyname != NULL; 5801 } 5802 5803 typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD); 5804 typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE); 5805 typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, 
LPCTSTR, PLUID); 5806 5807 AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL; 5808 OpenProcessToken_Fn os::Advapi32Dll::_OpenProcessToken = NULL; 5809 LookupPrivilegeValue_Fn os::Advapi32Dll::_LookupPrivilegeValue = NULL; 5810 BOOL os::Advapi32Dll::initialized = FALSE; 5811 5812 void os::Advapi32Dll::initialize() { 5813 if (!initialized) { 5814 HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0); 5815 if (handle != NULL) { 5816 _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle, 5817 "AdjustTokenPrivileges"); 5818 _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle, 5819 "OpenProcessToken"); 5820 _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle, 5821 "LookupPrivilegeValueA"); 5822 } 5823 initialized = TRUE; 5824 } 5825 } 5826 5827 BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle, 5828 BOOL DisableAllPrivileges, 5829 PTOKEN_PRIVILEGES NewState, 5830 DWORD BufferLength, 5831 PTOKEN_PRIVILEGES PreviousState, 5832 PDWORD ReturnLength) { 5833 assert(initialized && _AdjustTokenPrivileges != NULL, 5834 "AdvapiAvailable() not yet called"); 5835 return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState, 5836 BufferLength, PreviousState, ReturnLength); 5837 } 5838 5839 BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, 5840 DWORD DesiredAccess, 5841 PHANDLE TokenHandle) { 5842 assert(initialized && _OpenProcessToken != NULL, 5843 "AdvapiAvailable() not yet called"); 5844 return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle); 5845 } 5846 5847 BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, 5848 LPCTSTR lpName, PLUID lpLuid) { 5849 assert(initialized && _LookupPrivilegeValue != NULL, 5850 "AdvapiAvailable() not yet called"); 5851 return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid); 5852 } 5853 5854 BOOL os::Advapi32Dll::AdvapiAvailable() { 5855 if (!initialized) { 5856 initialize(); 5857 } 5858 return 
_AdjustTokenPrivileges != NULL && 5859 _OpenProcessToken != NULL && 5860 _LookupPrivilegeValue != NULL; 5861 } 5862 5863 #endif 5864 5865 #ifndef PRODUCT 5866 5867 // test the code path in reserve_memory_special() that tries to allocate memory in a single 5868 // contiguous memory block at a particular address. 5869 // The test first tries to find a good approximate address to allocate at by using the same 5870 // method to allocate some memory at any address. The test then tries to allocate memory in 5871 // the vicinity (not directly after it to avoid possible by-chance use of that location) 5872 // This is of course only some dodgy assumption, there is no guarantee that the vicinity of 5873 // the previously allocated memory is available for allocation. The only actual failure 5874 // that is reported is when the test tries to allocate at a particular location but gets a 5875 // different valid one. A NULL return value at this point is not considered an error but may 5876 // be legitimate. 5877 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages. 5878 void TestReserveMemorySpecial_test() { 5879 if (!UseLargePages) { 5880 if (VerboseInternalVMTests) { 5881 gclog_or_tty->print("Skipping test because large pages are disabled"); 5882 } 5883 return; 5884 } 5885 // save current value of globals 5886 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; 5887 bool old_use_numa_interleaving = UseNUMAInterleaving; 5888 5889 // set globals to make sure we hit the correct code path 5890 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; 5891 5892 // do an allocation at an address selected by the OS to get a good one. 
5893 const size_t large_allocation_size = os::large_page_size() * 4; 5894 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); 5895 if (result == NULL) { 5896 if (VerboseInternalVMTests) { 5897 gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". Skipping remainder of test.", 5898 large_allocation_size); 5899 } 5900 } else { 5901 os::release_memory_special(result, large_allocation_size); 5902 5903 // allocate another page within the recently allocated memory area which seems to be a good location. At least 5904 // we managed to get it once. 5905 const size_t expected_allocation_size = os::large_page_size(); 5906 char* expected_location = result + os::large_page_size(); 5907 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); 5908 if (actual_location == NULL) { 5909 if (VerboseInternalVMTests) { 5910 gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.", 5911 expected_location, large_allocation_size); 5912 } 5913 } else { 5914 // release memory 5915 os::release_memory_special(actual_location, expected_allocation_size); 5916 // only now check, after releasing any memory to avoid any leaks. 5917 assert(actual_location == expected_location, 5918 err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead", 5919 expected_location, expected_allocation_size, actual_location)); 5920 } 5921 } 5922 5923 // restore globals 5924 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; 5925 UseNUMAInterleaving = old_use_numa_interleaving; 5926 } 5927 #endif // PRODUCT