/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
#define _WIN32_WINNT 0x0600

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_windows.h"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_windows.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_share_windows.hpp"
#include "os_windows.inline.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "semaphore_windows.hpp"
#include "services/attachListener.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

#ifdef _DEBUG
#include <crtdbg.h>
#endif


#include <windows.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/timeb.h>
#include <objidl.h>
#include <shlobj.h>

#include <malloc.h>
#include <signal.h>
#include <direct.h>
#include <errno.h>
#include <fcntl.h>
#include <io.h>
#include <process.h>              // For _beginthreadex(), _endthreadex()
#include <imagehlp.h>             // For os::dll_address_to_function_name
// for enumerating dll libraries
#include <vdmdbg.h>

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(-1)

// For DLL loading/load error detection
// Values of PE COFF
#define IMAGE_FILE_PTR_TO_SIGNATURE    0x3c
#define IMAGE_FILE_SIGNATURE_LENGTH    4

static HANDLE main_process;
static HANDLE main_thread;
static int    main_thread_id;

static FILETIME process_creation_time;
static FILETIME process_exit_time;
static FILETIME process_user_time;
static FILETIME process_kernel_time;

#ifdef _M_IA64
  #define __CPU__ ia64
#else
  #ifdef _M_AMD64
    #define __CPU__ amd64
  #else
    #define __CPU__ i486
  #endif
#endif

// save DLL module handle, used by GetModuleFileName

HINSTANCE vm_lib_handle;

BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
  switch (reason) {
  case DLL_PROCESS_ATTACH:
    vm_lib_handle = hinst;
    if (ForceTimeHighResolution) {
      timeBeginPeriod(1L);
    }
    break;
  case DLL_PROCESS_DETACH:
    if (ForceTimeHighResolution) {
      timeEndPeriod(1L);
    }
    break;
  default:
    break;
  }
  return true;
}

static inline double fileTimeAsDouble(FILETIME* time) {
  const double high  = (double) ((unsigned int) ~0);
  const double split = 10000000.0;
  double result = (time->dwLowDateTime / split) +
                   time->dwHighDateTime * (high/split);
  return result;
}

// Implementation of os

bool os::unsetenv(const char* name) {
  assert(name != NULL, "Null pointer");
  return (SetEnvironmentVariable(name, NULL) == TRUE);
}

// No setuid programs under Windows.
bool os::have_special_privileges() {
  return false;
}


// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI; we can add any periodic checks here.
// For Windows it does nothing at the moment.
void os::run_periodic_checks() {
  return;
}

// previous UnhandledExceptionFilter, if there is one
static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;

LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);

void os::init_system_properties_values() {
  // sysclasspath, java_home, dll_dir
  {
    char *home_path;
    char *dll_path;
    char *pslash;
    char *bin = "\\bin";
    char home_dir[MAX_PATH + 1];
    char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");

    if (alt_home_dir != NULL) {
      strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
      home_dir[MAX_PATH] = '\0';
    } else {
      os::jvm_path(home_dir, sizeof(home_dir));
      // Found the full path to jvm.dll.
      // Now cut the path to <java_home>/jre if we can.
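      // Illustrative walk-through (hedged example, not part of the original
      // comment): starting from e.g. "C:\jdk\jre\bin\server\jvm.dll", the
      // three strrchr() cuts below yield
      //   "C:\jdk\jre\bin\server"  ->  "C:\jdk\jre\bin"  ->  "C:\jdk\jre"
      // which is the <java_home>/jre directory referred to above.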
      *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
      pslash = strrchr(home_dir, '\\');
      if (pslash != NULL) {
        *pslash = '\0';                   // get rid of \{client|server}
        pslash = strrchr(home_dir, '\\');
        if (pslash != NULL) {
          *pslash = '\0';                 // get rid of \bin
        }
      }
    }

    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
    if (home_path == NULL) {
      return;
    }
    strcpy(home_path, home_dir);
    Arguments::set_java_home(home_path);
    FREE_C_HEAP_ARRAY(char, home_path);

    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
                                mtInternal);
    if (dll_path == NULL) {
      return;
    }
    strcpy(dll_path, home_dir);
    strcat(dll_path, bin);
    Arguments::set_dll_dir(dll_path);
    FREE_C_HEAP_ARRAY(char, dll_path);

    if (!set_boot_path('\\', ';')) {
      return;
    }
  }

// library_path
#define EXT_DIR "\\lib\\ext"
#define BIN_DIR "\\bin"
#define PACKAGE_DIR "\\Sun\\Java"
  {
    // Win32 library search order (See the documentation for LoadLibrary):
    //
    // 1. The directory from which the application is loaded.
    // 2. The system wide Java Extensions directory (Java only)
    // 3. System directory (GetSystemDirectory)
    // 4. Windows directory (GetWindowsDirectory)
    // 5. The PATH environment variable
    // 6. The current directory

    char *library_path;
    char tmp[MAX_PATH];
    char *path_str = ::getenv("PATH");

    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
                                    sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);

    library_path[0] = '\0';

    GetModuleFileName(NULL, tmp, sizeof(tmp));
    *(strrchr(tmp, '\\')) = '\0';
    strcat(library_path, tmp);

    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);
    strcat(library_path, PACKAGE_DIR BIN_DIR);

    GetSystemDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    if (path_str) {
      strcat(library_path, ";");
      strcat(library_path, path_str);
    }

    strcat(library_path, ";.");

    Arguments::set_library_path(library_path);
    FREE_C_HEAP_ARRAY(char, library_path);
  }

  // Default extensions directory
  {
    char path[MAX_PATH];
    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
    GetWindowsDirectory(path, MAX_PATH);
    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
            path, PACKAGE_DIR, EXT_DIR);
    Arguments::set_ext_dirs(buf);
  }
#undef EXT_DIR
#undef BIN_DIR
#undef PACKAGE_DIR

#ifndef _WIN64
  // set our UnhandledExceptionFilter and save any previous one
  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif

  // Done
  return;
}

void os::breakpoint() {
  DebugBreak();
}

// Invoked from the BREAKPOINT Macro
extern "C" void breakpoint() {
  os::breakpoint();
}

// RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
// So far, this method is only used by Native Memory Tracking, which is
// only supported on Windows XP or later.
//
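// Hedged usage sketch (illustrative only, not part of the original sources):
//
//   address frames[8];
//   int n = os::get_native_stack(frames, 8, 0 /* toSkip */);
//   // frames[0..n-1] hold return addresses; frames[n..7] are NULL-filled.
//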
int os::get_native_stack(address* stack, int frames, int toSkip) {
#ifdef _NMT_NOINLINE_
  toSkip++;
#endif
  int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames,
                                                       (PVOID*)stack, NULL);
  for (int index = captured; index < frames; index ++) {
    stack[index] = NULL;
  }
  return captured;
}


// os::current_stack_base()
//
// Returns the base of the stack, which is the stack's
// starting address. This function must be called
// while running on the stack of the thread being queried.

address os::current_stack_base() {
  MEMORY_BASIC_INFORMATION minfo;
  address stack_bottom;
  size_t stack_size;

  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  stack_bottom = (address)minfo.AllocationBase;
  stack_size = minfo.RegionSize;

  // Add up the sizes of all the regions with the same
  // AllocationBase.
  while (1) {
    VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
    if (stack_bottom == (address)minfo.AllocationBase) {
      stack_size += minfo.RegionSize;
    } else {
      break;
    }
  }

#ifdef _M_IA64
  // IA64 has memory and register stacks
  //
  // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
  // at thread creation (1MB backing store growing upwards, 1MB memory stack
  // growing downwards, 2MB summed up)
  //
  // ...
  // ------- top of stack (high address) -----
  // |
  // |      1MB
  // |      Backing Store (Register Stack)
  // |
  // |         / \
  // |          |
  // |          |
  // |          |
  // ------------------------ stack base -----
  // |      1MB
  // |      Memory Stack
  // |
  // |          |
  // |          |
  // |          |
  // |         \ /
  // |
  // ----- bottom of stack (low address) -----
  // ...

  stack_size = stack_size / 2;
#endif
  return stack_bottom + stack_size;
}

size_t os::current_stack_size() {
  size_t sz;
  MEMORY_BASIC_INFORMATION minfo;
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
  return sz;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  const struct tm* time_struct_ptr = localtime(clock);
  if (time_struct_ptr != NULL) {
    *res = *time_struct_ptr;
    return res;
  }
  return NULL;
}

LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);

// Thread start routine for all new Java threads
static unsigned __stdcall java_start(Thread* thread) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
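  // Hedged illustration (not part of the original comment): the _alloca()
  // below shifts this frame by one of eight 128-byte steps,
  //   offset = ((pid ^ counter) & 7) * 128   // 0, 128, ..., 896 bytes
  // so identical frames in different threads land on different cache lines.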
  static int counter = 0;
  int pid = os::current_process_id();
  _alloca(((pid ^ counter++) & 7) * 128);

  OSThread* osthr = thread->osthread();
  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Diagnostic code to investigate JDK-6573254
  int res = 30115;  // non-java thread
  if (thread->is_Java_thread()) {
    res = 20115;    // java thread
  }

  // Install a win32 structured exception handler around every thread created
  // by the VM, so the VM can generate an error dump when an exception occurs
  // in a non-Java thread (e.g. VM thread).
  __try {
    thread->run();
  } __except(topLevelExceptionFilter(
                                     (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::add code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
  }

  // Thread must not return from exit_process_or_thread(), but if it does,
  // let it proceed to exit normally
  return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
}

static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
                                  int thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Initialize support for Java interrupts
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return NULL;
  }
  osthread->set_interrupt_event(interrupt_event);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}


bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  HANDLE thread_h;
  if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
                       &thread_h, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  OSThread* osthread = create_os_thread(thread, thread_h,
                                        (int)current_thread_id());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);
  return true;
}

bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);
  return true;
}

// Allocate and initialize a new OSThread
bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t stack_size) {
  unsigned thread_id;

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Initialize support for Java interrupts
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return NULL;
  }
  osthread->set_interrupt_event(interrupt_event);
  osthread->set_interrupted(false);

  thread->set_osthread(osthread);

  if (stack_size == 0) {
    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize, whose default value can be
      // changed with the flag -Xss.
      if (JavaThread::stack_size_at_create() > 0) {
        stack_size = JavaThread::stack_size_at_create();
      }
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }

  // Create the Win32 thread
  //
  // Contrary to what the MSDN documentation says, "stack_size" in
  // _beginthreadex() does not specify the stack size. Instead, it specifies
  // the size of initially committed space. The stack size is determined by
  // the PE header in the executable. If the committed "stack_size" is larger
  // than the default value in the PE header, the stack is rounded up to the
  // nearest multiple of 1MB. For example, if the launcher has a default
  // stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all, it only affects the initial
  // commitment. On the other hand, specifying a 'stack_size' larger than the
  // default value may cause a significant increase in memory usage, because
  // not only will the stack space be rounded up to MB, but also the
  // entire space is committed upfront.
  //
  // Finally, Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
  // for CreateThread() that can treat 'stack_size' as the stack size. However,
  // we are not supposed to call CreateThread() directly according to the MSDN
  // documentation, because the JVM uses the C runtime library. The good news
  // is that the flag appears to work with _beginthreadex() as well.

#ifndef STACK_SIZE_PARAM_IS_A_RESERVATION
  #define STACK_SIZE_PARAM_IS_A_RESERVATION  (0x10000)
#endif
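  // Hedged example (assumption, not original code): with the flag set, the
  // second argument of _beginthreadex() is treated as the reserved stack
  // size rather than the initial commit, e.g.
  //
  //   _beginthreadex(NULL, 4*M, entry, arg,
  //                  CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, &tid);
  //
  // reserves a 4MB stack but still commits only the initial pages
  // (entry, arg and tid are placeholders for illustration).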

  HANDLE thread_handle =
    (HANDLE)_beginthreadex(NULL,
                           (unsigned)stack_size,
                           (unsigned (__stdcall *)(void*)) java_start,
                           thread,
                           CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION,
                           &thread_id);
  if (thread_handle == NULL) {
    // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again
    // without the flag.
    thread_handle =
      (HANDLE)_beginthreadex(NULL,
                             (unsigned)stack_size,
                             (unsigned (__stdcall *)(void*)) java_start,
                             thread,
                             CREATE_SUSPENDED,
                             &thread_id);
  }
  if (thread_handle == NULL) {
    // Need to clean up stuff we've allocated so far
    CloseHandle(osthread->interrupt_event());
    thread->set_osthread(NULL);
    delete osthread;
    return NULL;
  }

  Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED),
  // and is started higher up in the call chain
  return true;
}


// Free Win32 resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");
  CloseHandle(osthread->thread_handle());
  CloseHandle(osthread->interrupt_event());
  delete osthread;
}

static jlong first_filetime;
static jlong initial_performance_count;
static jlong performance_frequency;


jlong as_long(LARGE_INTEGER x) {
  jlong result = 0;  // initialization to avoid warning
  set_high(&result, x.HighPart);
  set_low(&result, x.LowPart);
  return result;
}


jlong os::elapsed_counter() {
  LARGE_INTEGER count;
  if (win32::_has_performance_count) {
    QueryPerformanceCounter(&count);
    return as_long(count) - initial_performance_count;
  } else {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime);
  }
}


jlong os::elapsed_frequency() {
  if (win32::_has_performance_count) {
    return performance_frequency;
  } else {
    // the FILETIME time is the number of 100-nanosecond intervals since January 1, 1601.
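    // (Hedged note: 1 second / 100 ns = 10,000,000 ticks, hence the constant.)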
    return 10000000;
  }
}


julong os::available_memory() {
  return win32::available_memory();
}

julong os::win32::available_memory() {
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);

  return (julong)ms.ullAvailPhys;
}

julong os::physical_memory() {
  return win32::physical_memory();
}

bool os::has_allocatable_memory_limit(julong* limit) {
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);
#ifdef _LP64
  *limit = (julong)ms.ullAvailVirtual;
  return true;
#else
  // Limit to 1400m because of the 2gb address space wall
  *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
  return true;
#endif
}

// VC6 lacks DWORD_PTR
#if _MSC_VER < 1300
typedef UINT_PTR DWORD_PTR;
#endif

int os::active_processor_count() {
  DWORD_PTR lpProcessAffinityMask = 0;
  DWORD_PTR lpSystemAffinityMask = 0;
  int proc_count = processor_count();
  if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
      GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // The number of active processors is the number of bits set in the
    // process affinity mask.
    int bitcount = 0;
    while (lpProcessAffinityMask != 0) {
      lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
      bitcount++;
    }
    return bitcount;
  } else {
    return proc_count;
  }
}

void os::set_native_thread_name(const char *name) {

  // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
  //
  // Note that unfortunately this only works if the process
  // is already attached to a debugger; the debugger must observe
  // the exception below to show the correct name.

  const DWORD MS_VC_EXCEPTION = 0x406D1388;
  struct {
    DWORD dwType;     // must be 0x1000
    LPCSTR szName;    // pointer to name (in user addr space)
    DWORD dwThreadID; // thread ID (-1=caller thread)
    DWORD dwFlags;    // reserved for future use, must be zero
  } info;

  info.dwType = 0x1000;
  info.szName = name;
  info.dwThreadID = -1;
  info.dwFlags = 0;

  __try {
    RaiseException(MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info);
  } __except(EXCEPTION_CONTINUE_EXECUTION) {}
}

bool os::distribute_processes(uint length, uint* distribution) {
  // Not yet implemented.
  return false;
}

bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}

void os::win32::initialize_performance_counter() {
  LARGE_INTEGER count;
  if (QueryPerformanceFrequency(&count)) {
    win32::_has_performance_count = 1;
    performance_frequency = as_long(count);
    QueryPerformanceCounter(&count);
    initial_performance_count = as_long(count);
  } else {
    win32::_has_performance_count = 0;
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  }
}


double os::elapsedTime() {
  return (double) elapsed_counter() / (double) elapsed_frequency();
}


// Windows format:
//   The FILETIME structure is a 64-bit value representing the number of
//   100-nanosecond intervals since January 1, 1601.
// Java format:
//   Java standards require the number of milliseconds since 1/1/1970.
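
// Hedged worked example (not part of the original sources): a raw FILETIME
// read via GetSystemTimeAsFileTime() counts 100ns ticks since 1601, so
//
//   jlong ft = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
//   jlong ms = (ft - 116444736000000000) / 10000;   // millis since 1/1/1970
//
// which is what windows_to_java_time() below computes using _offset.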

// Constant offset - calculated using offset()
static jlong _offset   = 116444736000000000;
// Fake time counter for reproducible results when debugging
static jlong fake_time = 0;

#ifdef ASSERT
// Just to be safe, recalculate the offset in debug mode
static jlong _calculated_offset = 0;
static int   _has_calculated_offset = 0;

jlong offset() {
  if (_has_calculated_offset) return _calculated_offset;
  SYSTEMTIME java_origin;
  java_origin.wYear          = 1970;
  java_origin.wMonth         = 1;
  java_origin.wDayOfWeek     = 0; // ignored
  java_origin.wDay           = 1;
  java_origin.wHour          = 0;
  java_origin.wMinute        = 0;
  java_origin.wSecond        = 0;
  java_origin.wMilliseconds  = 0;
  FILETIME jot;
  if (!SystemTimeToFileTime(&java_origin, &jot)) {
    fatal(err_msg("Error = %d\nWindows error", GetLastError()));
  }
  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
  _has_calculated_offset = 1;
  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
  return _calculated_offset;
}
#else
jlong offset() {
  return _offset;
}
#endif

jlong windows_to_java_time(FILETIME wt) {
  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  return (a - offset()) / 10000;
}

// Returns time ticks in tenths of microseconds (100ns units)
jlong windows_to_time_ticks(FILETIME wt) {
  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  return (a - offset());
}

FILETIME java_to_windows_time(jlong l) {
  jlong a = (l * 10000) + offset();
  FILETIME result;
  result.dwHighDateTime = high(a);
  result.dwLowDateTime  = low(a);
  return result;
}

bool os::supports_vtime() { return true; }
bool os::enable_vtime() { return false; }
bool os::vtime_enabled() { return false; }

double os::elapsedVTime() {
  FILETIME created;
  FILETIME exited;
  FILETIME kernel;
  FILETIME user;
  if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
    // the resolution of windows_to_java_time() should be sufficient (ms)
    return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
  } else {
    return elapsedTime();
  }
}

jlong os::javaTimeMillis() {
  if (UseFakeTimers) {
    return fake_time++;
  } else {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    return windows_to_java_time(wt);
  }
}

void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  FILETIME wt;
  GetSystemTimeAsFileTime(&wt);
  jlong ticks = windows_to_time_ticks(wt);  // 10th of micros
  jlong secs = jlong(ticks / 10000000);     // 10000 * 1000
  seconds = secs;
  nanos = jlong(ticks - (secs*10000000)) * 100;
}

jlong os::javaTimeNanos() {
  if (!win32::_has_performance_count) {
    return javaTimeMillis() * NANOSECS_PER_MILLISEC;  // the best we can do.
  } else {
    LARGE_INTEGER current_count;
    QueryPerformanceCounter(&current_count);
    double current = as_long(current_count);
    double freq = performance_frequency;
    jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
    return time;
  }
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  if (!win32::_has_performance_count) {
    // javaTimeMillis() doesn't have much precision,
    // but it is not going to wrap -- so all 64 bits
    info_ptr->max_value = ALL_64_BITS;

    // this is a wall clock timer, so may skip
    info_ptr->may_skip_backward = true;
    info_ptr->may_skip_forward = true;
  } else {
    jlong freq = performance_frequency;
    if (freq < NANOSECS_PER_SEC) {
      // the performance counter is 64 bits and we will
      // be multiplying it -- so no wrap in 64 bits
      info_ptr->max_value = ALL_64_BITS;
    } else if (freq > NANOSECS_PER_SEC) {
      // use the max value the counter can reach to
      // determine the max value which could be returned
      julong max_counter = (julong)ALL_64_BITS;
      info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
    } else {
      // the performance counter is 64 bits and we will
      // be using it directly -- so no wrap in 64 bits
      info_ptr->max_value = ALL_64_BITS;
    }

    // using a counter, so no skipping
    info_ptr->may_skip_backward = false;
    info_ptr->may_skip_forward = false;
  }
  info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
}

char* os::local_time_string(char *buf, size_t buflen) {
  SYSTEMTIME st;
  GetLocalTime(&st);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
  return buf;
}

bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  HANDLE h_process = GetCurrentProcess();
  FILETIME create_time, exit_time, kernel_time, user_time;
  BOOL result = GetProcessTimes(h_process,
                                &create_time,
                                &exit_time,
                                &kernel_time,
                                &user_time);
  if (result != 0) {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    jlong rtc_millis = windows_to_java_time(wt);
    jlong user_millis = windows_to_java_time(user_time);
    jlong system_millis = windows_to_java_time(kernel_time);
    *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
    *process_user_time = ((double) user_millis) / ((double) MILLIUNITS);
    *process_system_time = ((double) system_millis) / ((double) MILLIUNITS);
    return true;
  } else {
    return false;
  }
}

void os::shutdown() {
  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}


static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
                                         PMINIDUMP_EXCEPTION_INFORMATION,
                                         PMINIDUMP_USER_STREAM_INFORMATION,
                                         PMINIDUMP_CALLBACK_INFORMATION);

static HANDLE dumpFile = NULL;

// Check if dump file can be created.
void os::check_dump_limit(char* buffer, size_t buffsz) {
  bool status = true;
  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
    jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
    status = false;
  } else {
    const char* cwd = get_current_directory(NULL, 0);
    int pid = current_process_id();
    if (cwd != NULL) {
      jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
    } else {
      jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
    }

    if (dumpFile == NULL &&
        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
        == INVALID_HANDLE_VALUE) {
      jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
      status = false;
    }
  }
  VMError::record_coredump_status(buffer, status);
}

void os::abort(bool dump_core, void* siginfo, void* context) {
  HINSTANCE dbghelp;
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  MINIDUMP_TYPE dumpType;

  shutdown();
  if (!dump_core || dumpFile == NULL) {
    if (dumpFile != NULL) {
      CloseHandle(dumpFile);
    }
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);

  if (dbghelp == NULL) {
    jio_fprintf(stderr, "Failed to load dbghelp.dll\n");
    CloseHandle(dumpFile);
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  _MiniDumpWriteDump =
      CAST_TO_FN_PTR(BOOL(WINAPI *)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
                                    PMINIDUMP_EXCEPTION_INFORMATION,
                                    PMINIDUMP_USER_STREAM_INFORMATION,
                                    PMINIDUMP_CALLBACK_INFORMATION),
                     GetProcAddress(dbghelp,
                                    "MiniDumpWriteDump"));

  if (_MiniDumpWriteDump == NULL) {
    jio_fprintf(stderr, "Failed to find MiniDumpWriteDump() in module dbghelp.dll.\n");
    CloseHandle(dumpFile);
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData);

  // Older versions of dbghelp.h do not contain all the dumptypes we want, dbghelp.h with
  // API_VERSION_NUMBER 11 or higher contains the ones we want though
#if API_VERSION_NUMBER >= 11
  dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo |
                             MiniDumpWithUnloadedModules);
#endif

  if (siginfo != NULL && context != NULL) {
    ep.ContextRecord = (PCONTEXT) context;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }

  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support
  // all the dump types we really want. If the first call fails, let's fall back to just
  // using MiniDumpWithFullMemory.
  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
    jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
  }
  CloseHandle(dumpFile);
  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}

void os::abort(bool dump_core) {
  abort(dump_core, NULL, NULL);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
// * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

// Caller must have already run dirname through JVM_NativePath, which removes
// duplicate slashes and converts all instances of '/' into '\\'.

DIR * os::opendir(const char *dirname) {
  assert(dirname != NULL, "just checking");   // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
  DWORD fattr;                                // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == 0) {
    errno = ENOMEM;
    return 0;
  }

  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
  // as a directory in FindFirstFile().  We detect this case here and
  // prepend the current drive name.
  //
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    alt_dirname[0] = _getdrive() + 'A' - 1;
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dirp->path == 0) {
    free(dirp);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dirp->path, dirname);

  fattr = GetFileAttributes(dirp->path);
  if (fattr == 0xffffffff) {
    free(dirp->path);
    free(dirp);
    errno = ENOENT;
    return 0;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path);
    free(dirp);
    errno = ENOTDIR;
    return 0;
  }

  // Append "*.*", or possibly "\\*.*", to path
  if (dirp->path[1] == ':' &&
      (dirp->path[2] == '\0' ||
      (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    // No '\\' needed for cases like "Z:" or "Z:\"
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path);
      free(dirp);
      errno = EACCES;
      return 0;
    }
  }
  return dirp;
}

// parameter dbuf unused on Windows
struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    return 0;
  }

  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return 0;
    }
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}

int os::closedir(DIR *dirp) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path);
  free(dirp);
  return 0;
}

// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, a la java.io.tmpdir.
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf) > 0) {
    return path_buf;
  } else {
    path_buf[0] = '\0';
    return path_buf;
  }
}

static bool file_exists(const char* filename) {
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
}

bool os::dll_build_name(char *buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  const size_t pnamelen = pname ? strlen(pname) : 0;
  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    jio_snprintf(buffer, buflen, "%s.dll", fname);
    retval = true;
  } else if (c == ':' || c == '\\') {
    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      char* path = pelements[i];
      // Really shouldn't be NULL, but check can't hurt
      size_t plen = (path == NULL) ? 0 : strlen(path);
      if (plen == 0) {
        continue;  // skip the empty path values
      }
      const char lastchar = path[plen - 1];
      if (lastchar == ':' || lastchar == '\\') {
        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
      } else {
        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
      }
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
    retval = true;
  }
  return retval;
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
const char* os::get_current_directory(char *buf, size_t buflen) {
  int n = static_cast<int>(buflen);
  if (buflen > INT_MAX) n = INT_MAX;
  return _getcwd(buf, n);
}

//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if the address is
// within the NTDLL address space.
//
static bool _addr_in_ntdll(address addr) {
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if (hmod == NULL) return false;
  if (!os::PSApiDll::GetModuleInformation(GetCurrentProcess(), hmod,
                                          &minfo, sizeof(MODULEINFO))) {
    return false;
  }

  if ((addr >= minfo.lpBaseOfDll) &&
      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
    return true;
  } else {
    return false;
  }
}
#endif

struct _modinfo {
  address addr;
  char*   full_path;  // point to a char buffer
  int     buflen;     // size of the buffer
  address base_addr;
};

static int _locate_module_by_addr(const char * mod_fname, address base_addr,
                                  address top_address, void * param) {
  struct _modinfo *pmod = (struct _modinfo *)param;
  if (!pmod) return -1;

  if (base_addr <= pmod->addr &&
      top_address > pmod->addr) {
    // if a buffer is provided, copy path name to the buffer
    if (pmod->full_path) {
      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
    }
    pmod->base_addr = base_addr;
    return 1;
  }
  return 0;
}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
  //       return the full path to the DLL file, sometimes it returns path
  //       to the corresponding PDB file (debug info); sometimes it only
  //       returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr      = addr;
  mi.full_path = buf;
  mi.buflen    = buflen;
  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset)) {
    return true;
  }
  if (offset != NULL) *offset = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
static int _locate_jvm_dll(const char* mod_fname, address base_addr,
                           address top_address, void * param) {
  if (!param) return -1;

  if (base_addr <= (address)_locate_jvm_dll &&
      top_address > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = top_address;
    return 1;
  }
  return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
bool os::address_is_in_vm(address addr) {
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(const char* fname, address base_address,
                         address top_address, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
  return 0;
}

// Loads a .dll/.so and, in case of error, checks whether the .dll/.so was
// built for the same architecture as Hotspot is running on.
void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
  void * result = LoadLibrary(name);
  if (result != NULL) {
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
    ebuf[ebuflen - 1] = '\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read the dll-info and find that the dll was built
  // for an architecture other than the one Hotspot is running on,
  // then print to buffer "DLL was built for a different architecture";
  // else call os::lasterror to obtain the system error message.
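
  // Hedged sketch of the PE/COFF layout probed below (for illustration only):
  //
  //   offset 0x3c (IMAGE_FILE_PTR_TO_SIGNATURE)  ->  32-bit offset of "PE\0\0"
  //   signature_offset                            ->  "PE\0\0" (4 bytes)
  //   signature_offset + 4                        ->  COFF file header, whose
  //                                                   first 16-bit field is the
  //                                                   Machine (architecture) code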

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen - 1] = '\0';
  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
  if (fd < 0) {
    return NULL;
  }

  uint32_t signature_offset;
  uint16_t lib_arch = 0;
  bool failed_to_get_lib_arch =
    ( // Go to position 3c in the dll
     (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
     ||
     // Read location of signature
     (sizeof(signature_offset) !=
     (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
     ||
     // Go to COFF File Header in dll
     // that is located after "signature" (4 bytes long)
     (os::seek_to_file_offset(fd,
     signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
     ||
     // Read field that contains code of architecture
     // that dll was built for
     (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
    );

  ::close(fd);
  if (failed_to_get_lib_arch) {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  typedef struct {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[] = {
    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
  };
#if (defined _M_IA64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_IA64;
#elif (defined _M_AMD64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
         is defined :_M_IA64,_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain the platform this .dll was built for
  // running_arch_str shall contain the platform Hotspot was built for
  char *running_arch_str = NULL, *lib_arch_str = NULL;
  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (lib_arch == arch_array[i].arch_code) {
      lib_arch_str = arch_array[i].arch_name;
    }
    if (running_arch == arch_array[i].arch_code) {
      running_arch_str = arch_array[i].arch_name;
    }
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch) {
    return NULL;
  }

  if (lib_arch_str != NULL) {
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str, running_arch_str);
  } else {
    // don't know what architecture this dll was built for
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch, running_arch_str);
  }

  return NULL;
}

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  get_loaded_modules_info(_print_module, (void *)st);
}

int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  HANDLE   hProcess;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[MAX_PATH];
  int         result = 0;

  if (!os::PSApiDll::PSApiAvailable()) {
    return 0;
  }

  int pid = os::current_process_id();
  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
                                        sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
                                           filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
                                            &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = callback(filename, (address)modinfo.lpBaseOfDll,
                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
}

void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}

void os::print_os_info(outputStream* st) {
#ifdef ASSERT
  char buffer[1024];
  DWORD size = sizeof(buffer);
  st->print(" HostName: ");
  if (GetComputerNameEx(ComputerNameDnsHostname, buffer, &size)) {
    st->print("%s", buffer);
  } else {
    st->print("N/A");
  }
#endif
  st->print(" OS:");
  os::win32::print_windows_version(st);
}

void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  VS_FIXEDFILEINFO *file_info;
  TCHAR kernel32_path[MAX_PATH];
  UINT len, ret;

  // Use the GetVersionEx information to see if we're on a server or
  // workstation edition of Windows. Starting with Windows 8.1 we can't
  // trust the OS version information returned by this API.
  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("Call to GetVersionEx failed");
    return;
  }
  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);

  // Get the full path to \Windows\System32\kernel32.dll and use that for
  // determining what version of Windows we're running on.
  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
  ret = GetSystemDirectory(kernel32_path, len);
  if (ret == 0 || ret > len) {
    st->print_cr("Call to GetSystemDirectory failed");
    return;
  }
  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);

  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
  if (version_size == 0) {
    st->print_cr("Call to GetFileVersionInfoSize failed");
    return;
  }

  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
  if (version_info == NULL) {
    st->print_cr("Failed to allocate version_info");
    return;
  }

  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
    os::free(version_info);
    st->print_cr("Call to GetFileVersionInfo failed");
    return;
  }

  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
    os::free(version_info);
    st->print_cr("Call to VerQueryValue failed");
    return;
  }

  int major_version = HIWORD(file_info->dwProductVersionMS);
  int minor_version = LOWORD(file_info->dwProductVersionMS);
  int build_number = HIWORD(file_info->dwProductVersionLS);
  int build_minor = LOWORD(file_info->dwProductVersionLS);
  int os_vers = major_version * 1000 + minor_version;
  os::free(version_info);

  st->print(" Windows ");
  switch (os_vers) {

  case 6000:
    if (is_workstation) {
      st->print("Vista");
    } else {
      st->print("Server 2008");
    }
    break;

  case 6001:
    if (is_workstation) {
      st->print("7");
    } else {
      st->print("Server 2008 R2");
    }
    break;

  case 6002:
    if (is_workstation) {
      st->print("8");
    } else {
      st->print("Server 2012");
    }
    break;

  case 6003:
    if (is_workstation) {
      st->print("8.1");
    } else {
      st->print("Server 2012 R2");
    }
    break;

  case 10000:
    if (is_workstation) {
      st->print("10");
    } else {
      // The server version name of Windows 10 is not known at this time
      st->print("%d.%d", major_version, minor_version);
    }
    break;

  default:
    // Unrecognized windows, print out its major and minor versions
    st->print("%d.%d", major_version, minor_version);
    break;
  }

  // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we can
  // find out whether we are running on a 64-bit processor or not
  SYSTEM_INFO si;
  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  os::Kernel32Dll::GetNativeSystemInfo(&si);
  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
    st->print(" , 64 bit");
  }

  st->print(" Build %d", build_number);
  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
  st->cr();
}

void os::pd_print_cpu_info(outputStream* st) {
  // Nothing to do for now.
}

void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);

  st->print(", physical %uk", os::physical_memory() >> 10);
  st->print("(%uk free)", os::available_memory() >> 10);

  st->print(", swap %uk", ms.ullTotalPageFile >> 10);
  st->print("(%uk free)", ms.ullAvailPageFile >> 10);
  st->cr();
}

void os::print_siginfo(outputStream *st, void *siginfo) {
  EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo;
  st->print("siginfo:");
  st->print(" ExceptionCode=0x%x", er->ExceptionCode);

  if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      er->NumberParameters >= 2) {
    switch (er->ExceptionInformation[0]) {
    case 0: st->print(", reading address"); break;
    case 1: st->print(", writing address"); break;
    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                       er->ExceptionInformation[0]);
    }
    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR &&
             er->NumberParameters >= 2 && UseSharedSpaces) {
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) {
      st->print("\n\nError accessing class data sharing archive."       \
                " Mapped file inaccessible during execution, "          \
                " possible disk/network problem.");
    }
  } else {
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}

static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like jvm.dll is installed there (append a fake suffix
    // hotspot/jvm.dll).
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {
      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      if (0 != _access(buf, 0)) {
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  if (buf[0] == '\0') {
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  strncpy(saved_jvm_path, buf, MAX_PATH);
  saved_jvm_path[MAX_PATH - 1] = '\0';
}


void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}


void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("@%d", args_size * sizeof(int));
#endif
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c

size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
                                     FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
                                     NULL,
                                     errval,
                                     0,
                                     buf,
                                     (DWORD)len,
                                     NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}

int os::get_last_error() {
  DWORD error = GetLastError();
  if (error == 0) {
    error = errno;
  }
  return (int)error;
}

WindowsSemaphore::WindowsSemaphore(uint value) {
  _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL);

  guarantee(_semaphore != NULL, err_msg("CreateSemaphore failed with error code: %lu", GetLastError()));
}

WindowsSemaphore::~WindowsSemaphore() {
  if (_semaphore != NULL) {
    ::CloseHandle(_semaphore);
  }
}

void WindowsSemaphore::signal(uint count) {
  if (count > 0) {
    BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL);

    assert(ret != 0, err_msg("ReleaseSemaphore failed with error code: %lu", GetLastError()));
  }
}

void WindowsSemaphore::wait() {
  DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE);
  assert(ret != WAIT_FAILED,   err_msg("WaitForSingleObject failed with error code: %lu", GetLastError()));
  assert(ret == WAIT_OBJECT_0, err_msg("WaitForSingleObject failed with return value: %lu", ret));
}

// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
CTRL_CLOSE_EVENT. 1929 // See bug 4416763. 1930 static void (*sigbreakHandler)(int) = NULL; 1931 1932 static void UserHandler(int sig, void *siginfo, void *context) { 1933 os::signal_notify(sig); 1934 // We need to reinstate the signal handler each time... 1935 os::signal(sig, (void*)UserHandler); 1936 } 1937 1938 void* os::user_handler() { 1939 return (void*) UserHandler; 1940 } 1941 1942 void* os::signal(int signal_number, void* handler) { 1943 if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) { 1944 void (*oldHandler)(int) = sigbreakHandler; 1945 sigbreakHandler = (void (*)(int)) handler; 1946 return (void*) oldHandler; 1947 } else { 1948 return (void*)::signal(signal_number, (void (*)(int))handler); 1949 } 1950 } 1951 1952 void os::signal_raise(int signal_number) { 1953 raise(signal_number); 1954 } 1955 1956 // The Win32 C runtime library maps all console control events other than ^C 1957 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close, 1958 // logoff, and shutdown events. We therefore install our own console handler 1959 // that raises SIGTERM for the latter cases. 1960 // 1961 static BOOL WINAPI consoleHandler(DWORD event) { 1962 switch (event) { 1963 case CTRL_C_EVENT: 1964 if (is_error_reported()) { 1965 // Ctrl-C is pressed during error reporting, likely because the error 1966 // handler fails to abort. Let VM die immediately. 1967 os::die(); 1968 } 1969 1970 os::signal_raise(SIGINT); 1971 return TRUE; 1972 break; 1973 case CTRL_BREAK_EVENT: 1974 if (sigbreakHandler != NULL) { 1975 (*sigbreakHandler)(SIGBREAK); 1976 } 1977 return TRUE; 1978 break; 1979 case CTRL_LOGOFF_EVENT: { 1980 // Don't terminate JVM if it is running in a non-interactive session, 1981 // such as a service process. 1982 USEROBJECTFLAGS flags; 1983 HANDLE handle = GetProcessWindowStation(); 1984 if (handle != NULL && 1985 GetUserObjectInformation(handle, UOI_FLAGS, &flags, 1986 sizeof(USEROBJECTFLAGS), NULL)) { 1987 // If it is a non-interactive session, let next handler to deal 1988 // with it. 1989 if ((flags.dwFlags & WSF_VISIBLE) == 0) { 1990 return FALSE; 1991 } 1992 } 1993 } 1994 case CTRL_CLOSE_EVENT: 1995 case CTRL_SHUTDOWN_EVENT: 1996 os::signal_raise(SIGTERM); 1997 return TRUE; 1998 break; 1999 default: 2000 break; 2001 } 2002 return FALSE; 2003 } 2004 2005 // The following code is moved from os.cpp for making this 2006 // code platform specific, which it is by its very nature. 2007 2008 // Return maximum OS signal used + 1 for internal use only 2009 // Used as exit signal for signal_thread 2010 int os::sigexitnum_pd() { 2011 return NSIG; 2012 } 2013 2014 // a counter for each possible signal value, including signal_thread exit signal 2015 static volatile jint pending_signals[NSIG+1] = { 0 }; 2016 static HANDLE sig_sem = NULL; 2017 2018 void os::signal_init_pd() { 2019 // Initialize signal structures 2020 memset((void*)pending_signals, 0, sizeof(pending_signals)); 2021 2022 sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL); 2023 2024 // Programs embedding the VM do not want it to attempt to receive 2025 // events like CTRL_LOGOFF_EVENT, which are used to implement the 2026 // shutdown hooks mechanism introduced in 1.3. 
For example, when 2027 // the VM is run as part of a Windows NT service (i.e., a servlet 2028 // engine in a web server), the correct behavior is for any console 2029 // control handler to return FALSE, not TRUE, because the OS's 2030 // "final" handler for such events allows the process to continue if 2031 // it is a service (while terminating it if it is not a service). 2032 // To make this behavior uniform and the mechanism simpler, we 2033 // completely disable the VM's usage of these console events if -Xrs 2034 // (=ReduceSignalUsage) is specified. This means, for example, that 2035 // the CTRL-BREAK thread dump mechanism is also disabled in this 2036 // case. See bugs 4323062, 4345157, and related bugs. 2037 2038 if (!ReduceSignalUsage) { 2039 // Add a CTRL-C handler 2040 SetConsoleCtrlHandler(consoleHandler, TRUE); 2041 } 2042 } 2043 2044 void os::signal_notify(int signal_number) { 2045 BOOL ret; 2046 if (sig_sem != NULL) { 2047 Atomic::inc(&pending_signals[signal_number]); 2048 ret = ::ReleaseSemaphore(sig_sem, 1, NULL); 2049 assert(ret != 0, "ReleaseSemaphore() failed"); 2050 } 2051 } 2052 2053 static int check_pending_signals(bool wait_for_signal) { 2054 DWORD ret; 2055 while (true) { 2056 for (int i = 0; i < NSIG + 1; i++) { 2057 jint n = pending_signals[i]; 2058 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) { 2059 return i; 2060 } 2061 } 2062 if (!wait_for_signal) { 2063 return -1; 2064 } 2065 2066 JavaThread *thread = JavaThread::current(); 2067 2068 ThreadBlockInVM tbivm(thread); 2069 2070 bool threadIsSuspended; 2071 do { 2072 thread->set_suspend_equivalent(); 2073 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 2074 ret = ::WaitForSingleObject(sig_sem, INFINITE); 2075 assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed"); 2076 2077 // were we externally suspended while we were waiting? 2078 threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); 2079 if (threadIsSuspended) { 2080 // The semaphore has been incremented, but while we were waiting 2081 // another thread suspended us. We don't want to continue running 2082 // while suspended because that would surprise the thread that 2083 // suspended us. 2084 ret = ::ReleaseSemaphore(sig_sem, 1, NULL); 2085 assert(ret != 0, "ReleaseSemaphore() failed"); 2086 2087 thread->java_suspend_self(); 2088 } 2089 } while (threadIsSuspended); 2090 } 2091 } 2092 2093 int os::signal_lookup() { 2094 return check_pending_signals(false); 2095 } 2096 2097 int os::signal_wait() { 2098 return check_pending_signals(true); 2099 } 2100 2101 // Implicit OS exception handling 2102 2103 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, 2104 address handler) { 2105 JavaThread* thread = JavaThread::current(); 2106 // Save pc in thread 2107 #ifdef _M_IA64 2108 // Do not blow up if no thread info available. 2109 if (thread) { 2110 // Saving PRECISE pc (with slot information) in thread. 2111 uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress; 2112 // Convert precise PC into "Unix" format 2113 precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2); 2114 thread->set_saved_exception_pc((address)precise_pc); 2115 } 2116 // Set pc to handler 2117 exceptionInfo->ContextRecord->StIIP = (DWORD64)handler; 2118 // Clear out psr.ri (= Restart Instruction) in order to continue 2119 // at the beginning of the target bundle. 
2120 exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF; 2121 assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!"); 2122 #else 2123 #ifdef _M_AMD64 2124 // Do not blow up if no thread info available. 2125 if (thread) { 2126 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip); 2127 } 2128 // Set pc to handler 2129 exceptionInfo->ContextRecord->Rip = (DWORD64)handler; 2130 #else 2131 // Do not blow up if no thread info available. 2132 if (thread) { 2133 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip); 2134 } 2135 // Set pc to handler 2136 exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler; 2137 #endif 2138 #endif 2139 2140 // Continue the execution 2141 return EXCEPTION_CONTINUE_EXECUTION; 2142 } 2143 2144 2145 // Used for PostMortemDump 2146 extern "C" void safepoints(); 2147 extern "C" void find(int x); 2148 extern "C" void events(); 2149 2150 // According to Windows API documentation, an illegal instruction sequence should generate 2151 // the 0xC000001C exception code. However, real world experience shows that occasionally 2152 // the execution of an illegal instruction can generate the exception code 0xC000001E. This 2153 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems). 2154 2155 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E 2156 2157 // From "Execution Protection in the Windows Operating System" draft 0.35 2158 // Once a system header becomes available, the "real" define should be 2159 // included or copied here. 2160 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08 2161 2162 // Handle NAT Bit consumption on IA64. 2163 #ifdef _M_IA64 2164 #define EXCEPTION_REG_NAT_CONSUMPTION STATUS_REG_NAT_CONSUMPTION 2165 #endif 2166 2167 // Windows Vista/2008 heap corruption check 2168 #define EXCEPTION_HEAP_CORRUPTION 0xC0000374 2169 2170 #define def_excpt(val) #val, val 2171 2172 struct siglabel { 2173 char *name; 2174 int number; 2175 }; 2176 2177 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual 2178 // C++ compiler contain this error code. Because this is a compiler-generated 2179 // error, the code is not listed in the Win32 API header files. 2180 // The code is actually a cryptic mnemonic device, with the initial "E" 2181 // standing for "exception" and the final 3 bytes (0x6D7363) representing the 2182 // ASCII values of "msc".
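// For example, 0xE06D7363 decomposes as the marker byte 0xE0 followed by 0x6D ('m'), 0x73 ('s') and 0x63 ('c').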
2183 2184 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363 2185 2186 2187 struct siglabel exceptlabels[] = { 2188 def_excpt(EXCEPTION_ACCESS_VIOLATION), 2189 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), 2190 def_excpt(EXCEPTION_BREAKPOINT), 2191 def_excpt(EXCEPTION_SINGLE_STEP), 2192 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), 2193 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND), 2194 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO), 2195 def_excpt(EXCEPTION_FLT_INEXACT_RESULT), 2196 def_excpt(EXCEPTION_FLT_INVALID_OPERATION), 2197 def_excpt(EXCEPTION_FLT_OVERFLOW), 2198 def_excpt(EXCEPTION_FLT_STACK_CHECK), 2199 def_excpt(EXCEPTION_FLT_UNDERFLOW), 2200 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO), 2201 def_excpt(EXCEPTION_INT_OVERFLOW), 2202 def_excpt(EXCEPTION_PRIV_INSTRUCTION), 2203 def_excpt(EXCEPTION_IN_PAGE_ERROR), 2204 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION), 2205 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2), 2206 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION), 2207 def_excpt(EXCEPTION_STACK_OVERFLOW), 2208 def_excpt(EXCEPTION_INVALID_DISPOSITION), 2209 def_excpt(EXCEPTION_GUARD_PAGE), 2210 def_excpt(EXCEPTION_INVALID_HANDLE), 2211 def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION), 2212 def_excpt(EXCEPTION_HEAP_CORRUPTION), 2213 #ifdef _M_IA64 2214 def_excpt(EXCEPTION_REG_NAT_CONSUMPTION), 2215 #endif 2216 NULL, 0 2217 }; 2218 2219 const char* os::exception_name(int exception_code, char *buf, size_t size) { 2220 for (int i = 0; exceptlabels[i].name != NULL; i++) { 2221 if (exceptlabels[i].number == exception_code) { 2222 jio_snprintf(buf, size, "%s", exceptlabels[i].name); 2223 return buf; 2224 } 2225 } 2226 2227 return NULL; 2228 } 2229 2230 //----------------------------------------------------------------------------- 2231 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2232 // handle exception caused by idiv; should only happen for -MinInt/-1 2233 // (division by zero is handled explicitly) 2234 #ifdef _M_IA64 2235 assert(0, "Fix Handle_IDiv_Exception"); 2236 #else 2237 #ifdef _M_AMD64 2238 PCONTEXT ctx = exceptionInfo->ContextRecord; 2239 address pc = (address)ctx->Rip; 2240 assert(pc[0] == 0xF7, "not an idiv opcode"); 2241 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2242 assert(ctx->Rax == min_jint, "unexpected idiv exception"); 2243 // set correct result values and continue after idiv instruction 2244 ctx->Rip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2245 ctx->Rax = (DWORD)min_jint; // result 2246 ctx->Rdx = (DWORD)0; // remainder 2247 // Continue the execution 2248 #else 2249 PCONTEXT ctx = exceptionInfo->ContextRecord; 2250 address pc = (address)ctx->Eip; 2251 assert(pc[0] == 0xF7, "not an idiv opcode"); 2252 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2253 assert(ctx->Eax == min_jint, "unexpected idiv exception"); 2254 // set correct result values and continue after idiv instruction 2255 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2256 ctx->Eax = (DWORD)min_jint; // result 2257 ctx->Edx = (DWORD)0; // remainder 2258 // Continue the execution 2259 #endif 2260 #endif 2261 return EXCEPTION_CONTINUE_EXECUTION; 2262 } 2263 2264 //----------------------------------------------------------------------------- 2265 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2266 PCONTEXT ctx = exceptionInfo->ContextRecord; 2267 #ifndef _WIN64 2268 // handle exception caused by native method modifying control word 2269 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2270 2271 
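// If a native method has changed the x87 control word, the cases below restore the expected control word, mask out the pending FLT exceptions, and retry the faulting instruction; otherwise the exception is passed on (to the previous UnhandledExceptionFilter, if one was installed).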
switch (exception_code) { 2272 case EXCEPTION_FLT_DENORMAL_OPERAND: 2273 case EXCEPTION_FLT_DIVIDE_BY_ZERO: 2274 case EXCEPTION_FLT_INEXACT_RESULT: 2275 case EXCEPTION_FLT_INVALID_OPERATION: 2276 case EXCEPTION_FLT_OVERFLOW: 2277 case EXCEPTION_FLT_STACK_CHECK: 2278 case EXCEPTION_FLT_UNDERFLOW: 2279 jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std()); 2280 if (fp_control_word != ctx->FloatSave.ControlWord) { 2281 // Restore FPCW and mask out FLT exceptions 2282 ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0; 2283 // Mask out pending FLT exceptions 2284 ctx->FloatSave.StatusWord &= 0xffffff00; 2285 return EXCEPTION_CONTINUE_EXECUTION; 2286 } 2287 } 2288 2289 if (prev_uef_handler != NULL) { 2290 // We didn't handle this exception so pass it to the previous 2291 // UnhandledExceptionFilter. 2292 return (prev_uef_handler)(exceptionInfo); 2293 } 2294 #else // !_WIN64 2295 // On Windows, the mxcsr control bits are non-volatile across calls 2296 // See also CR 6192333 2297 // 2298 jint MxCsr = INITIAL_MXCSR; 2299 // we can't use StubRoutines::addr_mxcsr_std() 2300 // because in Win64 mxcsr is not saved there 2301 if (MxCsr != ctx->MxCsr) { 2302 ctx->MxCsr = MxCsr; 2303 return EXCEPTION_CONTINUE_EXECUTION; 2304 } 2305 #endif // !_WIN64 2306 2307 return EXCEPTION_CONTINUE_SEARCH; 2308 } 2309 2310 static inline void report_error(Thread* t, DWORD exception_code, 2311 address addr, void* siginfo, void* context) { 2312 VMError err(t, exception_code, addr, siginfo, context); 2313 err.report_and_die(); 2314 2315 // If UseOsErrorReporting, this will return here and save the error file 2316 // somewhere where we can find it in the minidump. 2317 } 2318 2319 //----------------------------------------------------------------------------- 2320 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { 2321 if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH; 2322 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2323 #ifdef _M_IA64 2324 // On Itanium, we need the "precise pc", which has the slot number coded 2325 // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format). 2326 address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress; 2327 // Convert the pc to "Unix format", which has the slot number coded 2328 // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2 2329 // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction" 2330 // information is saved in the Unix format. 2331 address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2)); 2332 #else 2333 #ifdef _M_AMD64 2334 address pc = (address) exceptionInfo->ContextRecord->Rip; 2335 #else 2336 address pc = (address) exceptionInfo->ContextRecord->Eip; 2337 #endif 2338 #endif 2339 Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady 2340 2341 // Handle SafeFetch32 and SafeFetchN exceptions. 2342 if (StubRoutines::is_safefetch_fault(pc)) { 2343 return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc)); 2344 } 2345 2346 #ifndef _WIN64 2347 // Execution protection violation - win32 running on AMD64 only 2348 // Handled first to avoid misdiagnosis as a "normal" access violation; 2349 // This is safe to do because we have a new/unique ExceptionInformation 2350 // code for this condition. 
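// For an execution protection fault, ExceptionInformation[0] holds the subcode (EXCEPTION_INFO_EXEC_VIOLATION) and ExceptionInformation[1] the faulting address; the handler below may unguard that page (set it to RWX) and retry the instruction.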
2351 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2352 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2353 int exception_subcode = (int) exceptionRecord->ExceptionInformation[0]; 2354 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2355 2356 if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) { 2357 int page_size = os::vm_page_size(); 2358 2359 // Make sure the pc and the faulting address are sane. 2360 // 2361 // If an instruction spans a page boundary, and the page containing 2362 // the beginning of the instruction is executable but the following 2363 // page is not, the pc and the faulting address might be slightly 2364 // different - we still want to unguard the 2nd page in this case. 2365 // 2366 // 15 bytes seems to be a (very) safe value for max instruction size. 2367 bool pc_is_near_addr = 2368 (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15); 2369 bool instr_spans_page_boundary = 2370 (align_size_down((intptr_t) pc ^ (intptr_t) addr, 2371 (intptr_t) page_size) > 0); 2372 2373 if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) { 2374 static volatile address last_addr = 2375 (address) os::non_memory_address_word(); 2376 2377 // In conservative mode, don't unguard unless the address is in the VM 2378 if (UnguardOnExecutionViolation > 0 && addr != last_addr && 2379 (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) { 2380 2381 // Set memory to RWX and retry 2382 address page_start = 2383 (address) align_size_down((intptr_t) addr, (intptr_t) page_size); 2384 bool res = os::protect_memory((char*) page_start, page_size, 2385 os::MEM_PROT_RWX); 2386 2387 if (PrintMiscellaneous && Verbose) { 2388 char buf[256]; 2389 jio_snprintf(buf, sizeof(buf), "Execution protection violation " 2390 "at " INTPTR_FORMAT 2391 ", unguarding " INTPTR_FORMAT ": %s", addr, 2392 page_start, (res ? "success" : strerror(errno))); 2393 tty->print_raw_cr(buf); 2394 } 2395 2396 // Set last_addr so if we fault again at the same address, we don't 2397 // end up in an endless loop. 2398 // 2399 // There are two potential complications here. Two threads trapping 2400 // at the same address at the same time could cause one of the 2401 // threads to think it already unguarded, and abort the VM. Likely 2402 // very rare. 2403 // 2404 // The other race involves two threads alternately trapping at 2405 // different addresses and failing to unguard the page, resulting in 2406 // an endless loop. This condition is probably even more unlikely 2407 // than the first. 2408 // 2409 // Although both cases could be avoided by using locks or thread 2410 // local last_addr, these solutions are unnecessary complication: 2411 // this handler is a best-effort safety net, not a complete solution. 2412 // It is disabled by default and should only be used as a workaround 2413 // in case we missed any no-execute-unsafe VM code. 2414 2415 last_addr = addr; 2416 2417 return EXCEPTION_CONTINUE_EXECUTION; 2418 } 2419 } 2420 2421 // Last unguard failed or not unguarding 2422 tty->print_raw_cr("Execution protection violation"); 2423 report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord, 2424 exceptionInfo->ContextRecord); 2425 return EXCEPTION_CONTINUE_SEARCH; 2426 } 2427 } 2428 #endif // _WIN64 2429 2430 // Check to see if we caught the safepoint code in the 2431 // process of write protecting the memory serialization page. 2432 // It write enables the page immediately after protecting it 2433 // so just return. 
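// The faulting address comes from ExceptionInformation[1]; if it lies in the memory serialization page, the thread just blocks until the page permission is restored and then re-executes the faulting access.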
2434 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2435 JavaThread* thread = (JavaThread*) t; 2436 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2437 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2438 if (os::is_memory_serialize_page(thread, addr)) { 2439 // Block current thread until the memory serialize page permission restored. 2440 os::block_on_serialize_page_trap(); 2441 return EXCEPTION_CONTINUE_EXECUTION; 2442 } 2443 } 2444 2445 if ((exception_code == EXCEPTION_ACCESS_VIOLATION) && 2446 VM_Version::is_cpuinfo_segv_addr(pc)) { 2447 // Verify that OS save/restore AVX registers. 2448 return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr()); 2449 } 2450 2451 if (t != NULL && t->is_Java_thread()) { 2452 JavaThread* thread = (JavaThread*) t; 2453 bool in_java = thread->thread_state() == _thread_in_Java; 2454 2455 // Handle potential stack overflows up front. 2456 if (exception_code == EXCEPTION_STACK_OVERFLOW) { 2457 if (os::uses_stack_guard_pages()) { 2458 #ifdef _M_IA64 2459 // Use guard page for register stack. 2460 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2461 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2462 // Check for a register stack overflow on Itanium 2463 if (thread->addr_inside_register_stack_red_zone(addr)) { 2464 // Fatal red zone violation happens if the Java program 2465 // catches a StackOverflow error and does so much processing 2466 // that it runs beyond the unprotected yellow guard zone. As 2467 // a result, we are out of here. 2468 fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit."); 2469 } else if(thread->addr_inside_register_stack(addr)) { 2470 // Disable the yellow zone which sets the state that 2471 // we've got a stack overflow problem. 2472 if (thread->stack_yellow_zone_enabled()) { 2473 thread->disable_stack_yellow_zone(); 2474 } 2475 // Give us some room to process the exception. 2476 thread->disable_register_stack_guard(); 2477 // Tracing with +Verbose. 2478 if (Verbose) { 2479 tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc); 2480 tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr); 2481 tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base()); 2482 tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]", 2483 thread->register_stack_base(), 2484 thread->register_stack_base() + thread->stack_size()); 2485 } 2486 2487 // Reguard the permanent register stack red zone just to be sure. 2488 // We saw Windows silently disabling this without telling us. 2489 thread->enable_register_stack_red_zone(); 2490 2491 return Handle_Exception(exceptionInfo, 2492 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2493 } 2494 #endif 2495 if (thread->stack_yellow_zone_enabled()) { 2496 // Yellow zone violation. The o/s has unprotected the first yellow 2497 // zone page for us. Note: must call disable_stack_yellow_zone to 2498 // update the enabled status, even if the zone contains only one page. 2499 thread->disable_stack_yellow_zone(); 2500 // If not in java code, return and hope for the best. 2501 return in_java 2502 ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)) 2503 : EXCEPTION_CONTINUE_EXECUTION; 2504 } else { 2505 // Fatal red zone violation. 
2506 thread->disable_stack_red_zone(); 2507 tty->print_raw_cr("An unrecoverable stack overflow has occurred."); 2508 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2509 exceptionInfo->ContextRecord); 2510 return EXCEPTION_CONTINUE_SEARCH; 2511 } 2512 } else if (in_java) { 2513 // JVM-managed guard pages cannot be used on win95/98. The o/s provides 2514 // a one-time-only guard page, which it has released to us. The next 2515 // stack overflow on this thread will result in an ACCESS_VIOLATION. 2516 return Handle_Exception(exceptionInfo, 2517 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2518 } else { 2519 // Can only return and hope for the best. Further stack growth will 2520 // result in an ACCESS_VIOLATION. 2521 return EXCEPTION_CONTINUE_EXECUTION; 2522 } 2523 } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2524 // Either stack overflow or null pointer exception. 2525 if (in_java) { 2526 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2527 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2528 address stack_end = thread->stack_base() - thread->stack_size(); 2529 if (addr < stack_end && addr >= stack_end - os::vm_page_size()) { 2530 // Stack overflow. 2531 assert(!os::uses_stack_guard_pages(), 2532 "should be caught by red zone code above."); 2533 return Handle_Exception(exceptionInfo, 2534 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2535 } 2536 // Check for safepoint polling and implicit null 2537 // We only expect null pointers in the stubs (vtable) 2538 // the rest are checked explicitly now. 2539 CodeBlob* cb = CodeCache::find_blob(pc); 2540 if (cb != NULL) { 2541 if (os::is_poll_address(addr)) { 2542 address stub = SharedRuntime::get_poll_stub(pc); 2543 return Handle_Exception(exceptionInfo, stub); 2544 } 2545 } 2546 { 2547 #ifdef _WIN64 2548 // If it's a legal stack address map the entire region in 2549 // 2550 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2551 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2552 if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base()) { 2553 addr = (address)((uintptr_t)addr & 2554 (~((uintptr_t)os::vm_page_size() - (uintptr_t)1))); 2555 os::commit_memory((char *)addr, thread->stack_base() - addr, 2556 !ExecMem); 2557 return EXCEPTION_CONTINUE_EXECUTION; 2558 } else 2559 #endif 2560 { 2561 // Null pointer exception. 2562 #ifdef _M_IA64 2563 // Process implicit null checks in compiled code. Note: Implicit null checks 2564 // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs. 
2565 if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) { 2566 CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format); 2567 // Handle implicit null check in UEP method entry 2568 if (cb && (cb->is_frame_complete_at(pc) || 2569 (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) { 2570 if (Verbose) { 2571 intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0); 2572 tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format); 2573 tty->print_cr(" to addr " INTPTR_FORMAT, addr); 2574 tty->print_cr(" bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)", 2575 *(bundle_start + 1), *bundle_start); 2576 } 2577 return Handle_Exception(exceptionInfo, 2578 SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL)); 2579 } 2580 } 2581 2582 // Implicit null checks were processed above. Hence, we should not reach 2583 // here in the usual case => die! 2584 if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception"); 2585 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2586 exceptionInfo->ContextRecord); 2587 return EXCEPTION_CONTINUE_SEARCH; 2588 2589 #else // !IA64 2590 2591 // Windows 98 reports faulting addresses incorrectly 2592 if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) || 2593 !os::win32::is_nt()) { 2594 address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); 2595 if (stub != NULL) return Handle_Exception(exceptionInfo, stub); 2596 } 2597 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2598 exceptionInfo->ContextRecord); 2599 return EXCEPTION_CONTINUE_SEARCH; 2600 #endif 2601 } 2602 } 2603 } 2604 2605 #ifdef _WIN64 2606 // Special care for fast JNI field accessors. 2607 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks 2608 // in and the heap gets shrunk before the field access. 2609 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2610 address addr = JNI_FastGetField::find_slowcase_pc(pc); 2611 if (addr != (address)-1) { 2612 return Handle_Exception(exceptionInfo, addr); 2613 } 2614 } 2615 #endif 2616 2617 // Stack overflow or null pointer exception in native code. 2618 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2619 exceptionInfo->ContextRecord); 2620 return EXCEPTION_CONTINUE_SEARCH; 2621 } // /EXCEPTION_ACCESS_VIOLATION 2622 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 2623 #if defined _M_IA64 2624 else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION || 2625 exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) { 2626 M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0); 2627 2628 // Compiled method patched to be non entrant? Following conditions must apply: 2629 // 1. must be first instruction in bundle 2630 // 2. 
must be a break instruction with appropriate code 2631 if ((((uint64_t) pc & 0x0F) == 0) && 2632 (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) { 2633 return Handle_Exception(exceptionInfo, 2634 (address)SharedRuntime::get_handle_wrong_method_stub()); 2635 } 2636 } // /EXCEPTION_ILLEGAL_INSTRUCTION 2637 #endif 2638 2639 2640 if (in_java) { 2641 switch (exception_code) { 2642 case EXCEPTION_INT_DIVIDE_BY_ZERO: 2643 return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO)); 2644 2645 case EXCEPTION_INT_OVERFLOW: 2646 return Handle_IDiv_Exception(exceptionInfo); 2647 2648 } // switch 2649 } 2650 if (((thread->thread_state() == _thread_in_Java) || 2651 (thread->thread_state() == _thread_in_native)) && 2652 exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) { 2653 LONG result=Handle_FLT_Exception(exceptionInfo); 2654 if (result==EXCEPTION_CONTINUE_EXECUTION) return result; 2655 } 2656 } 2657 2658 if (exception_code != EXCEPTION_BREAKPOINT) { 2659 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2660 exceptionInfo->ContextRecord); 2661 } 2662 return EXCEPTION_CONTINUE_SEARCH; 2663 } 2664 2665 #ifndef _WIN64 2666 // Special care for fast JNI accessors. 2667 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and 2668 // the heap gets shrunk before the field access. 2669 // Need to install our own structured exception handler since native code may 2670 // install its own. 2671 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { 2672 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2673 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2674 address pc = (address) exceptionInfo->ContextRecord->Eip; 2675 address addr = JNI_FastGetField::find_slowcase_pc(pc); 2676 if (addr != (address)-1) { 2677 return Handle_Exception(exceptionInfo, addr); 2678 } 2679 } 2680 return EXCEPTION_CONTINUE_SEARCH; 2681 } 2682 2683 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result) \ 2684 Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, \ 2685 jobject obj, \ 2686 jfieldID fieldID) { \ 2687 __try { \ 2688 return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, \ 2689 obj, \ 2690 fieldID); \ 2691 } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*) \ 2692 _exception_info())) { \ 2693 } \ 2694 return 0; \ 2695 } 2696 2697 DEFINE_FAST_GETFIELD(jboolean, bool, Boolean) 2698 DEFINE_FAST_GETFIELD(jbyte, byte, Byte) 2699 DEFINE_FAST_GETFIELD(jchar, char, Char) 2700 DEFINE_FAST_GETFIELD(jshort, short, Short) 2701 DEFINE_FAST_GETFIELD(jint, int, Int) 2702 DEFINE_FAST_GETFIELD(jlong, long, Long) 2703 DEFINE_FAST_GETFIELD(jfloat, float, Float) 2704 DEFINE_FAST_GETFIELD(jdouble, double, Double) 2705 2706 address os::win32::fast_jni_accessor_wrapper(BasicType type) { 2707 switch (type) { 2708 case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper; 2709 case T_BYTE: return (address)jni_fast_GetByteField_wrapper; 2710 case T_CHAR: return (address)jni_fast_GetCharField_wrapper; 2711 case T_SHORT: return (address)jni_fast_GetShortField_wrapper; 2712 case T_INT: return (address)jni_fast_GetIntField_wrapper; 2713 case T_LONG: return (address)jni_fast_GetLongField_wrapper; 2714 case T_FLOAT: return (address)jni_fast_GetFloatField_wrapper; 2715 case T_DOUBLE: return (address)jni_fast_GetDoubleField_wrapper; 2716 default: ShouldNotReachHere(); 2717 } 2718 return (address)-1; 2719 } 2720 
#endif 2721 2722 // Virtual Memory 2723 2724 int os::vm_page_size() { return os::win32::vm_page_size(); } 2725 int os::vm_allocation_granularity() { 2726 return os::win32::vm_allocation_granularity(); 2727 } 2728 2729 // Windows large page support is available on Windows 2003. In order to use 2730 // large page memory, the administrator must first assign additional privilege 2731 // to the user: 2732 // + select Control Panel -> Administrative Tools -> Local Security Policy 2733 // + select Local Policies -> User Rights Assignment 2734 // + double click "Lock pages in memory", add users and/or groups 2735 // + reboot 2736 // Note the above steps are needed for administrator as well, as administrators 2737 // by default do not have the privilege to lock pages in memory. 2738 // 2739 // Note about Windows 2003: although the API supports committing large page 2740 // memory on a page-by-page basis and VirtualAlloc() returns success under this 2741 // scenario, I found through experiment it only uses large page if the entire 2742 // memory region is reserved and committed in a single VirtualAlloc() call. 2743 // This makes Windows large page support more or less like Solaris ISM, in 2744 // that the entire heap must be committed upfront. This probably will change 2745 // in the future, if so the code below needs to be revisited. 2746 2747 #ifndef MEM_LARGE_PAGES 2748 #define MEM_LARGE_PAGES 0x20000000 2749 #endif 2750 2751 static HANDLE _hProcess; 2752 static HANDLE _hToken; 2753 2754 // Container for NUMA node list info 2755 class NUMANodeListHolder { 2756 private: 2757 int *_numa_used_node_list; // allocated below 2758 int _numa_used_node_count; 2759 2760 void free_node_list() { 2761 if (_numa_used_node_list != NULL) { 2762 FREE_C_HEAP_ARRAY(int, _numa_used_node_list); 2763 } 2764 } 2765 2766 public: 2767 NUMANodeListHolder() { 2768 _numa_used_node_count = 0; 2769 _numa_used_node_list = NULL; 2770 // do rest of initialization in build routine (after function pointers are set up) 2771 } 2772 2773 ~NUMANodeListHolder() { 2774 free_node_list(); 2775 } 2776 2777 bool build() { 2778 DWORD_PTR proc_aff_mask; 2779 DWORD_PTR sys_aff_mask; 2780 if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false; 2781 ULONG highest_node_number; 2782 if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false; 2783 free_node_list(); 2784 _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal); 2785 for (unsigned int i = 0; i <= highest_node_number; i++) { 2786 ULONGLONG proc_mask_numa_node; 2787 if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false; 2788 if ((proc_aff_mask & proc_mask_numa_node)!=0) { 2789 _numa_used_node_list[_numa_used_node_count++] = i; 2790 } 2791 } 2792 return (_numa_used_node_count > 1); 2793 } 2794 2795 int get_count() { return _numa_used_node_count; } 2796 int get_node_list_entry(int n) { 2797 // for indexes out of range, returns -1 2798 return (n < _numa_used_node_count ? 
_numa_used_node_list[n] : -1); 2799 } 2800 2801 } numa_node_list_holder; 2802 2803 2804 2805 static size_t _large_page_size = 0; 2806 2807 static bool resolve_functions_for_large_page_init() { 2808 return os::Kernel32Dll::GetLargePageMinimumAvailable() && 2809 os::Advapi32Dll::AdvapiAvailable(); 2810 } 2811 2812 static bool request_lock_memory_privilege() { 2813 _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, 2814 os::current_process_id()); 2815 2816 LUID luid; 2817 if (_hProcess != NULL && 2818 os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) && 2819 os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) { 2820 2821 TOKEN_PRIVILEGES tp; 2822 tp.PrivilegeCount = 1; 2823 tp.Privileges[0].Luid = luid; 2824 tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; 2825 2826 // AdjustTokenPrivileges() may return TRUE even when it couldn't change the 2827 // privilege. Check GetLastError() too. See MSDN document. 2828 if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) && 2829 (GetLastError() == ERROR_SUCCESS)) { 2830 return true; 2831 } 2832 } 2833 2834 return false; 2835 } 2836 2837 static void cleanup_after_large_page_init() { 2838 if (_hProcess) CloseHandle(_hProcess); 2839 _hProcess = NULL; 2840 if (_hToken) CloseHandle(_hToken); 2841 _hToken = NULL; 2842 } 2843 2844 static bool numa_interleaving_init() { 2845 bool success = false; 2846 bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving); 2847 2848 // print a warning if UseNUMAInterleaving flag is specified on command line 2849 bool warn_on_failure = use_numa_interleaving_specified; 2850 #define WARN(msg) if (warn_on_failure) { warning(msg); } 2851 2852 // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages) 2853 size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity(); 2854 NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity); 2855 2856 if (os::Kernel32Dll::NumaCallsAvailable()) { 2857 if (numa_node_list_holder.build()) { 2858 if (PrintMiscellaneous && Verbose) { 2859 tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count()); 2860 for (int i = 0; i < numa_node_list_holder.get_count(); i++) { 2861 tty->print("%d ", numa_node_list_holder.get_node_list_entry(i)); 2862 } 2863 tty->print("\n"); 2864 } 2865 success = true; 2866 } else { 2867 WARN("Process does not cover multiple NUMA nodes."); 2868 } 2869 } else { 2870 WARN("NUMA Interleaving is not supported by the operating system."); 2871 } 2872 if (!success) { 2873 if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag."); 2874 } 2875 return success; 2876 #undef WARN 2877 } 2878 2879 // this routine is used whenever we need to reserve a contiguous VA range 2880 // but we need to make separate VirtualAlloc calls for each piece of the range 2881 // Reasons for doing this: 2882 // * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise) 2883 // * UseNUMAInterleaving requires a separate node for each piece 2884 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, 2885 DWORD prot, 2886 bool should_inject_error = false) { 2887 char * p_buf; 2888 // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size 2889 size_t page_size = UseLargePages ? 
_large_page_size : os::vm_allocation_granularity(); 2890 size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size; 2891 2892 // first reserve enough address space in advance since we want to be 2893 // able to break a single contiguous virtual address range into multiple 2894 // large page commits but WS2003 does not allow reserving large page space 2895 // so we just use 4K pages for reserve, this gives us a legal contiguous 2896 // address space. then we will deallocate that reservation, and re alloc 2897 // using large pages 2898 const size_t size_of_reserve = bytes + chunk_size; 2899 if (bytes > size_of_reserve) { 2900 // Overflowed. 2901 return NULL; 2902 } 2903 p_buf = (char *) VirtualAlloc(addr, 2904 size_of_reserve, // size of Reserve 2905 MEM_RESERVE, 2906 PAGE_READWRITE); 2907 // If reservation failed, return NULL 2908 if (p_buf == NULL) return NULL; 2909 MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC); 2910 os::release_memory(p_buf, bytes + chunk_size); 2911 2912 // we still need to round up to a page boundary (in case we are using large pages) 2913 // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size) 2914 // instead we handle this in the bytes_to_rq computation below 2915 p_buf = (char *) align_size_up((size_t)p_buf, page_size); 2916 2917 // now go through and allocate one chunk at a time until all bytes are 2918 // allocated 2919 size_t bytes_remaining = bytes; 2920 // An overflow of align_size_up() would have been caught above 2921 // in the calculation of size_of_reserve. 2922 char * next_alloc_addr = p_buf; 2923 HANDLE hProc = GetCurrentProcess(); 2924 2925 #ifdef ASSERT 2926 // Variable for the failure injection 2927 long ran_num = os::random(); 2928 size_t fail_after = ran_num % bytes; 2929 #endif 2930 2931 int count=0; 2932 while (bytes_remaining) { 2933 // select bytes_to_rq to get to the next chunk_size boundary 2934 2935 size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size)); 2936 // Note allocate and commit 2937 char * p_new; 2938 2939 #ifdef ASSERT 2940 bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after); 2941 #else 2942 const bool inject_error_now = false; 2943 #endif 2944 2945 if (inject_error_now) { 2946 p_new = NULL; 2947 } else { 2948 if (!UseNUMAInterleaving) { 2949 p_new = (char *) VirtualAlloc(next_alloc_addr, 2950 bytes_to_rq, 2951 flags, 2952 prot); 2953 } else { 2954 // get the next node to use from the used_node_list 2955 assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected"); 2956 DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count()); 2957 p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc, 2958 next_alloc_addr, 2959 bytes_to_rq, 2960 flags, 2961 prot, 2962 node); 2963 } 2964 } 2965 2966 if (p_new == NULL) { 2967 // Free any allocated pages 2968 if (next_alloc_addr > p_buf) { 2969 // Some memory was committed so release it. 2970 size_t bytes_to_release = bytes - bytes_remaining; 2971 // NMT has yet to record any individual blocks, so it 2972 // need to create a dummy 'reserve' record to match 2973 // the release. 
2974 MemTracker::record_virtual_memory_reserve((address)p_buf, 2975 bytes_to_release, CALLER_PC); 2976 os::release_memory(p_buf, bytes_to_release); 2977 } 2978 #ifdef ASSERT 2979 if (should_inject_error) { 2980 if (TracePageSizes && Verbose) { 2981 tty->print_cr("Reserving pages individually failed."); 2982 } 2983 } 2984 #endif 2985 return NULL; 2986 } 2987 2988 bytes_remaining -= bytes_to_rq; 2989 next_alloc_addr += bytes_to_rq; 2990 count++; 2991 } 2992 // Although the memory is allocated individually, it is returned as one. 2993 // NMT records it as one block. 2994 if ((flags & MEM_COMMIT) != 0) { 2995 MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC); 2996 } else { 2997 MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC); 2998 } 2999 3000 // made it this far, success 3001 return p_buf; 3002 } 3003 3004 3005 3006 void os::large_page_init() { 3007 if (!UseLargePages) return; 3008 3009 // print a warning if any large page related flag is specified on command line 3010 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) || 3011 !FLAG_IS_DEFAULT(LargePageSizeInBytes); 3012 bool success = false; 3013 3014 #define WARN(msg) if (warn_on_failure) { warning(msg); } 3015 if (resolve_functions_for_large_page_init()) { 3016 if (request_lock_memory_privilege()) { 3017 size_t s = os::Kernel32Dll::GetLargePageMinimum(); 3018 if (s) { 3019 #if defined(IA32) || defined(AMD64) 3020 if (s > 4*M || LargePageSizeInBytes > 4*M) { 3021 WARN("JVM cannot use large pages bigger than 4mb."); 3022 } else { 3023 #endif 3024 if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) { 3025 _large_page_size = LargePageSizeInBytes; 3026 } else { 3027 _large_page_size = s; 3028 } 3029 success = true; 3030 #if defined(IA32) || defined(AMD64) 3031 } 3032 #endif 3033 } else { 3034 WARN("Large page is not supported by the processor."); 3035 } 3036 } else { 3037 WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory."); 3038 } 3039 } else { 3040 WARN("Large page is not supported by the operating system."); 3041 } 3042 #undef WARN 3043 3044 const size_t default_page_size = (size_t) vm_page_size(); 3045 if (success && _large_page_size > default_page_size) { 3046 _page_sizes[0] = _large_page_size; 3047 _page_sizes[1] = default_page_size; 3048 _page_sizes[2] = 0; 3049 } 3050 3051 cleanup_after_large_page_init(); 3052 UseLargePages = success; 3053 } 3054 3055 // On win32, one cannot release just a part of reserved memory, it's an 3056 // all or nothing deal. When we split a reservation, we must break the 3057 // reservation into two reservations. 3058 void os::pd_split_reserved_memory(char *base, size_t size, size_t split, 3059 bool realloc) { 3060 if (size > 0) { 3061 release_memory(base, size); 3062 if (realloc) { 3063 reserve_memory(split, base); 3064 } 3065 if (size != split) { 3066 reserve_memory(size - split, base + split); 3067 } 3068 } 3069 } 3070 3071 // Multiple threads can race in this code but it's not possible to unmap small sections of 3072 // virtual space to get requested alignment, like posix-like os's. 3073 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe. 
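// To do that, the code below over-reserves (size + alignment) bytes, picks an aligned base inside that range, releases the whole over-sized reservation, and then re-reserves exactly [aligned_base, aligned_base + size). If another thread claims that range between the release and the re-reserve, the re-reserve returns NULL and the loop simply retries.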
3074 char* os::reserve_memory_aligned(size_t size, size_t alignment) { 3075 assert((alignment & (os::vm_allocation_granularity() - 1)) == 0, 3076 "Alignment must be a multiple of allocation granularity (page size)"); 3077 assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned"); 3078 3079 size_t extra_size = size + alignment; 3080 assert(extra_size >= size, "overflow, size is too large to allow alignment"); 3081 3082 char* aligned_base = NULL; 3083 3084 do { 3085 char* extra_base = os::reserve_memory(extra_size, NULL, alignment); 3086 if (extra_base == NULL) { 3087 return NULL; 3088 } 3089 // Do manual alignment 3090 aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment); 3091 3092 os::release_memory(extra_base, extra_size); 3093 3094 aligned_base = os::reserve_memory(size, aligned_base); 3095 3096 } while (aligned_base == NULL); 3097 3098 return aligned_base; 3099 } 3100 3101 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) { 3102 assert((size_t)addr % os::vm_allocation_granularity() == 0, 3103 "reserve alignment"); 3104 assert(bytes % os::vm_page_size() == 0, "reserve page size"); 3105 char* res; 3106 // note that if UseLargePages is on, all the areas that require interleaving 3107 // will go thru reserve_memory_special rather than thru here. 3108 bool use_individual = (UseNUMAInterleaving && !UseLargePages); 3109 if (!use_individual) { 3110 res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE); 3111 } else { 3112 elapsedTimer reserveTimer; 3113 if (Verbose && PrintMiscellaneous) reserveTimer.start(); 3114 // in numa interleaving, we have to allocate pages individually 3115 // (well really chunks of NUMAInterleaveGranularity size) 3116 res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE); 3117 if (res == NULL) { 3118 warning("NUMA page allocation failed"); 3119 } 3120 if (Verbose && PrintMiscellaneous) { 3121 reserveTimer.stop(); 3122 tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes, 3123 reserveTimer.milliseconds(), reserveTimer.ticks()); 3124 } 3125 } 3126 assert(res == NULL || addr == NULL || addr == res, 3127 "Unexpected address from reserve."); 3128 3129 return res; 3130 } 3131 3132 // Reserve memory at an arbitrary address, only if that area is 3133 // available (and not reserved for something else). 3134 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) { 3135 // Windows os::reserve_memory() fails if the requested address range is 3136 // not available. 3137 return reserve_memory(bytes, requested_addr); 3138 } 3139 3140 size_t os::large_page_size() { 3141 return _large_page_size; 3142 } 3143 3144 bool os::can_commit_large_page_memory() { 3145 // Windows only uses large page memory when the entire region is reserved 3146 // and committed in a single VirtualAlloc() call. This may change in the 3147 // future, but with Windows 2003 it's not possible to commit on demand. 3148 return false; 3149 } 3150 3151 bool os::can_execute_large_page_memory() { 3152 return true; 3153 } 3154 3155 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, 3156 bool exec) { 3157 assert(UseLargePages, "only for large pages"); 3158 3159 if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) { 3160 return NULL; // Fallback to small pages. 3161 } 3162 3163 const DWORD prot = exec ?
PAGE_EXECUTE_READWRITE : PAGE_READWRITE; 3164 const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 3165 3166 // with large pages, there are two cases where we need to use Individual Allocation 3167 // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003) 3168 // 2) NUMA Interleaving is enabled, in which case we use a different node for each page 3169 if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) { 3170 if (TracePageSizes && Verbose) { 3171 tty->print_cr("Reserving large pages individually."); 3172 } 3173 char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError); 3174 if (p_buf == NULL) { 3175 // give an appropriate warning message 3176 if (UseNUMAInterleaving) { 3177 warning("NUMA large page allocation failed, UseLargePages flag ignored"); 3178 } 3179 if (UseLargePagesIndividualAllocation) { 3180 warning("Individually allocated large pages failed, " 3181 "use -XX:-UseLargePagesIndividualAllocation to turn off"); 3182 } 3183 return NULL; 3184 } 3185 3186 return p_buf; 3187 3188 } else { 3189 if (TracePageSizes && Verbose) { 3190 tty->print_cr("Reserving large pages in a single large chunk."); 3191 } 3192 // normal policy just allocate it all at once 3193 DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 3194 char * res = (char *)VirtualAlloc(addr, bytes, flag, prot); 3195 if (res != NULL) { 3196 MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC); 3197 } 3198 3199 return res; 3200 } 3201 } 3202 3203 bool os::release_memory_special(char* base, size_t bytes) { 3204 assert(base != NULL, "Sanity check"); 3205 return release_memory(base, bytes); 3206 } 3207 3208 void os::print_statistics() { 3209 } 3210 3211 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) { 3212 int err = os::get_last_error(); 3213 char buf[256]; 3214 size_t buf_len = os::lasterror(buf, sizeof(buf)); 3215 warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT 3216 ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes, 3217 exec, buf_len != 0 ? buf : "<no_error_string>", err); 3218 } 3219 3220 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) { 3221 if (bytes == 0) { 3222 // Don't bother the OS with noops. 3223 return true; 3224 } 3225 assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries"); 3226 assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks"); 3227 // Don't attempt to print anything if the OS call fails. We're 3228 // probably low on resources, so the print itself may cause crashes. 3229 3230 // unless we have NUMAInterleaving enabled, the range of a commit 3231 // is always within a reserve covered by a single VirtualAlloc 3232 // in that case we can just do a single commit for the requested size 3233 if (!UseNUMAInterleaving) { 3234 if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) { 3235 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 3236 return false; 3237 } 3238 if (exec) { 3239 DWORD oldprot; 3240 // Windows doc says to use VirtualProtect to get execute permissions 3241 if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) { 3242 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 3243 return false; 3244 } 3245 } 3246 return true; 3247 } else { 3248 3249 // when NUMAInterleaving is enabled, the commit might cover a range that 3250 // came from multiple VirtualAlloc reserves (using allocate_pages_individually). 
3251 // VirtualQuery can help us determine that. The RegionSize that VirtualQuery 3252 // returns represents the number of bytes that can be committed in one step. 3253 size_t bytes_remaining = bytes; 3254 char * next_alloc_addr = addr; 3255 while (bytes_remaining > 0) { 3256 MEMORY_BASIC_INFORMATION alloc_info; 3257 VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info)); 3258 size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3259 if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, 3260 PAGE_READWRITE) == NULL) { 3261 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 3262 exec);) 3263 return false; 3264 } 3265 if (exec) { 3266 DWORD oldprot; 3267 if (!VirtualProtect(next_alloc_addr, bytes_to_rq, 3268 PAGE_EXECUTE_READWRITE, &oldprot)) { 3269 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 3270 exec);) 3271 return false; 3272 } 3273 } 3274 bytes_remaining -= bytes_to_rq; 3275 next_alloc_addr += bytes_to_rq; 3276 } 3277 } 3278 // if we made it this far, return true 3279 return true; 3280 } 3281 3282 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, 3283 bool exec) { 3284 // alignment_hint is ignored on this OS 3285 return pd_commit_memory(addr, size, exec); 3286 } 3287 3288 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec, 3289 const char* mesg) { 3290 assert(mesg != NULL, "mesg must be specified"); 3291 if (!pd_commit_memory(addr, size, exec)) { 3292 warn_fail_commit_memory(addr, size, exec); 3293 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg); 3294 } 3295 } 3296 3297 void os::pd_commit_memory_or_exit(char* addr, size_t size, 3298 size_t alignment_hint, bool exec, 3299 const char* mesg) { 3300 // alignment_hint is ignored on this OS 3301 pd_commit_memory_or_exit(addr, size, exec, mesg); 3302 } 3303 3304 bool os::pd_uncommit_memory(char* addr, size_t bytes) { 3305 if (bytes == 0) { 3306 // Don't bother the OS with noops. 
3307 return true; 3308 } 3309 assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries"); 3310 assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks"); 3311 return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0); 3312 } 3313 3314 bool os::pd_release_memory(char* addr, size_t bytes) { 3315 return VirtualFree(addr, 0, MEM_RELEASE) != 0; 3316 } 3317 3318 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 3319 return os::commit_memory(addr, size, !ExecMem); 3320 } 3321 3322 bool os::remove_stack_guard_pages(char* addr, size_t size) { 3323 return os::uncommit_memory(addr, size); 3324 } 3325 3326 // Set protections specified 3327 bool os::protect_memory(char* addr, size_t bytes, ProtType prot, 3328 bool is_committed) { 3329 unsigned int p = 0; 3330 switch (prot) { 3331 case MEM_PROT_NONE: p = PAGE_NOACCESS; break; 3332 case MEM_PROT_READ: p = PAGE_READONLY; break; 3333 case MEM_PROT_RW: p = PAGE_READWRITE; break; 3334 case MEM_PROT_RWX: p = PAGE_EXECUTE_READWRITE; break; 3335 default: 3336 ShouldNotReachHere(); 3337 } 3338 3339 DWORD old_status; 3340 3341 // Strangely enough, on Win32 one can change protection only for committed 3342 // memory; this is not a big deal anyway, as the regions involved are 64K or less. 3343 if (!is_committed) { 3344 commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX, 3345 "cannot commit protection page"); 3346 } 3347 // One cannot use os::guard_memory() here, as on Win32 guard pages 3348 // have different (one-shot) semantics; from MSDN on PAGE_GUARD: 3349 // 3350 // Pages in the region become guard pages. Any attempt to access a guard page 3351 // causes the system to raise a STATUS_GUARD_PAGE exception and turn off 3352 // the guard page status. Guard pages thus act as a one-time access alarm.
3353 return VirtualProtect(addr, bytes, p, &old_status) != 0; 3354 } 3355 3356 bool os::guard_memory(char* addr, size_t bytes) { 3357 DWORD old_status; 3358 return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0; 3359 } 3360 3361 bool os::unguard_memory(char* addr, size_t bytes) { 3362 DWORD old_status; 3363 return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0; 3364 } 3365 3366 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { } 3367 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { } 3368 void os::numa_make_global(char *addr, size_t bytes) { } 3369 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { } 3370 bool os::numa_topology_changed() { return false; } 3371 size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); } 3372 int os::numa_get_group_id() { return 0; } 3373 size_t os::numa_get_leaf_groups(int *ids, size_t size) { 3374 if (numa_node_list_holder.get_count() == 0 && size > 0) { 3375 // Provide an answer for UMA systems 3376 ids[0] = 0; 3377 return 1; 3378 } else { 3379 // check for size bigger than actual groups_num 3380 size = MIN2(size, numa_get_groups_num()); 3381 for (int i = 0; i < (int)size; i++) { 3382 ids[i] = numa_node_list_holder.get_node_list_entry(i); 3383 } 3384 return size; 3385 } 3386 } 3387 3388 bool os::get_page_info(char *start, page_info* info) { 3389 return false; 3390 } 3391 3392 char *os::scan_pages(char *start, char* end, page_info* page_expected, 3393 page_info* page_found) { 3394 return end; 3395 } 3396 3397 char* os::non_memory_address_word() { 3398 // Must never look like an address returned by reserve_memory, 3399 // even in its subfields (as defined by the CPU immediate fields, 3400 // if the CPU splits constants across multiple instructions). 3401 return (char*)-1; 3402 } 3403 3404 #define MAX_ERROR_COUNT 100 3405 #define SYS_THREAD_ERROR 0xffffffffUL 3406 3407 void os::pd_start_thread(Thread* thread) { 3408 DWORD ret = ResumeThread(thread->osthread()->thread_handle()); 3409 // Returns previous suspend state: 3410 // 0: Thread was not suspended 3411 // 1: Thread is running now 3412 // >1: Thread is still suspended. 3413 assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back 3414 } 3415 3416 class HighResolutionInterval : public CHeapObj<mtThread> { 3417 // The default timer resolution seems to be 10 milliseconds. 3418 // (Where is this written down?) 3419 // If someone wants to sleep for only a fraction of the default, 3420 // then we set the timer resolution down to 1 millisecond for 3421 // the duration of their interval. 3422 // We carefully set the resolution back, since otherwise we 3423 // seem to incur an overhead (3%?) that we don't need. 3424 // CONSIDER: if ms is small, say 3, then we should run with a high resolution time. 3425 // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod(). 3426 // Alternatively, we could compute the relative error (503/500 = .6%) and only use 3427 // timeBeginPeriod() if the relative error exceeded some threshold. 3428 // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and 3429 // to decreased efficiency related to increased timer "tick" rates. We want to minimize 3430 // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high 3431 // resolution timers running.
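// As implemented below, high resolution is requested only when (ms % 10) != 0; e.g. a 3 ms sleep raises the timer resolution to 1 ms for its duration, while a 500 ms sleep leaves the default resolution untouched.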
3432 private: 3433 jlong resolution; 3434 public: 3435 HighResolutionInterval(jlong ms) { 3436 resolution = ms % 10L; 3437 if (resolution != 0) { 3438 MMRESULT result = timeBeginPeriod(1L); 3439 } 3440 } 3441 ~HighResolutionInterval() { 3442 if (resolution != 0) { 3443 MMRESULT result = timeEndPeriod(1L); 3444 } 3445 resolution = 0L; 3446 } 3447 }; 3448 3449 int os::sleep(Thread* thread, jlong ms, bool interruptable) { 3450 jlong limit = (jlong) MAXDWORD; 3451 3452 while (ms > limit) { 3453 int res; 3454 if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) { 3455 return res; 3456 } 3457 ms -= limit; 3458 } 3459 3460 assert(thread == Thread::current(), "thread consistency check"); 3461 OSThread* osthread = thread->osthread(); 3462 OSThreadWaitState osts(osthread, false /* not Object.wait() */); 3463 int result; 3464 if (interruptable) { 3465 assert(thread->is_Java_thread(), "must be java thread"); 3466 JavaThread *jt = (JavaThread *) thread; 3467 ThreadBlockInVM tbivm(jt); 3468 3469 jt->set_suspend_equivalent(); 3470 // cleared by handle_special_suspend_equivalent_condition() or 3471 // java_suspend_self() via check_and_wait_while_suspended() 3472 3473 HANDLE events[1]; 3474 events[0] = osthread->interrupt_event(); 3475 HighResolutionInterval *phri=NULL; 3476 if (!ForceTimeHighResolution) { 3477 phri = new HighResolutionInterval(ms); 3478 } 3479 if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) { 3480 result = OS_TIMEOUT; 3481 } else { 3482 ResetEvent(osthread->interrupt_event()); 3483 osthread->set_interrupted(false); 3484 result = OS_INTRPT; 3485 } 3486 delete phri; //if it is NULL, harmless 3487 3488 // were we externally suspended while we were waiting? 3489 jt->check_and_wait_while_suspended(); 3490 } else { 3491 assert(!thread->is_Java_thread(), "must not be java thread"); 3492 Sleep((long) ms); 3493 result = OS_TIMEOUT; 3494 } 3495 return result; 3496 } 3497 3498 // Short sleep, direct OS call. 3499 // 3500 // ms = 0, means allow others (if any) to run. 3501 // 3502 void os::naked_short_sleep(jlong ms) { 3503 assert(ms < 1000, "Un-interruptable sleep, short time use only"); 3504 Sleep(ms); 3505 } 3506 3507 // Sleep forever; naked call to OS-specific sleep; use with CAUTION 3508 void os::infinite_sleep() { 3509 while (true) { // sleep forever ... 3510 Sleep(100000); // ... 100 seconds at a time 3511 } 3512 } 3513 3514 typedef BOOL (WINAPI * STTSignature)(void); 3515 3516 void os::naked_yield() { 3517 // Use either SwitchToThread() or Sleep(0) 3518 // Consider passing back the return value from SwitchToThread(). 3519 if (os::Kernel32Dll::SwitchToThreadAvailable()) { 3520 SwitchToThread(); 3521 } else { 3522 Sleep(0); 3523 } 3524 } 3525 3526 // Win32 only gives you access to seven real priorities at a time, 3527 // so we compress Java's ten down to seven. It would be better 3528 // if we dynamically adjusted relative priorities. 
3529 3530 int os::java_to_os_priority[CriticalPriority + 1] = { 3531 THREAD_PRIORITY_IDLE, // 0 Entry should never be used 3532 THREAD_PRIORITY_LOWEST, // 1 MinPriority 3533 THREAD_PRIORITY_LOWEST, // 2 3534 THREAD_PRIORITY_BELOW_NORMAL, // 3 3535 THREAD_PRIORITY_BELOW_NORMAL, // 4 3536 THREAD_PRIORITY_NORMAL, // 5 NormPriority 3537 THREAD_PRIORITY_NORMAL, // 6 3538 THREAD_PRIORITY_ABOVE_NORMAL, // 7 3539 THREAD_PRIORITY_ABOVE_NORMAL, // 8 3540 THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority 3541 THREAD_PRIORITY_HIGHEST, // 10 MaxPriority 3542 THREAD_PRIORITY_HIGHEST // 11 CriticalPriority 3543 }; 3544 3545 int prio_policy1[CriticalPriority + 1] = { 3546 THREAD_PRIORITY_IDLE, // 0 Entry should never be used 3547 THREAD_PRIORITY_LOWEST, // 1 MinPriority 3548 THREAD_PRIORITY_LOWEST, // 2 3549 THREAD_PRIORITY_BELOW_NORMAL, // 3 3550 THREAD_PRIORITY_BELOW_NORMAL, // 4 3551 THREAD_PRIORITY_NORMAL, // 5 NormPriority 3552 THREAD_PRIORITY_ABOVE_NORMAL, // 6 3553 THREAD_PRIORITY_ABOVE_NORMAL, // 7 3554 THREAD_PRIORITY_HIGHEST, // 8 3555 THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority 3556 THREAD_PRIORITY_TIME_CRITICAL, // 10 MaxPriority 3557 THREAD_PRIORITY_TIME_CRITICAL // 11 CriticalPriority 3558 }; 3559 3560 static int prio_init() { 3561 // If ThreadPriorityPolicy is 1, switch tables 3562 if (ThreadPriorityPolicy == 1) { 3563 int i; 3564 for (i = 0; i < CriticalPriority + 1; i++) { 3565 os::java_to_os_priority[i] = prio_policy1[i]; 3566 } 3567 } 3568 if (UseCriticalJavaThreadPriority) { 3569 os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority]; 3570 } 3571 return 0; 3572 } 3573 3574 OSReturn os::set_native_priority(Thread* thread, int priority) { 3575 if (!UseThreadPriorities) return OS_OK; 3576 bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0; 3577 return ret ? OS_OK : OS_ERR; 3578 } 3579 3580 OSReturn os::get_native_priority(const Thread* const thread, 3581 int* priority_ptr) { 3582 if (!UseThreadPriorities) { 3583 *priority_ptr = java_to_os_priority[NormPriority]; 3584 return OS_OK; 3585 } 3586 int os_prio = GetThreadPriority(thread->osthread()->thread_handle()); 3587 if (os_prio == THREAD_PRIORITY_ERROR_RETURN) { 3588 assert(false, "GetThreadPriority failed"); 3589 return OS_ERR; 3590 } 3591 *priority_ptr = os_prio; 3592 return OS_OK; 3593 } 3594 3595 3596 // Hint to the underlying OS that a task switch would not be good. 3597 // Void return because it's a hint and can fail. 3598 void os::hint_no_preempt() {} 3599 3600 void os::interrupt(Thread* thread) { 3601 assert(!thread->is_Java_thread() || Thread::current() == thread || 3602 Threads_lock->owned_by_self(), 3603 "possibility of dangling Thread pointer"); 3604 3605 OSThread* osthread = thread->osthread(); 3606 osthread->set_interrupted(true); 3607 // More than one thread can get here with the same value of osthread, 3608 // resulting in multiple notifications. We do, however, want the store 3609 // to interrupted() to be visible to other threads before we post 3610 // the interrupt event. 
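//
// Illustrative sketch (not VM code, excluded from compilation) of the
// publish/consume pairing this relies on: the flag is stored and released
// before the event is signalled, and a reader trusts the flag only after it
// has seen the event. The demo_* names are hypothetical.
#if 0
static volatile jint demo_flag = 0;
static HANDLE demo_event = NULL;     // assume: event handle created elsewhere

static void demo_post() {
  demo_flag = 1;                     // store the state ...
  OrderAccess::release();            // ... make the store visible ...
  SetEvent(demo_event);              // ... then publish the notification
}

static bool demo_consume() {
  if (WaitForSingleObject(demo_event, 0) != WAIT_OBJECT_0) {
    return false;                    // no notification -> don't trust the flag
  }
  return demo_flag != 0;             // safe: the store happened before SetEvent()
}
#endif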
3611 OrderAccess::release();
3612 SetEvent(osthread->interrupt_event());
3613 // For JSR166: unpark after setting status
3614 if (thread->is_Java_thread()) {
3615 ((JavaThread*)thread)->parker()->unpark();
3616 }
3617
3618 ParkEvent * ev = thread->_ParkEvent;
3619 if (ev != NULL) ev->unpark();
3620 }
3621
3622
3623 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3624 assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
3625 "possibility of dangling Thread pointer");
3626
3627 OSThread* osthread = thread->osthread();
3628 // There is no synchronization between the setting of the interrupt
3629 // and it being cleared here. It is critical - see 6535709 - that
3630 // we only clear the interrupt state, and reset the interrupt event,
3631 // if we are going to report that we were indeed interrupted - else
3632 // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
3633 // depending on the timing. Checking the thread's interrupt event as well
3634 // as the flag verifies that a real interrupt was posted, and so prevents spurious wakeups.
3635 bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
3636 if (interrupted && clear_interrupted) {
3637 osthread->set_interrupted(false);
3638 ResetEvent(osthread->interrupt_event());
3639 } // Otherwise leave the interrupted state alone
3640
3641 return interrupted;
3642 }
3643
3644 // Gets a pc (hint) for a running thread. Currently used only for profiling.
3645 ExtendedPC os::get_thread_pc(Thread* thread) {
3646 CONTEXT context;
3647 context.ContextFlags = CONTEXT_CONTROL;
3648 HANDLE handle = thread->osthread()->thread_handle();
3649 #ifdef _M_IA64
3650 assert(0, "Fix get_thread_pc");
3651 return ExtendedPC(NULL);
3652 #else
3653 if (GetThreadContext(handle, &context)) {
3654 #ifdef _M_AMD64
3655 return ExtendedPC((address) context.Rip);
3656 #else
3657 return ExtendedPC((address) context.Eip);
3658 #endif
3659 } else {
3660 return ExtendedPC(NULL);
3661 }
3662 #endif
3663 }
3664
3665 // GetCurrentThreadId() returns DWORD
3666 intx os::current_thread_id() { return GetCurrentThreadId(); }
3667
3668 static int _initial_pid = 0;
3669
3670 int os::current_process_id() {
3671 return (_initial_pid ?
_initial_pid : _getpid());
3672 }
3673
3674 int os::win32::_vm_page_size = 0;
3675 int os::win32::_vm_allocation_granularity = 0;
3676 int os::win32::_processor_type = 0;
3677 // Processor level is not available on non-NT systems, use vm_version instead
3678 int os::win32::_processor_level = 0;
3679 julong os::win32::_physical_memory = 0;
3680 size_t os::win32::_default_stack_size = 0;
3681
3682 intx os::win32::_os_thread_limit = 0;
3683 volatile intx os::win32::_os_thread_count = 0;
3684
3685 bool os::win32::_is_nt = false;
3686 bool os::win32::_is_windows_2003 = false;
3687 bool os::win32::_is_windows_server = false;
3688
3689 // 6573254
3690 // Currently, the bug is observed across all the supported Windows releases,
3691 // including the latest one (as of this writing - Windows Server 2012 R2)
3692 bool os::win32::_has_exit_bug = true;
3693 bool os::win32::_has_performance_count = false;
3694
3695 void os::win32::initialize_system_info() {
3696 SYSTEM_INFO si;
3697 GetSystemInfo(&si);
3698 _vm_page_size = si.dwPageSize;
3699 _vm_allocation_granularity = si.dwAllocationGranularity;
3700 _processor_type = si.dwProcessorType;
3701 _processor_level = si.wProcessorLevel;
3702 set_processor_count(si.dwNumberOfProcessors);
3703
3704 MEMORYSTATUSEX ms;
3705 ms.dwLength = sizeof(ms);
3706
3707 // also returns ullAvailPhys (free physical memory bytes), ullTotalVirtual, ullAvailVirtual,
3708 // dwMemoryLoad (% of memory in use)
3709 GlobalMemoryStatusEx(&ms);
3710 _physical_memory = ms.ullTotalPhys;
3711
3712 OSVERSIONINFOEX oi;
3713 oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3714 GetVersionEx((OSVERSIONINFO*)&oi);
3715 switch (oi.dwPlatformId) {
3716 case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
3717 case VER_PLATFORM_WIN32_NT:
3718 _is_nt = true;
3719 {
3720 int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3721 if (os_vers == 5002) {
3722 _is_windows_2003 = true;
3723 }
3724 if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3725 oi.wProductType == VER_NT_SERVER) {
3726 _is_windows_server = true;
3727 }
3728 }
3729 break;
3730 default: fatal("Unknown platform");
3731 }
3732
3733 _default_stack_size = os::current_stack_size();
3734 assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3735 assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3736 "stack size not a multiple of page size");
3737
3738 initialize_performance_counter();
3739
3740 // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is
3741 // known to deadlock the system if the VM issues thread operations (such
3742 // as changing thread priorities) at too high a frequency.
3743 // The value 6000 seems to work well - no deadlocks have been noticed on the
3744 // test programs that we have seen experience this problem.
3745 if (!os::win32::is_nt()) { 3746 StarvationMonitorInterval = 6000; 3747 } 3748 } 3749 3750 3751 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, 3752 int ebuflen) { 3753 char path[MAX_PATH]; 3754 DWORD size; 3755 DWORD pathLen = (DWORD)sizeof(path); 3756 HINSTANCE result = NULL; 3757 3758 // only allow library name without path component 3759 assert(strchr(name, '\\') == NULL, "path not allowed"); 3760 assert(strchr(name, ':') == NULL, "path not allowed"); 3761 if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) { 3762 jio_snprintf(ebuf, ebuflen, 3763 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name); 3764 return NULL; 3765 } 3766 3767 // search system directory 3768 if ((size = GetSystemDirectory(path, pathLen)) > 0) { 3769 if (size >= pathLen) { 3770 return NULL; // truncated 3771 } 3772 if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) { 3773 return NULL; // truncated 3774 } 3775 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3776 return result; 3777 } 3778 } 3779 3780 // try Windows directory 3781 if ((size = GetWindowsDirectory(path, pathLen)) > 0) { 3782 if (size >= pathLen) { 3783 return NULL; // truncated 3784 } 3785 if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) { 3786 return NULL; // truncated 3787 } 3788 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3789 return result; 3790 } 3791 } 3792 3793 jio_snprintf(ebuf, ebuflen, 3794 "os::win32::load_windows_dll() cannot load %s from system directories.", name); 3795 return NULL; 3796 } 3797 3798 #define EXIT_TIMEOUT 300000 /* 5 minutes */ 3799 3800 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) { 3801 InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect); 3802 return TRUE; 3803 } 3804 3805 int os::win32::exit_process_or_thread(Ept what, int exit_code) { 3806 // Basic approach: 3807 // - Each exiting thread registers its intent to exit and then does so. 3808 // - A thread trying to terminate the process must wait for all 3809 // threads currently exiting to complete their exit. 3810 3811 if (os::win32::has_exit_bug()) { 3812 // The array holds handles of the threads that have started exiting by calling 3813 // _endthreadex(). 3814 // Should be large enough to avoid blocking the exiting thread due to lack of 3815 // a free slot. 3816 static HANDLE handles[MAXIMUM_WAIT_OBJECTS]; 3817 static int handle_count = 0; 3818 3819 static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT; 3820 static CRITICAL_SECTION crit_sect; 3821 static volatile jint process_exiting = 0; 3822 int i, j; 3823 DWORD res; 3824 HANDLE hproc, hthr; 3825 3826 // The first thread that reached this point, initializes the critical section. 3827 if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) { 3828 warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__); 3829 } else if (OrderAccess::load_acquire(&process_exiting) == 0) { 3830 EnterCriticalSection(&crit_sect); 3831 3832 if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) { 3833 // Remove from the array those handles of the threads that have completed exiting. 
3834 for (i = 0, j = 0; i < handle_count; ++i) { 3835 res = WaitForSingleObject(handles[i], 0 /* don't wait */); 3836 if (res == WAIT_TIMEOUT) { 3837 handles[j++] = handles[i]; 3838 } else { 3839 if (res == WAIT_FAILED) { 3840 warning("WaitForSingleObject failed (%u) in %s: %d\n", 3841 GetLastError(), __FILE__, __LINE__); 3842 } 3843 // Don't keep the handle, if we failed waiting for it. 3844 CloseHandle(handles[i]); 3845 } 3846 } 3847 3848 // If there's no free slot in the array of the kept handles, we'll have to 3849 // wait until at least one thread completes exiting. 3850 if ((handle_count = j) == MAXIMUM_WAIT_OBJECTS) { 3851 // Raise the priority of the oldest exiting thread to increase its chances 3852 // to complete sooner. 3853 SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL); 3854 res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT); 3855 if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) { 3856 i = (res - WAIT_OBJECT_0); 3857 handle_count = MAXIMUM_WAIT_OBJECTS - 1; 3858 for (; i < handle_count; ++i) { 3859 handles[i] = handles[i + 1]; 3860 } 3861 } else { 3862 warning("WaitForMultipleObjects %s (%u) in %s: %d\n", 3863 (res == WAIT_FAILED ? "failed" : "timed out"), 3864 GetLastError(), __FILE__, __LINE__); 3865 // Don't keep handles, if we failed waiting for them. 3866 for (i = 0; i < MAXIMUM_WAIT_OBJECTS; ++i) { 3867 CloseHandle(handles[i]); 3868 } 3869 handle_count = 0; 3870 } 3871 } 3872 3873 // Store a duplicate of the current thread handle in the array of handles. 3874 hproc = GetCurrentProcess(); 3875 hthr = GetCurrentThread(); 3876 if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count], 3877 0, FALSE, DUPLICATE_SAME_ACCESS)) { 3878 warning("DuplicateHandle failed (%u) in %s: %d\n", 3879 GetLastError(), __FILE__, __LINE__); 3880 } else { 3881 ++handle_count; 3882 } 3883 3884 // The current exiting thread has stored its handle in the array, and now 3885 // should leave the critical section before calling _endthreadex(). 3886 3887 } else if (what != EPT_THREAD) { 3888 if (handle_count > 0) { 3889 // Before ending the process, make sure all the threads that had called 3890 // _endthreadex() completed. 3891 3892 // Set the priority level of the current thread to the same value as 3893 // the priority level of exiting threads. 3894 // This is to ensure it will be given a fair chance to execute if 3895 // the timeout expires. 3896 hthr = GetCurrentThread(); 3897 SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL); 3898 for (i = 0; i < handle_count; ++i) { 3899 SetThreadPriority(handles[i], THREAD_PRIORITY_ABOVE_NORMAL); 3900 } 3901 res = WaitForMultipleObjects(handle_count, handles, TRUE, EXIT_TIMEOUT); 3902 if (res == WAIT_FAILED || res == WAIT_TIMEOUT) { 3903 warning("WaitForMultipleObjects %s (%u) in %s: %d\n", 3904 (res == WAIT_FAILED ? "failed" : "timed out"), 3905 GetLastError(), __FILE__, __LINE__); 3906 } 3907 for (i = 0; i < handle_count; ++i) { 3908 CloseHandle(handles[i]); 3909 } 3910 handle_count = 0; 3911 } 3912 3913 OrderAccess::release_store(&process_exiting, 1); 3914 } 3915 3916 LeaveCriticalSection(&crit_sect); 3917 } 3918 3919 if (what == EPT_THREAD) { 3920 while (OrderAccess::load_acquire(&process_exiting) != 0) { 3921 // Some other thread is about to call exit(), so we 3922 // don't let the current thread proceed to _endthreadex() 3923 SuspendThread(GetCurrentThread()); 3924 // Avoid busy-wait loop, if SuspendThread() failed. 
3925 Sleep(EXIT_TIMEOUT); 3926 } 3927 } 3928 } 3929 3930 // We are here if either 3931 // - there's no 'race at exit' bug on this OS release; 3932 // - initialization of the critical section failed (unlikely); 3933 // - the current thread has stored its handle and left the critical section; 3934 // - the process-exiting thread has raised the flag and left the critical section. 3935 if (what == EPT_THREAD) { 3936 _endthreadex((unsigned)exit_code); 3937 } else if (what == EPT_PROCESS) { 3938 ::exit(exit_code); 3939 } else { 3940 _exit(exit_code); 3941 } 3942 3943 // Should not reach here 3944 return exit_code; 3945 } 3946 3947 #undef EXIT_TIMEOUT 3948 3949 void os::win32::setmode_streams() { 3950 _setmode(_fileno(stdin), _O_BINARY); 3951 _setmode(_fileno(stdout), _O_BINARY); 3952 _setmode(_fileno(stderr), _O_BINARY); 3953 } 3954 3955 3956 bool os::is_debugger_attached() { 3957 return IsDebuggerPresent() ? true : false; 3958 } 3959 3960 3961 void os::wait_for_keypress_at_exit(void) { 3962 if (PauseAtExit) { 3963 fprintf(stderr, "Press any key to continue...\n"); 3964 fgetc(stdin); 3965 } 3966 } 3967 3968 3969 int os::message_box(const char* title, const char* message) { 3970 int result = MessageBox(NULL, message, title, 3971 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY); 3972 return result == IDYES; 3973 } 3974 3975 int os::allocate_thread_local_storage() { 3976 return TlsAlloc(); 3977 } 3978 3979 3980 void os::free_thread_local_storage(int index) { 3981 TlsFree(index); 3982 } 3983 3984 3985 void os::thread_local_storage_at_put(int index, void* value) { 3986 TlsSetValue(index, value); 3987 assert(thread_local_storage_at(index) == value, "Just checking"); 3988 } 3989 3990 3991 void* os::thread_local_storage_at(int index) { 3992 return TlsGetValue(index); 3993 } 3994 3995 3996 #ifndef PRODUCT 3997 #ifndef _WIN64 3998 // Helpers to check whether NX protection is enabled 3999 int nx_exception_filter(_EXCEPTION_POINTERS *pex) { 4000 if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 4001 pex->ExceptionRecord->NumberParameters > 0 && 4002 pex->ExceptionRecord->ExceptionInformation[0] == 4003 EXCEPTION_INFO_EXEC_VIOLATION) { 4004 return EXCEPTION_EXECUTE_HANDLER; 4005 } 4006 return EXCEPTION_CONTINUE_SEARCH; 4007 } 4008 4009 void nx_check_protection() { 4010 // If NX is enabled we'll get an exception calling into code on the stack 4011 char code[] = { (char)0xC3 }; // ret 4012 void *code_ptr = (void *)code; 4013 __try { 4014 __asm call code_ptr 4015 } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) { 4016 tty->print_raw_cr("NX protection detected."); 4017 } 4018 } 4019 #endif // _WIN64 4020 #endif // PRODUCT 4021 4022 // this is called _before_ the global arguments have been parsed 4023 void os::init(void) { 4024 _initial_pid = _getpid(); 4025 4026 init_random(1234567); 4027 4028 win32::initialize_system_info(); 4029 win32::setmode_streams(); 4030 init_page_sizes((size_t) win32::vm_page_size()); 4031 4032 // This may be overridden later when argument processing is done. 
4033 FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, 4034 os::win32::is_windows_2003()); 4035 4036 // Initialize main_process and main_thread 4037 main_process = GetCurrentProcess(); // Remember main_process is a pseudo handle 4038 if (!DuplicateHandle(main_process, GetCurrentThread(), main_process, 4039 &main_thread, THREAD_ALL_ACCESS, false, 0)) { 4040 fatal("DuplicateHandle failed\n"); 4041 } 4042 main_thread_id = (int) GetCurrentThreadId(); 4043 } 4044 4045 // To install functions for atexit processing 4046 extern "C" { 4047 static void perfMemory_exit_helper() { 4048 perfMemory_exit(); 4049 } 4050 } 4051 4052 static jint initSock(); 4053 4054 // this is called _after_ the global arguments have been parsed 4055 jint os::init_2(void) { 4056 // Allocate a single page and mark it as readable for safepoint polling 4057 address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY); 4058 guarantee(polling_page != NULL, "Reserve Failed for polling page"); 4059 4060 address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY); 4061 guarantee(return_page != NULL, "Commit Failed for polling page"); 4062 4063 os::set_polling_page(polling_page); 4064 4065 #ifndef PRODUCT 4066 if (Verbose && PrintMiscellaneous) { 4067 tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", 4068 (intptr_t)polling_page); 4069 } 4070 #endif 4071 4072 if (!UseMembar) { 4073 address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE); 4074 guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page"); 4075 4076 return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE); 4077 guarantee(return_page != NULL, "Commit Failed for memory serialize page"); 4078 4079 os::set_memory_serialize_page(mem_serialize_page); 4080 4081 #ifndef PRODUCT 4082 if (Verbose && PrintMiscellaneous) { 4083 tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", 4084 (intptr_t)mem_serialize_page); 4085 } 4086 #endif 4087 } 4088 4089 // Setup Windows Exceptions 4090 4091 // for debugging float code generation bugs 4092 if (ForceFloatExceptions) { 4093 #ifndef _WIN64 4094 static long fp_control_word = 0; 4095 __asm { fstcw fp_control_word } 4096 // see Intel PPro Manual, Vol. 2, p 7-16 4097 const long precision = 0x20; 4098 const long underflow = 0x10; 4099 const long overflow = 0x08; 4100 const long zero_div = 0x04; 4101 const long denorm = 0x02; 4102 const long invalid = 0x01; 4103 fp_control_word |= invalid; 4104 __asm { fldcw fp_control_word } 4105 #endif 4106 } 4107 4108 // If stack_commit_size is 0, windows will reserve the default size, 4109 // but only commit a small portion of it. 4110 size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size()); 4111 size_t default_reserve_size = os::win32::default_stack_size(); 4112 size_t actual_reserve_size = stack_commit_size; 4113 if (stack_commit_size < default_reserve_size) { 4114 // If stack_commit_size == 0, we want this too 4115 actual_reserve_size = default_reserve_size; 4116 } 4117 4118 // Check minimum allowable stack size for thread creation and to initialize 4119 // the java system classes, including StackOverflowError - depends on page 4120 // size. Add a page for compiler2 recursion in main thread. 4121 // Add in 2*BytesPerWord times page size to account for VM stack during 4122 // class initialization depending on 32 or 64 bit VM. 
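//
// Worked example of that formula with purely illustrative values (these are
// not the actual defaults): with 2 yellow pages, 1 red page, 6 shadow pages,
// 8-byte words, COMPILER2 present and a 4K page, the minimum would be
// (2 + 1 + 6 + 2*8 + 1) * 4K == 26 * 4K == 104K.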
4123 size_t min_stack_allowed =
4124 (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
4125 2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
4126 if (actual_reserve_size < min_stack_allowed) {
4127 tty->print_cr("\nThe stack size specified is too small. "
4128 "Specify at least %dk",
4129 min_stack_allowed / K);
4130 return JNI_ERR;
4131 }
4132
4133 JavaThread::set_stack_size_at_create(stack_commit_size);
4134
4135 // Calculate the theoretical maximum number of threads, to guard against
4136 // artificial out-of-memory situations where all available address space has
4137 // been reserved by thread stacks.
4138 assert(actual_reserve_size != 0, "Must have a stack");
4139
4140 // Calculate the thread limit when we should start doing Virtual Memory
4141 // banging. Currently that is when the threads will have used all but 200Mb of space (for example, on a 32-bit VM with 512K thread stacks, roughly (2G - 200M) / 512K, i.e. about 3700 threads).
4142 //
4143 // TODO: consider performing a similar calculation for commit size instead
4144 // of reserve size, since on a 64-bit platform we'll run into that more
4145 // often than running out of virtual memory space. We can use the
4146 // lower value of the two calculations as the os_thread_limit.
4147 size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4148 win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4149
4150 // atexit methods are called in the reverse order of their registration.
4151 // there is no limit to the number of functions registered. atexit does
4152 // not set errno.
4153
4154 if (PerfAllowAtExitRegistration) {
4155 // only register atexit functions if PerfAllowAtExitRegistration is set.
4156 // atexit functions can be delayed until process exit time, which
4157 // can be problematic for embedded VM situations. Embedded VMs should
4158 // call DestroyJavaVM() to assure that VM resources are released.
4159
4160 // note: perfMemory_exit_helper atexit function may be removed in
4161 // the future if the appropriate cleanup code can be added to the
4162 // VM_Exit VMOperation's doit method.
4163 if (atexit(perfMemory_exit_helper) != 0) { 4164 warning("os::init_2 atexit(perfMemory_exit_helper) failed"); 4165 } 4166 } 4167 4168 #ifndef _WIN64 4169 // Print something if NX is enabled (win32 on AMD64) 4170 NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection()); 4171 #endif 4172 4173 // initialize thread priority policy 4174 prio_init(); 4175 4176 if (UseNUMA && !ForceNUMA) { 4177 UseNUMA = false; // We don't fully support this yet 4178 } 4179 4180 if (UseNUMAInterleaving) { 4181 // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag 4182 bool success = numa_interleaving_init(); 4183 if (!success) UseNUMAInterleaving = false; 4184 } 4185 4186 if (initSock() != JNI_OK) { 4187 return JNI_ERR; 4188 } 4189 4190 return JNI_OK; 4191 } 4192 4193 // Mark the polling page as unreadable 4194 void os::make_polling_page_unreadable(void) { 4195 DWORD old_status; 4196 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), 4197 PAGE_NOACCESS, &old_status)) { 4198 fatal("Could not disable polling page"); 4199 } 4200 } 4201 4202 // Mark the polling page as readable 4203 void os::make_polling_page_readable(void) { 4204 DWORD old_status; 4205 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), 4206 PAGE_READONLY, &old_status)) { 4207 fatal("Could not enable polling page"); 4208 } 4209 } 4210 4211 4212 int os::stat(const char *path, struct stat *sbuf) { 4213 char pathbuf[MAX_PATH]; 4214 if (strlen(path) > MAX_PATH - 1) { 4215 errno = ENAMETOOLONG; 4216 return -1; 4217 } 4218 os::native_path(strcpy(pathbuf, path)); 4219 int ret = ::stat(pathbuf, sbuf); 4220 if (sbuf != NULL && UseUTCFileTimestamp) { 4221 // Fix for 6539723. st_mtime returned from stat() is dependent on 4222 // the system timezone and so can return different values for the 4223 // same file if/when daylight savings time changes. This adjustment 4224 // makes sure the same timestamp is returned regardless of the TZ. 4225 // 4226 // See: 4227 // http://msdn.microsoft.com/library/ 4228 // default.asp?url=/library/en-us/sysinfo/base/ 4229 // time_zone_information_str.asp 4230 // and 4231 // http://msdn.microsoft.com/library/default.asp?url= 4232 // /library/en-us/sysinfo/base/settimezoneinformation.asp 4233 // 4234 // NOTE: there is a insidious bug here: If the timezone is changed 4235 // after the call to stat() but before 'GetTimeZoneInformation()', then 4236 // the adjustment we do here will be wrong and we'll return the wrong 4237 // value (which will likely end up creating an invalid class data 4238 // archive). Absent a better API for this, or some time zone locking 4239 // mechanism, we'll have to live with this risk. 4240 TIME_ZONE_INFORMATION tz; 4241 DWORD tzid = GetTimeZoneInformation(&tz); 4242 int daylightBias = 4243 (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias; 4244 sbuf->st_mtime += (tz.Bias + daylightBias) * 60; 4245 } 4246 return ret; 4247 } 4248 4249 4250 #define FT2INT64(ft) \ 4251 ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime)) 4252 4253 4254 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) 4255 // are used by JVM M&M and JVMTI to get user+sys or user CPU time 4256 // of a thread. 4257 // 4258 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns 4259 // the fast estimate available on the platform. 
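//
// The implementations below rely on FILETIME being a 64-bit count of 100 ns
// units split across two 32-bit halves. A minimal illustrative sketch (not
// VM code, excluded from compilation) of the arithmetic that FT2INT64 and
// thread_cpu_time() perform:
#if 0
static jlong demo_filetime_to_nanos(FILETIME ft) {
  // reassemble the two 32-bit halves into one 64-bit count of 100 ns units
  jlong hundred_ns_units = ((jlong) ft.dwHighDateTime << 32) | (julong) ft.dwLowDateTime;
  // 100 ns units -> nanoseconds, matching the "* 100" in thread_cpu_time()
  return hundred_ns_units * 100;
}
#endif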
4260
4261 // current_thread_cpu_time() is not optimized for Windows yet
4262 jlong os::current_thread_cpu_time() {
4263 // return user + sys since the cost is the same
4264 return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4265 }
4266
4267 jlong os::thread_cpu_time(Thread* thread) {
4268 // consistent with what current_thread_cpu_time() returns.
4269 return os::thread_cpu_time(thread, true /* user+sys */);
4270 }
4271
4272 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4273 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4274 }
4275
4276 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4277 // This code is copied from the classic VM -> hpi::sysThreadCPUTime
4278 // If this function changes, os::is_thread_cpu_time_supported() should too
4279 if (os::win32::is_nt()) {
4280 FILETIME CreationTime;
4281 FILETIME ExitTime;
4282 FILETIME KernelTime;
4283 FILETIME UserTime;
4284
4285 if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4286 &ExitTime, &KernelTime, &UserTime) == 0) {
4287 return -1;
4288 } else if (user_sys_cpu_time) {
4289 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4290 } else {
4291 return FT2INT64(UserTime) * 100;
4292 }
4293 } else {
4294 return (jlong) timeGetTime() * 1000000;
4295 }
4296 }
4297
4298 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4299 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits
4300 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time
4301 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time
4302 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned
4303 }
4304
4305 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4306 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits
4307 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time
4308 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time
4309 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned
4310 }
4311
4312 bool os::is_thread_cpu_time_supported() {
4313 // see os::thread_cpu_time
4314 if (os::win32::is_nt()) {
4315 FILETIME CreationTime;
4316 FILETIME ExitTime;
4317 FILETIME KernelTime;
4318 FILETIME UserTime;
4319
4320 if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4321 &KernelTime, &UserTime) == 0) {
4322 return false;
4323 } else {
4324 return true;
4325 }
4326 } else {
4327 return false;
4328 }
4329 }
4330
4331 // Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4332 // It does have primitives (PDH API) to get CPU usage and run queue length.
4333 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4334 // If we wanted to implement loadavg on Windows, we have a few options:
4335 //
4336 // a) Query CPU usage and run queue length and "fake" an answer by
4337 // returning the CPU usage if it's under 100%, and the run queue
4338 // length otherwise. It turns out that querying is pretty slow
4339 // on Windows, on the order of 200 microseconds on a fast machine.
4340 // Note that on Windows the CPU usage value is the % usage
4341 // since the last time the API was called (and the first call
4342 // returns 100%), so we'd have to deal with that as well.
4343 //
4344 // b) Sample the "fake" answer using a sampling thread and store
4345 // the answer in a global variable.
The call to loadavg would 4346 // just return the value of the global, avoiding the slow query. 4347 // 4348 // c) Sample a better answer using exponential decay to smooth the 4349 // value. This is basically the algorithm used by UNIX kernels. 4350 // 4351 // Note that sampling thread starvation could affect both (b) and (c). 4352 int os::loadavg(double loadavg[], int nelem) { 4353 return -1; 4354 } 4355 4356 4357 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield() 4358 bool os::dont_yield() { 4359 return DontYieldALot; 4360 } 4361 4362 // This method is a slightly reworked copy of JDK's sysOpen 4363 // from src/windows/hpi/src/sys_api_md.c 4364 4365 int os::open(const char *path, int oflag, int mode) { 4366 char pathbuf[MAX_PATH]; 4367 4368 if (strlen(path) > MAX_PATH - 1) { 4369 errno = ENAMETOOLONG; 4370 return -1; 4371 } 4372 os::native_path(strcpy(pathbuf, path)); 4373 return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); 4374 } 4375 4376 FILE* os::open(int fd, const char* mode) { 4377 return ::_fdopen(fd, mode); 4378 } 4379 4380 // Is a (classpath) directory empty? 4381 bool os::dir_is_empty(const char* path) { 4382 WIN32_FIND_DATA fd; 4383 HANDLE f = FindFirstFile(path, &fd); 4384 if (f == INVALID_HANDLE_VALUE) { 4385 return true; 4386 } 4387 FindClose(f); 4388 return false; 4389 } 4390 4391 // create binary file, rewriting existing file if required 4392 int os::create_binary_file(const char* path, bool rewrite_existing) { 4393 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY; 4394 if (!rewrite_existing) { 4395 oflags |= _O_EXCL; 4396 } 4397 return ::open(path, oflags, _S_IREAD | _S_IWRITE); 4398 } 4399 4400 // return current position of file pointer 4401 jlong os::current_file_offset(int fd) { 4402 return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR); 4403 } 4404 4405 // move file pointer to the specified offset 4406 jlong os::seek_to_file_offset(int fd, jlong offset) { 4407 return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET); 4408 } 4409 4410 4411 jlong os::lseek(int fd, jlong offset, int whence) { 4412 return (jlong) ::_lseeki64(fd, offset, whence); 4413 } 4414 4415 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) { 4416 OVERLAPPED ov; 4417 DWORD nread; 4418 BOOL result; 4419 4420 ZeroMemory(&ov, sizeof(ov)); 4421 ov.Offset = (DWORD)offset; 4422 ov.OffsetHigh = (DWORD)(offset >> 32); 4423 4424 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4425 4426 result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov); 4427 4428 return result ? nread : 0; 4429 } 4430 4431 4432 // This method is a slightly reworked copy of JDK's sysNativePath 4433 // from src/windows/hpi/src/path_md.c 4434 4435 // Convert a pathname to native format. On win32, this involves forcing all 4436 // separators to be '\\' rather than '/' (both are legal inputs, but Win95 4437 // sometimes rejects '/') and removing redundant separators. The input path is 4438 // assumed to have been converted into the character encoding used by the local 4439 // system. Because this might be a double-byte encoding, care is taken to 4440 // treat double-byte lead characters correctly. 4441 // 4442 // This procedure modifies the given path in place, as the result is never 4443 // longer than the original. There is no error return; this operation always 4444 // succeeds. 
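//
// A few illustrative conversions (derived from the rules above, shown here
// for reference only):
//
//   "c:/foo//bar/"      becomes "c:\foo\bar"     (separators collapsed, trailing one dropped)
//   "/c:/path"          becomes "c:\path"        (leading separator before a drive dropped)
//   "//server//share/"  becomes "\\server\share" (UNC prefix preserved)
//   "c:/"               becomes "c:\"            (the root of a drive keeps its separator)
//   "z:"                becomes "z:."            (see the CRT workaround at the end of the function)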
4445 char * os::native_path(char *path) { 4446 char *src = path, *dst = path, *end = path; 4447 char *colon = NULL; // If a drive specifier is found, this will 4448 // point to the colon following the drive letter 4449 4450 // Assumption: '/', '\\', ':', and drive letters are never lead bytes 4451 assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\')) 4452 && (!::IsDBCSLeadByte(':'))), "Illegal lead byte"); 4453 4454 // Check for leading separators 4455 #define isfilesep(c) ((c) == '/' || (c) == '\\') 4456 while (isfilesep(*src)) { 4457 src++; 4458 } 4459 4460 if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') { 4461 // Remove leading separators if followed by drive specifier. This 4462 // hack is necessary to support file URLs containing drive 4463 // specifiers (e.g., "file://c:/path"). As a side effect, 4464 // "/c:/path" can be used as an alternative to "c:/path". 4465 *dst++ = *src++; 4466 colon = dst; 4467 *dst++ = ':'; 4468 src++; 4469 } else { 4470 src = path; 4471 if (isfilesep(src[0]) && isfilesep(src[1])) { 4472 // UNC pathname: Retain first separator; leave src pointed at 4473 // second separator so that further separators will be collapsed 4474 // into the second separator. The result will be a pathname 4475 // beginning with "\\\\" followed (most likely) by a host name. 4476 src = dst = path + 1; 4477 path[0] = '\\'; // Force first separator to '\\' 4478 } 4479 } 4480 4481 end = dst; 4482 4483 // Remove redundant separators from remainder of path, forcing all 4484 // separators to be '\\' rather than '/'. Also, single byte space 4485 // characters are removed from the end of the path because those 4486 // are not legal ending characters on this operating system. 4487 // 4488 while (*src != '\0') { 4489 if (isfilesep(*src)) { 4490 *dst++ = '\\'; src++; 4491 while (isfilesep(*src)) src++; 4492 if (*src == '\0') { 4493 // Check for trailing separator 4494 end = dst; 4495 if (colon == dst - 2) break; // "z:\\" 4496 if (dst == path + 1) break; // "\\" 4497 if (dst == path + 2 && isfilesep(path[0])) { 4498 // "\\\\" is not collapsed to "\\" because "\\\\" marks the 4499 // beginning of a UNC pathname. Even though it is not, by 4500 // itself, a valid UNC pathname, we leave it as is in order 4501 // to be consistent with the path canonicalizer as well 4502 // as the win32 APIs, which treat this case as an invalid 4503 // UNC pathname rather than as an alias for the root 4504 // directory of the current drive. 4505 break; 4506 } 4507 end = --dst; // Path does not denote a root directory, so 4508 // remove trailing separator 4509 break; 4510 } 4511 end = dst; 4512 } else { 4513 if (::IsDBCSLeadByte(*src)) { // Copy a double-byte character 4514 *dst++ = *src++; 4515 if (*src) *dst++ = *src++; 4516 end = dst; 4517 } else { // Copy a single-byte character 4518 char c = *src++; 4519 *dst++ = c; 4520 // Space is not a legal ending character 4521 if (c != ' ') end = dst; 4522 } 4523 } 4524 } 4525 4526 *end = '\0'; 4527 4528 // For "z:", add "." 
to work around a bug in the C runtime library 4529 if (colon == dst - 1) { 4530 path[2] = '.'; 4531 path[3] = '\0'; 4532 } 4533 4534 return path; 4535 } 4536 4537 // This code is a copy of JDK's sysSetLength 4538 // from src/windows/hpi/src/sys_api_md.c 4539 4540 int os::ftruncate(int fd, jlong length) { 4541 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4542 long high = (long)(length >> 32); 4543 DWORD ret; 4544 4545 if (h == (HANDLE)(-1)) { 4546 return -1; 4547 } 4548 4549 ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN); 4550 if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) { 4551 return -1; 4552 } 4553 4554 if (::SetEndOfFile(h) == FALSE) { 4555 return -1; 4556 } 4557 4558 return 0; 4559 } 4560 4561 4562 // This code is a copy of JDK's sysSync 4563 // from src/windows/hpi/src/sys_api_md.c 4564 // except for the legacy workaround for a bug in Win 98 4565 4566 int os::fsync(int fd) { 4567 HANDLE handle = (HANDLE)::_get_osfhandle(fd); 4568 4569 if ((!::FlushFileBuffers(handle)) && 4570 (GetLastError() != ERROR_ACCESS_DENIED)) { 4571 // from winerror.h 4572 return -1; 4573 } 4574 return 0; 4575 } 4576 4577 static int nonSeekAvailable(int, long *); 4578 static int stdinAvailable(int, long *); 4579 4580 #define S_ISCHR(mode) (((mode) & _S_IFCHR) == _S_IFCHR) 4581 #define S_ISFIFO(mode) (((mode) & _S_IFIFO) == _S_IFIFO) 4582 4583 // This code is a copy of JDK's sysAvailable 4584 // from src/windows/hpi/src/sys_api_md.c 4585 4586 int os::available(int fd, jlong *bytes) { 4587 jlong cur, end; 4588 struct _stati64 stbuf64; 4589 4590 if (::_fstati64(fd, &stbuf64) >= 0) { 4591 int mode = stbuf64.st_mode; 4592 if (S_ISCHR(mode) || S_ISFIFO(mode)) { 4593 int ret; 4594 long lpbytes; 4595 if (fd == 0) { 4596 ret = stdinAvailable(fd, &lpbytes); 4597 } else { 4598 ret = nonSeekAvailable(fd, &lpbytes); 4599 } 4600 (*bytes) = (jlong)(lpbytes); 4601 return ret; 4602 } 4603 if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) { 4604 return FALSE; 4605 } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) { 4606 return FALSE; 4607 } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) { 4608 return FALSE; 4609 } 4610 *bytes = end - cur; 4611 return TRUE; 4612 } else { 4613 return FALSE; 4614 } 4615 } 4616 4617 // This code is a copy of JDK's nonSeekAvailable 4618 // from src/windows/hpi/src/sys_api_md.c 4619 4620 static int nonSeekAvailable(int fd, long *pbytes) { 4621 // This is used for available on non-seekable devices 4622 // (like both named and anonymous pipes, such as pipes 4623 // connected to an exec'd process). 4624 // Standard Input is a special case. 4625 HANDLE han; 4626 4627 if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) { 4628 return FALSE; 4629 } 4630 4631 if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) { 4632 // PeekNamedPipe fails when at EOF. In that case we 4633 // simply make *pbytes = 0 which is consistent with the 4634 // behavior we get on Solaris when an fd is at EOF. 4635 // The only alternative is to raise an Exception, 4636 // which isn't really warranted. 
4637 // 4638 if (::GetLastError() != ERROR_BROKEN_PIPE) { 4639 return FALSE; 4640 } 4641 *pbytes = 0; 4642 } 4643 return TRUE; 4644 } 4645 4646 #define MAX_INPUT_EVENTS 2000 4647 4648 // This code is a copy of JDK's stdinAvailable 4649 // from src/windows/hpi/src/sys_api_md.c 4650 4651 static int stdinAvailable(int fd, long *pbytes) { 4652 HANDLE han; 4653 DWORD numEventsRead = 0; // Number of events read from buffer 4654 DWORD numEvents = 0; // Number of events in buffer 4655 DWORD i = 0; // Loop index 4656 DWORD curLength = 0; // Position marker 4657 DWORD actualLength = 0; // Number of bytes readable 4658 BOOL error = FALSE; // Error holder 4659 INPUT_RECORD *lpBuffer; // Pointer to records of input events 4660 4661 if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) { 4662 return FALSE; 4663 } 4664 4665 // Construct an array of input records in the console buffer 4666 error = ::GetNumberOfConsoleInputEvents(han, &numEvents); 4667 if (error == 0) { 4668 return nonSeekAvailable(fd, pbytes); 4669 } 4670 4671 // lpBuffer must fit into 64K or else PeekConsoleInput fails 4672 if (numEvents > MAX_INPUT_EVENTS) { 4673 numEvents = MAX_INPUT_EVENTS; 4674 } 4675 4676 lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal); 4677 if (lpBuffer == NULL) { 4678 return FALSE; 4679 } 4680 4681 error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead); 4682 if (error == 0) { 4683 os::free(lpBuffer); 4684 return FALSE; 4685 } 4686 4687 // Examine input records for the number of bytes available 4688 for (i=0; i<numEvents; i++) { 4689 if (lpBuffer[i].EventType == KEY_EVENT) { 4690 4691 KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *) 4692 &(lpBuffer[i].Event); 4693 if (keyRecord->bKeyDown == TRUE) { 4694 CHAR *keyPressed = (CHAR *) &(keyRecord->uChar); 4695 curLength++; 4696 if (*keyPressed == '\r') { 4697 actualLength = curLength; 4698 } 4699 } 4700 } 4701 } 4702 4703 if (lpBuffer != NULL) { 4704 os::free(lpBuffer); 4705 } 4706 4707 *pbytes = (long) actualLength; 4708 return TRUE; 4709 } 4710 4711 // Map a block of memory. 4712 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 4713 char *addr, size_t bytes, bool read_only, 4714 bool allow_exec) { 4715 HANDLE hFile; 4716 char* base; 4717 4718 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL, 4719 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); 4720 if (hFile == NULL) { 4721 if (PrintMiscellaneous && Verbose) { 4722 DWORD err = GetLastError(); 4723 tty->print_cr("CreateFile() failed: GetLastError->%ld.", err); 4724 } 4725 return NULL; 4726 } 4727 4728 if (allow_exec) { 4729 // CreateFileMapping/MapViewOfFileEx can't map executable memory 4730 // unless it comes from a PE image (which the shared archive is not.) 4731 // Even VirtualProtect refuses to give execute access to mapped memory 4732 // that was not previously executable. 4733 // 4734 // Instead, stick the executable region in anonymous memory. Yuck. 4735 // Penalty is that ~4 pages will not be shareable - in the future 4736 // we might consider DLLizing the shared archive with a proper PE 4737 // header so that mapping executable + sharing is possible. 
4738 4739 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE, 4740 PAGE_READWRITE); 4741 if (base == NULL) { 4742 if (PrintMiscellaneous && Verbose) { 4743 DWORD err = GetLastError(); 4744 tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err); 4745 } 4746 CloseHandle(hFile); 4747 return NULL; 4748 } 4749 4750 DWORD bytes_read; 4751 OVERLAPPED overlapped; 4752 overlapped.Offset = (DWORD)file_offset; 4753 overlapped.OffsetHigh = 0; 4754 overlapped.hEvent = NULL; 4755 // ReadFile guarantees that if the return value is true, the requested 4756 // number of bytes were read before returning. 4757 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; 4758 if (!res) { 4759 if (PrintMiscellaneous && Verbose) { 4760 DWORD err = GetLastError(); 4761 tty->print_cr("ReadFile() failed: GetLastError->%ld.", err); 4762 } 4763 release_memory(base, bytes); 4764 CloseHandle(hFile); 4765 return NULL; 4766 } 4767 } else { 4768 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, 4769 NULL /* file_name */); 4770 if (hMap == NULL) { 4771 if (PrintMiscellaneous && Verbose) { 4772 DWORD err = GetLastError(); 4773 tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err); 4774 } 4775 CloseHandle(hFile); 4776 return NULL; 4777 } 4778 4779 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; 4780 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, 4781 (DWORD)bytes, addr); 4782 if (base == NULL) { 4783 if (PrintMiscellaneous && Verbose) { 4784 DWORD err = GetLastError(); 4785 tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err); 4786 } 4787 CloseHandle(hMap); 4788 CloseHandle(hFile); 4789 return NULL; 4790 } 4791 4792 if (CloseHandle(hMap) == 0) { 4793 if (PrintMiscellaneous && Verbose) { 4794 DWORD err = GetLastError(); 4795 tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err); 4796 } 4797 CloseHandle(hFile); 4798 return base; 4799 } 4800 } 4801 4802 if (allow_exec) { 4803 DWORD old_protect; 4804 DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; 4805 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; 4806 4807 if (!res) { 4808 if (PrintMiscellaneous && Verbose) { 4809 DWORD err = GetLastError(); 4810 tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err); 4811 } 4812 // Don't consider this a hard error, on IA32 even if the 4813 // VirtualProtect fails, we should still be able to execute 4814 CloseHandle(hFile); 4815 return base; 4816 } 4817 } 4818 4819 if (CloseHandle(hFile) == 0) { 4820 if (PrintMiscellaneous && Verbose) { 4821 DWORD err = GetLastError(); 4822 tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err); 4823 } 4824 return base; 4825 } 4826 4827 return base; 4828 } 4829 4830 4831 // Remap a block of memory. 4832 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4833 char *addr, size_t bytes, bool read_only, 4834 bool allow_exec) { 4835 // This OS does not allow existing memory maps to be remapped so we 4836 // have to unmap the memory before we remap it. 4837 if (!os::unmap_memory(addr, bytes)) { 4838 return NULL; 4839 } 4840 4841 // There is a very small theoretical window between the unmap_memory() 4842 // call above and the map_memory() call below where a thread in native 4843 // code may be able to access an address that is no longer mapped. 
4844 4845 return os::map_memory(fd, file_name, file_offset, addr, bytes, 4846 read_only, allow_exec); 4847 } 4848 4849 4850 // Unmap a block of memory. 4851 // Returns true=success, otherwise false. 4852 4853 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4854 BOOL result = UnmapViewOfFile(addr); 4855 if (result == 0) { 4856 if (PrintMiscellaneous && Verbose) { 4857 DWORD err = GetLastError(); 4858 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err); 4859 } 4860 return false; 4861 } 4862 return true; 4863 } 4864 4865 void os::pause() { 4866 char filename[MAX_PATH]; 4867 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4868 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4869 } else { 4870 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4871 } 4872 4873 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4874 if (fd != -1) { 4875 struct stat buf; 4876 ::close(fd); 4877 while (::stat(filename, &buf) == 0) { 4878 Sleep(100); 4879 } 4880 } else { 4881 jio_fprintf(stderr, 4882 "Could not open pause file '%s', continuing immediately.\n", filename); 4883 } 4884 } 4885 4886 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() { 4887 assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread"); 4888 } 4889 4890 // See the caveats for this class in os_windows.hpp 4891 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4892 // into this method and returns false. If no OS EXCEPTION was raised, returns 4893 // true. 4894 // The callback is supposed to provide the method that should be protected. 4895 // 4896 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) { 4897 assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread"); 4898 assert(!WatcherThread::watcher_thread()->has_crash_protection(), 4899 "crash_protection already set?"); 4900 4901 bool success = true; 4902 __try { 4903 WatcherThread::watcher_thread()->set_crash_protection(this); 4904 cb.call(); 4905 } __except(EXCEPTION_EXECUTE_HANDLER) { 4906 // only for protection, nothing to do 4907 success = false; 4908 } 4909 WatcherThread::watcher_thread()->set_crash_protection(NULL); 4910 return success; 4911 } 4912 4913 // An Event wraps a win32 "CreateEvent" kernel handle. 4914 // 4915 // We have a number of choices regarding "CreateEvent" win32 handle leakage: 4916 // 4917 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle 4918 // field, and call CloseHandle() on the win32 event handle. Unpark() would 4919 // need to be modified to tolerate finding a NULL (invalid) win32 event handle. 4920 // In addition, an unpark() operation might fetch the handle field, but the 4921 // event could recycle between the fetch and the SetEvent() operation. 4922 // SetEvent() would either fail because the handle was invalid, or inadvertently work, 4923 // as the win32 handle value had been recycled. In an ideal world calling SetEvent() 4924 // on an stale but recycled handle would be harmless, but in practice this might 4925 // confuse other non-Sun code, so it's not a viable approach. 4926 // 4927 // 2: Once a win32 event handle is associated with an Event, it remains associated 4928 // with the Event. The event handle is never closed. This could be construed 4929 // as handle leakage, but only up to the maximum # of threads that have been extant 4930 // at any one time. 
This shouldn't be an issue, as windows platforms typically 4931 // permit a process to have hundreds of thousands of open handles. 4932 // 4933 // 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList 4934 // and release unused handles. 4935 // 4936 // 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle. 4937 // It's not clear, however, that we wouldn't be trading one type of leak for another. 4938 // 4939 // 5. Use an RCU-like mechanism (Read-Copy Update). 4940 // Or perhaps something similar to Maged Michael's "Hazard pointers". 4941 // 4942 // We use (2). 4943 // 4944 // TODO-FIXME: 4945 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation. 4946 // 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks 4947 // to recover from (or at least detect) the dreaded Windows 841176 bug. 4948 // 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent 4949 // into a single win32 CreateEvent() handle. 4950 // 4951 // Assumption: 4952 // Only one parker can exist on an event, which is why we allocate 4953 // them per-thread. Multiple unparkers can coexist. 4954 // 4955 // _Event transitions in park() 4956 // -1 => -1 : illegal 4957 // 1 => 0 : pass - return immediately 4958 // 0 => -1 : block; then set _Event to 0 before returning 4959 // 4960 // _Event transitions in unpark() 4961 // 0 => 1 : just return 4962 // 1 => 1 : just return 4963 // -1 => either 0 or 1; must signal target thread 4964 // That is, we can safely transition _Event from -1 to either 4965 // 0 or 1. 4966 // 4967 // _Event serves as a restricted-range semaphore. 4968 // -1 : thread is blocked, i.e. there is a waiter 4969 // 0 : neutral: thread is running or ready, 4970 // could have been signaled after a wait started 4971 // 1 : signaled - thread is running or ready 4972 // 4973 // Another possible encoding of _Event would be with 4974 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits. 4975 // 4976 4977 int os::PlatformEvent::park(jlong Millis) { 4978 // Transitions for _Event: 4979 // -1 => -1 : illegal 4980 // 1 => 0 : pass - return immediately 4981 // 0 => -1 : block; then set _Event to 0 before returning 4982 4983 guarantee(_ParkHandle != NULL , "Invariant"); 4984 guarantee(Millis > 0 , "Invariant"); 4985 4986 // CONSIDER: defer assigning a CreateEvent() handle to the Event until 4987 // the initial park() operation. 4988 // Consider: use atomic decrement instead of CAS-loop 4989 4990 int v; 4991 for (;;) { 4992 v = _Event; 4993 if (Atomic::cmpxchg(v-1, &_Event, v) == v) break; 4994 } 4995 guarantee((v == 0) || (v == 1), "invariant"); 4996 if (v != 0) return OS_OK; 4997 4998 // Do this the hard way by blocking ... 4999 // TODO: consider a brief spin here, gated on the success of recent 5000 // spin attempts by this thread. 5001 // 5002 // We decompose long timeouts into series of shorter timed waits. 5003 // Evidently large timo values passed in WaitForSingleObject() are problematic on some 5004 // versions of Windows. See EventWait() for details. This may be superstition. Or not. 5005 // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time 5006 // with os::javaTimeNanos(). Furthermore, we assume that spurious returns from 5007 // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend 5008 // to happen early in the wait interval. 
Specifically, after a spurious wakeup (rv == 5009 // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate 5010 // for the already waited time. This policy does not admit any new outcomes. 5011 // In the future, however, we might want to track the accumulated wait time and 5012 // adjust Millis accordingly if we encounter a spurious wakeup. 5013 5014 const int MAXTIMEOUT = 0x10000000; 5015 DWORD rv = WAIT_TIMEOUT; 5016 while (_Event < 0 && Millis > 0) { 5017 DWORD prd = Millis; // set prd = MAX (Millis, MAXTIMEOUT) 5018 if (Millis > MAXTIMEOUT) { 5019 prd = MAXTIMEOUT; 5020 } 5021 rv = ::WaitForSingleObject(_ParkHandle, prd); 5022 assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed"); 5023 if (rv == WAIT_TIMEOUT) { 5024 Millis -= prd; 5025 } 5026 } 5027 v = _Event; 5028 _Event = 0; 5029 // see comment at end of os::PlatformEvent::park() below: 5030 OrderAccess::fence(); 5031 // If we encounter a nearly simultanous timeout expiry and unpark() 5032 // we return OS_OK indicating we awoke via unpark(). 5033 // Implementor's license -- returning OS_TIMEOUT would be equally valid, however. 5034 return (v >= 0) ? OS_OK : OS_TIMEOUT; 5035 } 5036 5037 void os::PlatformEvent::park() { 5038 // Transitions for _Event: 5039 // -1 => -1 : illegal 5040 // 1 => 0 : pass - return immediately 5041 // 0 => -1 : block; then set _Event to 0 before returning 5042 5043 guarantee(_ParkHandle != NULL, "Invariant"); 5044 // Invariant: Only the thread associated with the Event/PlatformEvent 5045 // may call park(). 5046 // Consider: use atomic decrement instead of CAS-loop 5047 int v; 5048 for (;;) { 5049 v = _Event; 5050 if (Atomic::cmpxchg(v-1, &_Event, v) == v) break; 5051 } 5052 guarantee((v == 0) || (v == 1), "invariant"); 5053 if (v != 0) return; 5054 5055 // Do this the hard way by blocking ... 5056 // TODO: consider a brief spin here, gated on the success of recent 5057 // spin attempts by this thread. 5058 while (_Event < 0) { 5059 DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE); 5060 assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed"); 5061 } 5062 5063 // Usually we'll find _Event == 0 at this point, but as 5064 // an optional optimization we clear it, just in case can 5065 // multiple unpark() operations drove _Event up to 1. 5066 _Event = 0; 5067 OrderAccess::fence(); 5068 guarantee(_Event >= 0, "invariant"); 5069 } 5070 5071 void os::PlatformEvent::unpark() { 5072 guarantee(_ParkHandle != NULL, "Invariant"); 5073 5074 // Transitions for _Event: 5075 // 0 => 1 : just return 5076 // 1 => 1 : just return 5077 // -1 => either 0 or 1; must signal target thread 5078 // That is, we can safely transition _Event from -1 to either 5079 // 0 or 1. 5080 // See also: "Semaphores in Plan 9" by Mullender & Cox 5081 // 5082 // Note: Forcing a transition from "-1" to "1" on an unpark() means 5083 // that it will take two back-to-back park() calls for the owning 5084 // thread to block. This has the benefit of forcing a spurious return 5085 // from the first park() call after an unpark() call which will help 5086 // shake out uses of park() and unpark() without condition variables. 5087 5088 if (Atomic::xchg(1, &_Event) >= 0) return; 5089 5090 ::SetEvent(_ParkHandle); 5091 } 5092 5093 5094 // JSR166 5095 // ------------------------------------------------------- 5096 5097 // The Windows implementation of Park is very straightforward: Basic 5098 // operations on Win32 Events turn out to have the right semantics to 5099 // use them directly. 

// JSR166
// -------------------------------------------------------

// The Windows implementation of Park is very straightforward: Basic
// operations on Win32 Events turn out to have the right semantics to
// use them directly.  We opportunistically reuse the event inherited
// from Monitor.

void Parker::park(bool isAbsolute, jlong time) {
  guarantee(_ParkEvent != NULL, "invariant");
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  } else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  } else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) { // already elapsed
      return;
    }
  } else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0) { // Wait for the minimal time unit if zero
      time = 1;
    }
  }

  JavaThread* thread = (JavaThread*)(Thread::current());
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    ResetEvent(_ParkEvent);
    return;
  } else {
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jt->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (jt->handle_special_suspend_equivalent_condition()) {
      jt->java_suspend_self();
    }
  }
}

void Parker::unpark() {
  guarantee(_ParkEvent != NULL, "invariant");
  SetEvent(_ParkEvent);
}

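// To make the time-argument decoding in Parker::park() easier to follow, here is an
// equivalent stand-alone sketch (excluded from compilation).  demo_decode_park_timeout()
// and DEMO_NO_WAIT are hypothetical: they map j.u.c.-style (isAbsolute, time) pairs onto a
// WaitForSingleObject() timeout, where isAbsolute means "deadline in milliseconds since the
// epoch" and !isAbsolute means "relative timeout in nanoseconds".
#if 0
static const DWORD DEMO_NO_WAIT = (DWORD)-2;     // caller should return without blocking

static DWORD demo_decode_park_timeout(bool isAbsolute, jlong time, jlong now_millis) {
  if (time < 0) {
    return DEMO_NO_WAIT;                         // negative: never wait
  } else if (time == 0 && !isAbsolute) {
    return INFINITE;                             // relative zero: wait until unparked
  } else if (isAbsolute) {
    jlong remaining = time - now_millis;         // deadline -> relative milliseconds
    return (remaining <= 0) ? DEMO_NO_WAIT : (DWORD)remaining;
  } else {
    jlong millis = time / 1000000;               // nanoseconds -> milliseconds
    return (DWORD)(millis == 0 ? 1 : millis);    // round a sub-millisecond wait up to 1 ms
  }
}
#endif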

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't create a new process).
int os::fork_and_exec(char* cmd) {
  STARTUPINFO si;
  PROCESS_INFORMATION pi;

  memset(&si, 0, sizeof(si));
  si.cb = sizeof(si);
  memset(&pi, 0, sizeof(pi));
  BOOL rslt = CreateProcess(NULL,   // executable name - use command line
                            cmd,    // command line
                            NULL,   // process security attribute
                            NULL,   // thread security attribute
                            TRUE,   // inherits system handles
                            0,      // no creation flags
                            NULL,   // use parent's environment block
                            NULL,   // use parent's starting directory
                            &si,    // (in) startup information
                            &pi);   // (out) process information

  if (rslt) {
    // Wait until child process exits.
    WaitForSingleObject(pi.hProcess, INFINITE);

    DWORD exit_code;
    GetExitCodeProcess(pi.hProcess, &exit_code);

    // Close process and thread handles.
    CloseHandle(pi.hProcess);
    CloseHandle(pi.hThread);

    return (int)exit_code;
  } else {
    return -1;
  }
}
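// A small usage sketch (excluded from compilation) for the routine above.  The command
// string is hypothetical; a writable buffer is passed rather than a string literal because
// CreateProcess may modify the command-line buffer.
#if 0
static void demo_run_on_error_command() {
  char cmd[] = "cmd.exe /c echo hello from the VM";   // hypothetical command
  int exit_code = os::fork_and_exec(cmd);
  if (exit_code == -1) {
    // CreateProcess failed - e.g. the executable could not be found.
  } else {
    // exit_code holds the child's exit status.
  }
}
#endif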

//--------------------------------------------------------------------------------------------------
// Non-product code

static int mallocDebugIntervalCounter = 0;
static int mallocDebugCounter = 0;
bool os::check_heap(bool force) {
  if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
  if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
    // Note: HeapValidate executes two hardware breakpoints when it finds something
    // wrong; at these points, eax contains the address of the offending block (I think).
    // To get to the explicit error message(s) below, just continue twice.

    HANDLE heap = GetProcessHeap();

    // If we fail to lock the heap, then gflags.exe has been used
    // or some other special heap flag has been set that prevents
    // locking. We don't try to walk a heap we can't lock.
    if (HeapLock(heap) != 0) {
      PROCESS_HEAP_ENTRY phe;
      phe.lpData = NULL;
      while (HeapWalk(heap, &phe) != 0) {
        if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
            !HeapValidate(heap, 0, phe.lpData)) {
          tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
          tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData);
          fatal("corrupted C heap");
        }
      }
      DWORD err = GetLastError();
      if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
        fatal(err_msg("heap walk aborted with error %d", err));
      }
      HeapUnlock(heap);
    }
    mallocDebugIntervalCounter = 0;
  }
  return true;
}


bool os::find(address addr, outputStream* st) {
  // Nothing yet
  return false;
}

LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
  DWORD exception_code = e->ExceptionRecord->ExceptionCode;

  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow();
    PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (os::is_memory_serialize_page(thread, addr)) {
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  return EXCEPTION_CONTINUE_SEARCH;
}

// We don't build a headless jre for Windows
bool os::is_headless_jre() { return false; }

static jint initSock() {
  WSADATA wsadata;

  if (!os::WinSock2Dll::WinSock2Available()) {
    jio_fprintf(stderr, "Could not load Winsock (error: %d)\n",
                ::GetLastError());
    return JNI_ERR;
  }

  if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
    jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
                ::GetLastError());
    return JNI_ERR;
  }
  return JNI_OK;
}

struct hostent* os::get_host_by_name(char* name) {
  return (struct hostent*)os::WinSock2Dll::gethostbyname(name);
}

int os::socket_close(int fd) {
  return ::closesocket(fd);
}

int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}

int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  return ::connect(fd, him, len);
}

int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  return ::recv(fd, buf, (int)nBytes, flags);
}

int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(IA32)
  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif

// returns true if thread could be suspended,
// false otherwise
static bool do_suspend(HANDLE* h) {
  if (h != NULL) {
    if (SuspendThread(*h) != ~0) {
      return true;
    }
  }
  return false;
}

// resume the thread
// calling resume on an active thread is a no-op
static void do_resume(HANDLE* h) {
  if (h != NULL) {
    ResumeThread(*h);
  }
}

// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value.
void get_thread_handle_for_extended_context(HANDLE* h,
                                            OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

// Thread sampling implementation
//
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}
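// For illustration, a sketch (excluded from compilation) of a sampling task built on the
// same suspend -> GetThreadContext -> resume pattern used above.  demo_sample_pc() and the
// AMD64-only read of ctxt.Rip are hypothetical; the real consumers of the captured context
// live in the shared thread-sampling code, not in this file.
#if 0
static bool demo_sample_pc(OSThread::thread_id_t tid, ULONG_PTR* pc_out) {
  HANDLE h = NULL;
  get_thread_handle_for_extended_context(&h, tid);
  if (h == NULL || h == INVALID_HANDLE_VALUE) return false;

  bool ok = false;
  if (do_suspend(&h)) {
    CONTEXT ctxt;
    ctxt.ContextFlags = sampling_context_flags;
    if (GetThreadContext(h, &ctxt)) {
#if defined(AMD64)
      *pc_out = (ULONG_PTR)ctxt.Rip;    // program counter of the suspended thread
      ok = true;
#endif
    }
    do_resume(&h);
  }
  CloseHandle(h);
  return ok;
}
#endif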
called"); 5416 5417 return _GetNumaNodeProcessorMask(node, proc_mask); 5418 } 5419 5420 USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip, 5421 ULONG FrameToCapture, 5422 PVOID* BackTrace, 5423 PULONG BackTraceHash) { 5424 if (!initialized) { 5425 initialize(); 5426 } 5427 5428 if (_RtlCaptureStackBackTrace != NULL) { 5429 return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture, 5430 BackTrace, BackTraceHash); 5431 } else { 5432 return 0; 5433 } 5434 } 5435 5436 void os::Kernel32Dll::initializeCommon() { 5437 if (!initialized) { 5438 HMODULE handle = ::GetModuleHandle("Kernel32.dll"); 5439 assert(handle != NULL, "Just check"); 5440 _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum"); 5441 _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma"); 5442 _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber"); 5443 _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask"); 5444 _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace"); 5445 initialized = TRUE; 5446 } 5447 } 5448 5449 5450 5451 #ifndef JDK6_OR_EARLIER 5452 5453 void os::Kernel32Dll::initialize() { 5454 initializeCommon(); 5455 } 5456 5457 5458 // Kernel32 API 5459 inline BOOL os::Kernel32Dll::SwitchToThread() { 5460 return ::SwitchToThread(); 5461 } 5462 5463 inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() { 5464 return true; 5465 } 5466 5467 // Help tools 5468 inline BOOL os::Kernel32Dll::HelpToolsAvailable() { 5469 return true; 5470 } 5471 5472 inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags, 5473 DWORD th32ProcessId) { 5474 return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId); 5475 } 5476 5477 inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot, 5478 LPMODULEENTRY32 lpme) { 5479 return ::Module32First(hSnapshot, lpme); 5480 } 5481 5482 inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot, 5483 LPMODULEENTRY32 lpme) { 5484 return ::Module32Next(hSnapshot, lpme); 5485 } 5486 5487 inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) { 5488 ::GetNativeSystemInfo(lpSystemInfo); 5489 } 5490 5491 // PSAPI API 5492 inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, 5493 HMODULE *lpModule, DWORD cb, 5494 LPDWORD lpcbNeeded) { 5495 return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded); 5496 } 5497 5498 inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, 5499 HMODULE hModule, 5500 LPTSTR lpFilename, 5501 DWORD nSize) { 5502 return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize); 5503 } 5504 5505 inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, 5506 HMODULE hModule, 5507 LPMODULEINFO lpmodinfo, 5508 DWORD cb) { 5509 return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb); 5510 } 5511 5512 inline BOOL os::PSApiDll::PSApiAvailable() { 5513 return true; 5514 } 5515 5516 5517 // WinSock2 API 5518 inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, 5519 LPWSADATA lpWSAData) { 5520 return ::WSAStartup(wVersionRequested, lpWSAData); 5521 } 5522 5523 inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) { 5524 return ::gethostbyname(name); 5525 } 5526 5527 inline BOOL os::WinSock2Dll::WinSock2Available() { 5528 return true; 5529 } 5530 5531 // Advapi API 5532 inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE 


#ifndef JDK6_OR_EARLIER

void os::Kernel32Dll::initialize() {
  initializeCommon();
}


// Kernel32 API
inline BOOL os::Kernel32Dll::SwitchToThread() {
  return ::SwitchToThread();
}

inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  return true;
}

// Help tools
inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
  return true;
}

inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,
                                                        DWORD th32ProcessId) {
  return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,
                                           LPMODULEENTRY32 lpme) {
  return ::Module32First(hSnapshot, lpme);
}

inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,
                                          LPMODULEENTRY32 lpme) {
  return ::Module32Next(hSnapshot, lpme);
}

inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  ::GetNativeSystemInfo(lpSystemInfo);
}

// PSAPI API
inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess,
                                             HMODULE *lpModule, DWORD cb,
                                             LPDWORD lpcbNeeded) {
  return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess,
                                               HMODULE hModule,
                                               LPTSTR lpFilename,
                                               DWORD nSize) {
  return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess,
                                               HMODULE hModule,
                                               LPMODULEINFO lpmodinfo,
                                               DWORD cb) {
  return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

inline BOOL os::PSApiDll::PSApiAvailable() {
  return true;
}


// WinSock2 API
inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested,
                                        LPWSADATA lpWSAData) {
  return ::WSAStartup(wVersionRequested, lpWSAData);
}

inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  return ::gethostbyname(name);
}

inline BOOL os::WinSock2Dll::WinSock2Available() {
  return true;
}

// Advapi API
inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
                                                   BOOL DisableAllPrivileges,
                                                   PTOKEN_PRIVILEGES NewState,
                                                   DWORD BufferLength,
                                                   PTOKEN_PRIVILEGES PreviousState,
                                                   PDWORD ReturnLength) {
  return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
                                 BufferLength, PreviousState, ReturnLength);
}

inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle,
                                              DWORD DesiredAccess,
                                              PHANDLE TokenHandle) {
  return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}

inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName,
                                                  LPCTSTR lpName,
                                                  PLUID lpLuid) {
  return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}

inline BOOL os::Advapi32Dll::AdvapiAvailable() {
  return true;
}

void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for Windows, takes into account __stdcall names.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//                             == false if only the base name of the library is passed in
//                                     such as "L"
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}

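// Worked examples (excluded from compilation) for the name mangling above.  The agent
// names are hypothetical; assuming the usual Windows values of an empty JNI_LIB_PREFIX and
// a ".dll" JNI_LIB_SUFFIX, an absolute path reduces to the base library name before it is
// spliced into the symbol.  Note that only the platform file separator ('\\') and a drive
// prefix ("C:") are stripped; forward slashes are not treated as separators here.
// The returned buffers are C-heap allocated; freeing is omitted in this sketch.
#if 0
static void demo_agent_names() {
  // __stdcall-decorated symbol: the "@XX" suffix is preserved after the library name.
  char* n1 = os::build_agent_function_name("_Agent_OnLoad@8", "C:\\a\\b\\L.dll", true);
  // n1 == "_Agent_OnLoad_L@8"

  // Undecorated symbol with a plain base name.
  char* n2 = os::build_agent_function_name("Agent_OnLoad", "L", false);
  // n2 == "Agent_OnLoad_L"

  // No library: the symbol is returned unchanged.
  char* n3 = os::build_agent_function_name("Agent_OnLoad", NULL, false);
  // n3 == "Agent_OnLoad"
}
#endif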
#else
// Kernel32 API
typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD, DWORD);
typedef BOOL (WINAPI* Module32First_Fn)(HANDLE, LPMODULEENTRY32);
typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE, LPMODULEENTRY32);
typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);

SwitchToThread_Fn           os::Kernel32Dll::_SwitchToThread = NULL;
CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
Module32First_Fn            os::Kernel32Dll::_Module32First = NULL;
Module32Next_Fn             os::Kernel32Dll::_Module32Next = NULL;
GetNativeSystemInfo_Fn      os::Kernel32Dll::_GetNativeSystemInfo = NULL;

void os::Kernel32Dll::initialize() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");

    _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
    _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
      ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
    _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First");
    _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next");
    _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo");
    initializeCommon();  // resolve the functions that always need resolving

    initialized = TRUE;
  }
}

BOOL os::Kernel32Dll::SwitchToThread() {
  assert(initialized && _SwitchToThread != NULL,
         "SwitchToThreadAvailable() not yet called");
  return _SwitchToThread();
}


BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  if (!initialized) {
    initialize();
  }
  return _SwitchToThread != NULL;
}

// Help tools
BOOL os::Kernel32Dll::HelpToolsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _CreateToolhelp32Snapshot != NULL &&
         _Module32First != NULL &&
         _Module32Next != NULL;
}

HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,
                                                 DWORD th32ProcessId) {
  assert(initialized && _CreateToolhelp32Snapshot != NULL,
         "HelpToolsAvailable() not yet called");

  return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot, LPMODULEENTRY32 lpme) {
  assert(initialized && _Module32First != NULL,
         "HelpToolsAvailable() not yet called");

  return _Module32First(hSnapshot, lpme);
}

inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,
                                          LPMODULEENTRY32 lpme) {
  assert(initialized && _Module32Next != NULL,
         "HelpToolsAvailable() not yet called");

  return _Module32Next(hSnapshot, lpme);
}


BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetNativeSystemInfo != NULL;
}

void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  assert(initialized && _GetNativeSystemInfo != NULL,
         "GetNativeSystemInfoAvailable() not yet called");

  _GetNativeSystemInfo(lpSystemInfo);
}

// PSAPI API


typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD);
typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);
typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD);

EnumProcessModules_Fn   os::PSApiDll::_EnumProcessModules = NULL;
GetModuleFileNameEx_Fn  os::PSApiDll::_GetModuleFileNameEx = NULL;
GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL;
BOOL                    os::PSApiDll::initialized = FALSE;

void os::PSApiDll::initialize() {
  if (!initialized) {
    HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0);
    if (handle != NULL) {
      _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle,
                                                                    "EnumProcessModules");
      _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle,
                                                                      "GetModuleFileNameExA");
      _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle,
                                                                        "GetModuleInformation");
    }
    initialized = TRUE;
  }
}



BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule,
                                      DWORD cb, LPDWORD lpcbNeeded) {
  assert(initialized && _EnumProcessModules != NULL,
         "PSApiAvailable() not yet called");
  return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule,
                                        LPTSTR lpFilename, DWORD nSize) {
  assert(initialized && _GetModuleFileNameEx != NULL,
         "PSApiAvailable() not yet called");
  return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule,
                                        LPMODULEINFO lpmodinfo, DWORD cb) {
  assert(initialized && _GetModuleInformation != NULL,
         "PSApiAvailable() not yet called");
  return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

BOOL os::PSApiDll::PSApiAvailable() {
  if (!initialized) {
    initialize();
  }
  return _EnumProcessModules != NULL &&
         _GetModuleFileNameEx != NULL &&
         _GetModuleInformation != NULL;
}
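// A usage sketch (excluded from compilation) for the PSAPI wrappers above: enumerate the
// modules loaded into the current process and fetch each module's file name.
// demo_dump_modules() and its fixed-size arrays are hypothetical; the real caller is the
// library-enumeration code used for error reporting.
#if 0
static void demo_dump_modules() {
  if (!os::PSApiDll::PSApiAvailable()) return;   // PSAPI.DLL could not be loaded

  HMODULE modules[512];
  DWORD needed = 0;
  if (!os::PSApiDll::EnumProcessModules(GetCurrentProcess(), modules,
                                        sizeof(modules), &needed)) {
    return;
  }
  DWORD count = needed / sizeof(HMODULE);
  if (count > 512) count = 512;                  // only the first 512 entries were filled in
  for (DWORD i = 0; i < count; i++) {
    char name[MAX_PATH];
    if (os::PSApiDll::GetModuleFileNameEx(GetCurrentProcess(), modules[i],
                                          name, sizeof(name)) != 0) {
      // name now holds the full path of the i-th loaded module
    }
  }
}
#endif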


// WinSock2 API
typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA);
typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...);

WSAStartup_Fn    os::WinSock2Dll::_WSAStartup = NULL;
gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL;
BOOL             os::WinSock2Dll::initialized = FALSE;

void os::WinSock2Dll::initialize() {
  if (!initialized) {
    HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0);
    if (handle != NULL) {
      _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup");
      _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname");
    }
    initialized = TRUE;
  }
}


BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
  assert(initialized && _WSAStartup != NULL,
         "WinSock2Available() not yet called");
  return _WSAStartup(wVersionRequested, lpWSAData);
}

struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  assert(initialized && _gethostbyname != NULL,
         "WinSock2Available() not yet called");
  return _gethostbyname(name);
}

BOOL os::WinSock2Dll::WinSock2Available() {
  if (!initialized) {
    initialize();
  }
  return _WSAStartup != NULL &&
         _gethostbyname != NULL;
}

typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD);
typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE);
typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, LPCTSTR, PLUID);

AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL;
OpenProcessToken_Fn      os::Advapi32Dll::_OpenProcessToken = NULL;
LookupPrivilegeValue_Fn  os::Advapi32Dll::_LookupPrivilegeValue = NULL;
BOOL                     os::Advapi32Dll::initialized = FALSE;

void os::Advapi32Dll::initialize() {
  if (!initialized) {
    HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0);
    if (handle != NULL) {
      _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle,
                                                                          "AdjustTokenPrivileges");
      _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle,
                                                                "OpenProcessToken");
      _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle,
                                                                        "LookupPrivilegeValueA");
    }
    initialized = TRUE;
  }
}

BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
                                            BOOL DisableAllPrivileges,
                                            PTOKEN_PRIVILEGES NewState,
                                            DWORD BufferLength,
                                            PTOKEN_PRIVILEGES PreviousState,
                                            PDWORD ReturnLength) {
  assert(initialized && _AdjustTokenPrivileges != NULL,
         "AdvapiAvailable() not yet called");
  return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
                                BufferLength, PreviousState, ReturnLength);
}

BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle,
                                       DWORD DesiredAccess,
                                       PHANDLE TokenHandle) {
  assert(initialized && _OpenProcessToken != NULL,
         "AdvapiAvailable() not yet called");
  return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}

BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName,
                                           LPCTSTR lpName, PLUID lpLuid) {
  assert(initialized && _LookupPrivilegeValue != NULL,
         "AdvapiAvailable() not yet called");
  return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}

BOOL os::Advapi32Dll::AdvapiAvailable() {
  if (!initialized) {
    initialize();
  }
  return _AdjustTokenPrivileges != NULL &&
         _OpenProcessToken != NULL &&
         _LookupPrivilegeValue != NULL;
}

#endif
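// A usage sketch (excluded from compilation) for the Advapi32 wrappers above: enable a
// named privilege on the current process token.  demo_enable_privilege() is hypothetical;
// the VM performs the same sequence elsewhere in this file when it requests the privilege
// needed for large-page allocations.
#if 0
static bool demo_enable_privilege(LPCTSTR privilege_name) {
  if (!os::Advapi32Dll::AdvapiAvailable()) return false;

  HANDLE token = NULL;
  if (!os::Advapi32Dll::OpenProcessToken(GetCurrentProcess(),
                                         TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token)) {
    return false;
  }

  TOKEN_PRIVILEGES tp;
  tp.PrivilegeCount = 1;
  tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
  bool ok = os::Advapi32Dll::LookupPrivilegeValue(NULL, privilege_name,
                                                  &tp.Privileges[0].Luid) != 0 &&
            os::Advapi32Dll::AdjustTokenPrivileges(token, FALSE, &tp, 0, NULL, NULL) != 0 &&
            GetLastError() == ERROR_SUCCESS;   // AdjustTokenPrivileges can "succeed" partially

  CloseHandle(token);
  return ok;
}
#endif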

#ifndef PRODUCT

// test the code path in reserve_memory_special() that tries to allocate memory in a single
// contiguous memory block at a particular address.
// The test first tries to find a good approximate address to allocate at by using the same
// method to allocate some memory at any address.  The test then tries to allocate memory in
// the vicinity (not directly after it, to avoid possible by-chance use of that location).
// This is, of course, only a rough heuristic; there is no guarantee that the vicinity of
// the previously allocated memory is available for allocation.  The only actual failure
// that is reported is when the test tries to allocate at a particular location but gets a
// different valid one.  A NULL return value at this point is not considered an error but may
// be legitimate.
// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
void TestReserveMemorySpecial_test() {
  if (!UseLargePages) {
    if (VerboseInternalVMTests) {
      gclog_or_tty->print("Skipping test because large pages are disabled");
    }
    return;
  }
  // save current value of globals
  bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
  bool old_use_numa_interleaving = UseNUMAInterleaving;

  // set globals to make sure we hit the correct code path
  UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;

  // do an allocation at an address selected by the OS to get a good one.
  const size_t large_allocation_size = os::large_page_size() * 4;
  char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
  if (result == NULL) {
    if (VerboseInternalVMTests) {
      gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". Skipping remainder of test.",
                          large_allocation_size);
    }
  } else {
    os::release_memory_special(result, large_allocation_size);

    // allocate another page within the recently allocated memory area which seems to be a good location. At least
    // we managed to get it once.
    const size_t expected_allocation_size = os::large_page_size();
    char* expected_location = result + os::large_page_size();
    char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
    if (actual_location == NULL) {
      if (VerboseInternalVMTests) {
        gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.",
                            expected_location, expected_allocation_size);
      }
    } else {
      // release memory
      os::release_memory_special(actual_location, expected_allocation_size);
      // only now check, after releasing any memory to avoid any leaks.
      assert(actual_location == expected_location,
             err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead",
                     expected_location, expected_allocation_size, actual_location));
    }
  }

  // restore globals
  UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
  UseNUMAInterleaving = old_use_numa_interleaving;
}
#endif // PRODUCT