1 /* 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // Must be at least Windows 2000 or XP to use IsDebuggerPresent 26 #define _WIN32_WINNT 0x500 27 28 // no precompiled headers 29 #include "classfile/classLoader.hpp" 30 #include "classfile/systemDictionary.hpp" 31 #include "classfile/vmSymbols.hpp" 32 #include "code/icBuffer.hpp" 33 #include "code/vtableStubs.hpp" 34 #include "compiler/compileBroker.hpp" 35 #include "compiler/disassembler.hpp" 36 #include "interpreter/interpreter.hpp" 37 #include "jvm_windows.h" 38 #include "memory/allocation.inline.hpp" 39 #include "memory/filemap.hpp" 40 #include "mutex_windows.inline.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "os_share_windows.hpp" 43 #include "prims/jniFastGetField.hpp" 44 #include "prims/jvm.h" 45 #include "prims/jvm_misc.hpp" 46 #include "runtime/arguments.hpp" 47 #include "runtime/atomic.inline.hpp" 48 #include "runtime/extendedPC.hpp" 49 #include "runtime/globals.hpp" 50 #include "runtime/interfaceSupport.hpp" 51 #include "runtime/java.hpp" 52 #include "runtime/javaCalls.hpp" 53 #include "runtime/mutexLocker.hpp" 54 #include "runtime/objectMonitor.hpp" 55 #include "runtime/orderAccess.inline.hpp" 56 #include "runtime/osThread.hpp" 57 #include "runtime/perfMemory.hpp" 58 #include "runtime/sharedRuntime.hpp" 59 #include "runtime/statSampler.hpp" 60 #include "runtime/stubRoutines.hpp" 61 #include "runtime/thread.inline.hpp" 62 #include "runtime/threadCritical.hpp" 63 #include "runtime/timer.hpp" 64 #include "services/attachListener.hpp" 65 #include "services/memTracker.hpp" 66 #include "services/runtimeService.hpp" 67 #include "utilities/decoder.hpp" 68 #include "utilities/defaultStream.hpp" 69 #include "utilities/events.hpp" 70 #include "utilities/growableArray.hpp" 71 #include "utilities/vmError.hpp" 72 73 #ifdef _DEBUG 74 #include <crtdbg.h> 75 #endif 76 77 78 #include <windows.h> 79 #include <sys/types.h> 80 #include <sys/stat.h> 81 #include <sys/timeb.h> 82 #include <objidl.h> 83 #include <shlobj.h> 84 85 #include <malloc.h> 86 
#include <signal.h> 87 #include <direct.h> 88 #include <errno.h> 89 #include <fcntl.h> 90 #include <io.h> 91 #include <process.h> // For _beginthreadex(), _endthreadex() 92 #include <imagehlp.h> // For os::dll_address_to_function_name 93 /* for enumerating dll libraries */ 94 #include <vdmdbg.h> 95 96 // for timer info max values which include all bits 97 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) 98 99 // For DLL loading/load error detection 100 // Values of PE COFF 101 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c 102 #define IMAGE_FILE_SIGNATURE_LENGTH 4 103 104 static HANDLE main_process; 105 static HANDLE main_thread; 106 static int main_thread_id; 107 108 static FILETIME process_creation_time; 109 static FILETIME process_exit_time; 110 static FILETIME process_user_time; 111 static FILETIME process_kernel_time; 112 113 #ifdef _M_IA64 114 #define __CPU__ ia64 115 #elif _M_AMD64 116 #define __CPU__ amd64 117 #else 118 #define __CPU__ i486 119 #endif 120 121 // save DLL module handle, used by GetModuleFileName 122 123 HINSTANCE vm_lib_handle; 124 125 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) { 126 switch (reason) { 127 case DLL_PROCESS_ATTACH: 128 vm_lib_handle = hinst; 129 if(ForceTimeHighResolution) 130 timeBeginPeriod(1L); 131 break; 132 case DLL_PROCESS_DETACH: 133 if(ForceTimeHighResolution) 134 timeEndPeriod(1L); 135 136 break; 137 default: 138 break; 139 } 140 return true; 141 } 142 143 static inline double fileTimeAsDouble(FILETIME* time) { 144 const double high = (double) ((unsigned int) ~0); 145 const double split = 10000000.0; 146 double result = (time->dwLowDateTime / split) + 147 time->dwHighDateTime * (high/split); 148 return result; 149 } 150 151 // Implementation of os 152 153 bool os::getenv(const char* name, char* buffer, int len) { 154 int result = GetEnvironmentVariable(name, buffer, len); 155 return result > 0 && result < len; 156 } 157 158 bool os::unsetenv(const char* name) { 159 assert(name != NULL, "Null 
pointer"); 160 return (SetEnvironmentVariable(name, NULL) == TRUE); 161 } 162 163 // No setuid programs under Windows. 164 bool os::have_special_privileges() { 165 return false; 166 } 167 168 169 // This method is a periodic task to check for misbehaving JNI applications 170 // under CheckJNI, we can add any periodic checks here. 171 // For Windows at the moment does nothing 172 void os::run_periodic_checks() { 173 return; 174 } 175 176 #ifndef _WIN64 177 // previous UnhandledExceptionFilter, if there is one 178 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL; 179 180 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo); 181 #endif 182 void os::init_system_properties_values() { 183 /* sysclasspath, java_home, dll_dir */ 184 { 185 char *home_path; 186 char *dll_path; 187 char *pslash; 188 char *bin = "\\bin"; 189 char home_dir[MAX_PATH]; 190 191 if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) { 192 os::jvm_path(home_dir, sizeof(home_dir)); 193 // Found the full path to jvm.dll. 194 // Now cut the path to <java_home>/jre if we can. 
195 *(strrchr(home_dir, '\\')) = '\0'; /* get rid of \jvm.dll */ 196 pslash = strrchr(home_dir, '\\'); 197 if (pslash != NULL) { 198 *pslash = '\0'; /* get rid of \{client|server} */ 199 pslash = strrchr(home_dir, '\\'); 200 if (pslash != NULL) 201 *pslash = '\0'; /* get rid of \bin */ 202 } 203 } 204 205 home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal); 206 if (home_path == NULL) 207 return; 208 strcpy(home_path, home_dir); 209 Arguments::set_java_home(home_path); 210 211 dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal); 212 if (dll_path == NULL) 213 return; 214 strcpy(dll_path, home_dir); 215 strcat(dll_path, bin); 216 Arguments::set_dll_dir(dll_path); 217 218 if (!set_boot_path('\\', ';')) 219 return; 220 } 221 222 /* library_path */ 223 #define EXT_DIR "\\lib\\ext" 224 #define BIN_DIR "\\bin" 225 #define PACKAGE_DIR "\\Sun\\Java" 226 { 227 /* Win32 library search order (See the documentation for LoadLibrary): 228 * 229 * 1. The directory from which application is loaded. 230 * 2. The system wide Java Extensions directory (Java only) 231 * 3. System directory (GetSystemDirectory) 232 * 4. Windows directory (GetWindowsDirectory) 233 * 5. The PATH environment variable 234 * 6. The current directory 235 */ 236 237 char *library_path; 238 char tmp[MAX_PATH]; 239 char *path_str = ::getenv("PATH"); 240 241 library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) + 242 sizeof(BIN_DIR) + (path_str ? 
strlen(path_str) : 0) + 10, mtInternal); 243 244 library_path[0] = '\0'; 245 246 GetModuleFileName(NULL, tmp, sizeof(tmp)); 247 *(strrchr(tmp, '\\')) = '\0'; 248 strcat(library_path, tmp); 249 250 GetWindowsDirectory(tmp, sizeof(tmp)); 251 strcat(library_path, ";"); 252 strcat(library_path, tmp); 253 strcat(library_path, PACKAGE_DIR BIN_DIR); 254 255 GetSystemDirectory(tmp, sizeof(tmp)); 256 strcat(library_path, ";"); 257 strcat(library_path, tmp); 258 259 GetWindowsDirectory(tmp, sizeof(tmp)); 260 strcat(library_path, ";"); 261 strcat(library_path, tmp); 262 263 if (path_str) { 264 strcat(library_path, ";"); 265 strcat(library_path, path_str); 266 } 267 268 strcat(library_path, ";."); 269 270 Arguments::set_library_path(library_path); 271 FREE_C_HEAP_ARRAY(char, library_path, mtInternal); 272 } 273 274 /* Default extensions directory */ 275 { 276 char path[MAX_PATH]; 277 char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1]; 278 GetWindowsDirectory(path, MAX_PATH); 279 sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR, 280 path, PACKAGE_DIR, EXT_DIR); 281 Arguments::set_ext_dirs(buf); 282 } 283 #undef EXT_DIR 284 #undef BIN_DIR 285 #undef PACKAGE_DIR 286 287 /* Default endorsed standards directory. 
*/ 288 { 289 #define ENDORSED_DIR "\\lib\\endorsed" 290 size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR); 291 char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal); 292 sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR); 293 Arguments::set_endorsed_dirs(buf); 294 #undef ENDORSED_DIR 295 } 296 297 #ifndef _WIN64 298 // set our UnhandledExceptionFilter and save any previous one 299 prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception); 300 #endif 301 302 // Done 303 return; 304 } 305 306 void os::breakpoint() { 307 DebugBreak(); 308 } 309 310 // Invoked from the BREAKPOINT Macro 311 extern "C" void breakpoint() { 312 os::breakpoint(); 313 } 314 315 /* 316 * RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP. 317 * So far, this method is only used by Native Memory Tracking, which is 318 * only supported on Windows XP or later. 319 */ 320 321 int os::get_native_stack(address* stack, int frames, int toSkip) { 322 #ifdef _NMT_NOINLINE_ 323 toSkip ++; 324 #endif 325 int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames, 326 (PVOID*)stack, NULL); 327 for (int index = captured; index < frames; index ++) { 328 stack[index] = NULL; 329 } 330 return captured; 331 } 332 333 334 // os::current_stack_base() 335 // 336 // Returns the base of the stack, which is the stack's 337 // starting address. This function must be called 338 // while running on the stack of the thread being queried. 339 340 address os::current_stack_base() { 341 MEMORY_BASIC_INFORMATION minfo; 342 address stack_bottom; 343 size_t stack_size; 344 345 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 346 stack_bottom = (address)minfo.AllocationBase; 347 stack_size = minfo.RegionSize; 348 349 // Add up the sizes of all the regions with the same 350 // AllocationBase. 
351 while( 1 ) 352 { 353 VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo)); 354 if ( stack_bottom == (address)minfo.AllocationBase ) 355 stack_size += minfo.RegionSize; 356 else 357 break; 358 } 359 360 #ifdef _M_IA64 361 // IA64 has memory and register stacks 362 // 363 // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit 364 // at thread creation (1MB backing store growing upwards, 1MB memory stack 365 // growing downwards, 2MB summed up) 366 // 367 // ... 368 // ------- top of stack (high address) ----- 369 // | 370 // | 1MB 371 // | Backing Store (Register Stack) 372 // | 373 // | / \ 374 // | | 375 // | | 376 // | | 377 // ------------------------ stack base ----- 378 // | 1MB 379 // | Memory Stack 380 // | 381 // | | 382 // | | 383 // | | 384 // | \ / 385 // | 386 // ----- bottom of stack (low address) ----- 387 // ... 388 389 stack_size = stack_size / 2; 390 #endif 391 return stack_bottom + stack_size; 392 } 393 394 size_t os::current_stack_size() { 395 size_t sz; 396 MEMORY_BASIC_INFORMATION minfo; 397 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 398 sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase; 399 return sz; 400 } 401 402 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 403 const struct tm* time_struct_ptr = localtime(clock); 404 if (time_struct_ptr != NULL) { 405 *res = *time_struct_ptr; 406 return res; 407 } 408 return NULL; 409 } 410 411 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo); 412 413 // Thread start routine for all new Java threads 414 static unsigned __stdcall java_start(Thread* thread) { 415 // Try to randomize the cache line index of hot stack frames. 416 // This helps when threads of the same stack traces evict each other's 417 // cache lines. The threads can be either from the same JVM instance, or 418 // from different JVM instances. The benefit is especially true for 419 // processors with hyperthreading technology. 
420 static int counter = 0; 421 int pid = os::current_process_id(); 422 _alloca(((pid ^ counter++) & 7) * 128); 423 424 OSThread* osthr = thread->osthread(); 425 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 426 427 if (UseNUMA) { 428 int lgrp_id = os::numa_get_group_id(); 429 if (lgrp_id != -1) { 430 thread->set_lgrp_id(lgrp_id); 431 } 432 } 433 434 435 // Install a win32 structured exception handler around every thread created 436 // by VM, so VM can genrate error dump when an exception occurred in non- 437 // Java thread (e.g. VM thread). 438 __try { 439 thread->run(); 440 } __except(topLevelExceptionFilter( 441 (_EXCEPTION_POINTERS*)_exception_info())) { 442 // Nothing to do. 443 } 444 445 // One less thread is executing 446 // When the VMThread gets here, the main thread may have already exited 447 // which frees the CodeHeap containing the Atomic::add code 448 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) { 449 Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count); 450 } 451 452 return 0; 453 } 454 455 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) { 456 // Allocate the OSThread object 457 OSThread* osthread = new OSThread(NULL, NULL); 458 if (osthread == NULL) return NULL; 459 460 // Initialize support for Java interrupts 461 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 462 if (interrupt_event == NULL) { 463 delete osthread; 464 return NULL; 465 } 466 osthread->set_interrupt_event(interrupt_event); 467 468 // Store info on the Win32 thread into the OSThread 469 osthread->set_thread_handle(thread_handle); 470 osthread->set_thread_id(thread_id); 471 472 if (UseNUMA) { 473 int lgrp_id = os::numa_get_group_id(); 474 if (lgrp_id != -1) { 475 thread->set_lgrp_id(lgrp_id); 476 } 477 } 478 479 // Initial thread state is INITIALIZED, not SUSPENDED 480 osthread->set_state(INITIALIZED); 481 482 return osthread; 483 } 484 485 486 bool 
os::create_attached_thread(JavaThread* thread) { 487 #ifdef ASSERT 488 thread->verify_not_published(); 489 #endif 490 HANDLE thread_h; 491 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(), 492 &thread_h, THREAD_ALL_ACCESS, false, 0)) { 493 fatal("DuplicateHandle failed\n"); 494 } 495 OSThread* osthread = create_os_thread(thread, thread_h, 496 (int)current_thread_id()); 497 if (osthread == NULL) { 498 return false; 499 } 500 501 // Initial thread state is RUNNABLE 502 osthread->set_state(RUNNABLE); 503 504 thread->set_osthread(osthread); 505 return true; 506 } 507 508 bool os::create_main_thread(JavaThread* thread) { 509 #ifdef ASSERT 510 thread->verify_not_published(); 511 #endif 512 if (_starting_thread == NULL) { 513 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 514 if (_starting_thread == NULL) { 515 return false; 516 } 517 } 518 519 // The primordial thread is runnable from the start) 520 _starting_thread->set_state(RUNNABLE); 521 522 thread->set_osthread(_starting_thread); 523 return true; 524 } 525 526 // Allocate and initialize a new OSThread 527 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) { 528 unsigned thread_id; 529 530 // Allocate the OSThread object 531 OSThread* osthread = new OSThread(NULL, NULL); 532 if (osthread == NULL) { 533 return false; 534 } 535 536 // Initialize support for Java interrupts 537 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 538 if (interrupt_event == NULL) { 539 delete osthread; 540 return NULL; 541 } 542 osthread->set_interrupt_event(interrupt_event); 543 osthread->set_interrupted(false); 544 545 thread->set_osthread(osthread); 546 547 if (stack_size == 0) { 548 switch (thr_type) { 549 case os::java_thread: 550 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 551 if (JavaThread::stack_size_at_create() > 0) 552 stack_size = JavaThread::stack_size_at_create(); 553 break; 554 case 
os::compiler_thread: 555 if (CompilerThreadStackSize > 0) { 556 stack_size = (size_t)(CompilerThreadStackSize * K); 557 break; 558 } // else fall through: 559 // use VMThreadStackSize if CompilerThreadStackSize is not defined 560 case os::vm_thread: 561 case os::pgc_thread: 562 case os::cgc_thread: 563 case os::watcher_thread: 564 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 565 break; 566 } 567 } 568 569 // Create the Win32 thread 570 // 571 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() 572 // does not specify stack size. Instead, it specifies the size of 573 // initially committed space. The stack size is determined by 574 // PE header in the executable. If the committed "stack_size" is larger 575 // than default value in the PE header, the stack is rounded up to the 576 // nearest multiple of 1MB. For example if the launcher has default 577 // stack size of 320k, specifying any size less than 320k does not 578 // affect the actual stack size at all, it only affects the initial 579 // commitment. On the other hand, specifying 'stack_size' larger than 580 // default value may cause significant increase in memory usage, because 581 // not only the stack space will be rounded up to MB, but also the 582 // entire space is committed upfront. 583 // 584 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' 585 // for CreateThread() that can treat 'stack_size' as stack size. However we 586 // are not supposed to call CreateThread() directly according to MSDN 587 // document because JVM uses C runtime library. The good news is that the 588 // flag appears to work with _beginthredex() as well. 
589 590 #ifndef STACK_SIZE_PARAM_IS_A_RESERVATION 591 #define STACK_SIZE_PARAM_IS_A_RESERVATION (0x10000) 592 #endif 593 594 HANDLE thread_handle = 595 (HANDLE)_beginthreadex(NULL, 596 (unsigned)stack_size, 597 (unsigned (__stdcall *)(void*)) java_start, 598 thread, 599 CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 600 &thread_id); 601 if (thread_handle == NULL) { 602 // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again 603 // without the flag. 604 thread_handle = 605 (HANDLE)_beginthreadex(NULL, 606 (unsigned)stack_size, 607 (unsigned (__stdcall *)(void*)) java_start, 608 thread, 609 CREATE_SUSPENDED, 610 &thread_id); 611 } 612 if (thread_handle == NULL) { 613 // Need to clean up stuff we've allocated so far 614 CloseHandle(osthread->interrupt_event()); 615 thread->set_osthread(NULL); 616 delete osthread; 617 return NULL; 618 } 619 620 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count); 621 622 // Store info on the Win32 thread into the OSThread 623 osthread->set_thread_handle(thread_handle); 624 osthread->set_thread_id(thread_id); 625 626 // Initial thread state is INITIALIZED, not SUSPENDED 627 osthread->set_state(INITIALIZED); 628 629 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 630 return true; 631 } 632 633 634 // Free Win32 resources related to the OSThread 635 void os::free_thread(OSThread* osthread) { 636 assert(osthread != NULL, "osthread not set"); 637 CloseHandle(osthread->thread_handle()); 638 CloseHandle(osthread->interrupt_event()); 639 delete osthread; 640 } 641 642 643 static int has_performance_count = 0; 644 static jlong first_filetime; 645 static jlong initial_performance_count; 646 static jlong performance_frequency; 647 648 649 jlong as_long(LARGE_INTEGER x) { 650 jlong result = 0; // initialization to avoid warning 651 set_high(&result, x.HighPart); 652 set_low(&result, x.LowPart); 653 return result; 654 } 655 656 657 jlong os::elapsed_counter() { 658 
LARGE_INTEGER count; 659 if (has_performance_count) { 660 QueryPerformanceCounter(&count); 661 return as_long(count) - initial_performance_count; 662 } else { 663 FILETIME wt; 664 GetSystemTimeAsFileTime(&wt); 665 return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime); 666 } 667 } 668 669 670 jlong os::elapsed_frequency() { 671 if (has_performance_count) { 672 return performance_frequency; 673 } else { 674 // the FILETIME time is the number of 100-nanosecond intervals since January 1,1601. 675 return 10000000; 676 } 677 } 678 679 680 julong os::available_memory() { 681 return win32::available_memory(); 682 } 683 684 julong os::win32::available_memory() { 685 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 686 // value if total memory is larger than 4GB 687 MEMORYSTATUSEX ms; 688 ms.dwLength = sizeof(ms); 689 GlobalMemoryStatusEx(&ms); 690 691 return (julong)ms.ullAvailPhys; 692 } 693 694 julong os::physical_memory() { 695 return win32::physical_memory(); 696 } 697 698 bool os::has_allocatable_memory_limit(julong* limit) { 699 MEMORYSTATUSEX ms; 700 ms.dwLength = sizeof(ms); 701 GlobalMemoryStatusEx(&ms); 702 #ifdef _LP64 703 *limit = (julong)ms.ullAvailVirtual; 704 return true; 705 #else 706 // Limit to 1400m because of the 2gb address space wall 707 *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual); 708 return true; 709 #endif 710 } 711 712 // VC6 lacks DWORD_PTR 713 #if _MSC_VER < 1300 714 typedef UINT_PTR DWORD_PTR; 715 #endif 716 717 int os::active_processor_count() { 718 DWORD_PTR lpProcessAffinityMask = 0; 719 DWORD_PTR lpSystemAffinityMask = 0; 720 int proc_count = processor_count(); 721 if (proc_count <= sizeof(UINT_PTR) * BitsPerByte && 722 GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) { 723 // Nof active processors is number of bits in process affinity mask 724 int bitcount = 0; 725 while (lpProcessAffinityMask != 0) { 726 lpProcessAffinityMask = 
lpProcessAffinityMask & (lpProcessAffinityMask-1); 727 bitcount++; 728 } 729 return bitcount; 730 } else { 731 return proc_count; 732 } 733 } 734 735 void os::set_native_thread_name(const char *name) { 736 // Not yet implemented. 737 return; 738 } 739 740 bool os::distribute_processes(uint length, uint* distribution) { 741 // Not yet implemented. 742 return false; 743 } 744 745 bool os::bind_to_processor(uint processor_id) { 746 // Not yet implemented. 747 return false; 748 } 749 750 static void initialize_performance_counter() { 751 LARGE_INTEGER count; 752 if (QueryPerformanceFrequency(&count)) { 753 has_performance_count = 1; 754 performance_frequency = as_long(count); 755 QueryPerformanceCounter(&count); 756 initial_performance_count = as_long(count); 757 } else { 758 has_performance_count = 0; 759 FILETIME wt; 760 GetSystemTimeAsFileTime(&wt); 761 first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 762 } 763 } 764 765 766 double os::elapsedTime() { 767 return (double) elapsed_counter() / (double) elapsed_frequency(); 768 } 769 770 771 // Windows format: 772 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601. 
773 // Java format: 774 // Java standards require the number of milliseconds since 1/1/1970 775 776 // Constant offset - calculated using offset() 777 static jlong _offset = 116444736000000000; 778 // Fake time counter for reproducible results when debugging 779 static jlong fake_time = 0; 780 781 #ifdef ASSERT 782 // Just to be safe, recalculate the offset in debug mode 783 static jlong _calculated_offset = 0; 784 static int _has_calculated_offset = 0; 785 786 jlong offset() { 787 if (_has_calculated_offset) return _calculated_offset; 788 SYSTEMTIME java_origin; 789 java_origin.wYear = 1970; 790 java_origin.wMonth = 1; 791 java_origin.wDayOfWeek = 0; // ignored 792 java_origin.wDay = 1; 793 java_origin.wHour = 0; 794 java_origin.wMinute = 0; 795 java_origin.wSecond = 0; 796 java_origin.wMilliseconds = 0; 797 FILETIME jot; 798 if (!SystemTimeToFileTime(&java_origin, &jot)) { 799 fatal(err_msg("Error = %d\nWindows error", GetLastError())); 800 } 801 _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime); 802 _has_calculated_offset = 1; 803 assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal"); 804 return _calculated_offset; 805 } 806 #else 807 jlong offset() { 808 return _offset; 809 } 810 #endif 811 812 jlong windows_to_java_time(FILETIME wt) { 813 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 814 return (a - offset()) / 10000; 815 } 816 817 FILETIME java_to_windows_time(jlong l) { 818 jlong a = (l * 10000) + offset(); 819 FILETIME result; 820 result.dwHighDateTime = high(a); 821 result.dwLowDateTime = low(a); 822 return result; 823 } 824 825 bool os::supports_vtime() { return true; } 826 bool os::enable_vtime() { return false; } 827 bool os::vtime_enabled() { return false; } 828 829 double os::elapsedVTime() { 830 FILETIME created; 831 FILETIME exited; 832 FILETIME kernel; 833 FILETIME user; 834 if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) { 835 // the resolution 
of windows_to_java_time() should be sufficient (ms) 836 return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS; 837 } else { 838 return elapsedTime(); 839 } 840 } 841 842 jlong os::javaTimeMillis() { 843 if (UseFakeTimers) { 844 return fake_time++; 845 } else { 846 FILETIME wt; 847 GetSystemTimeAsFileTime(&wt); 848 return windows_to_java_time(wt); 849 } 850 } 851 852 jlong os::javaTimeNanos() { 853 if (!has_performance_count) { 854 return javaTimeMillis() * NANOSECS_PER_MILLISEC; // the best we can do. 855 } else { 856 LARGE_INTEGER current_count; 857 QueryPerformanceCounter(¤t_count); 858 double current = as_long(current_count); 859 double freq = performance_frequency; 860 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC); 861 return time; 862 } 863 } 864 865 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 866 if (!has_performance_count) { 867 // javaTimeMillis() doesn't have much percision, 868 // but it is not going to wrap -- so all 64 bits 869 info_ptr->max_value = ALL_64_BITS; 870 871 // this is a wall clock timer, so may skip 872 info_ptr->may_skip_backward = true; 873 info_ptr->may_skip_forward = true; 874 } else { 875 jlong freq = performance_frequency; 876 if (freq < NANOSECS_PER_SEC) { 877 // the performance counter is 64 bits and we will 878 // be multiplying it -- so no wrap in 64 bits 879 info_ptr->max_value = ALL_64_BITS; 880 } else if (freq > NANOSECS_PER_SEC) { 881 // use the max value the counter can reach to 882 // determine the max value which could be returned 883 julong max_counter = (julong)ALL_64_BITS; 884 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC)); 885 } else { 886 // the performance counter is 64 bits and we will 887 // be using it directly -- so no wrap in 64 bits 888 info_ptr->max_value = ALL_64_BITS; 889 } 890 891 // using a counter, so no skipping 892 info_ptr->may_skip_backward = false; 893 info_ptr->may_skip_forward = false; 894 } 895 info_ptr->kind = 
JVMTI_TIMER_ELAPSED; // elapsed not CPU time 896 } 897 898 char* os::local_time_string(char *buf, size_t buflen) { 899 SYSTEMTIME st; 900 GetLocalTime(&st); 901 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 902 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); 903 return buf; 904 } 905 906 bool os::getTimesSecs(double* process_real_time, 907 double* process_user_time, 908 double* process_system_time) { 909 HANDLE h_process = GetCurrentProcess(); 910 FILETIME create_time, exit_time, kernel_time, user_time; 911 BOOL result = GetProcessTimes(h_process, 912 &create_time, 913 &exit_time, 914 &kernel_time, 915 &user_time); 916 if (result != 0) { 917 FILETIME wt; 918 GetSystemTimeAsFileTime(&wt); 919 jlong rtc_millis = windows_to_java_time(wt); 920 jlong user_millis = windows_to_java_time(user_time); 921 jlong system_millis = windows_to_java_time(kernel_time); 922 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS); 923 *process_user_time = ((double) user_millis) / ((double) MILLIUNITS); 924 *process_system_time = ((double) system_millis) / ((double) MILLIUNITS); 925 return true; 926 } else { 927 return false; 928 } 929 } 930 931 void os::shutdown() { 932 933 // allow PerfMemory to attempt cleanup of any persistent resources 934 perfMemory_exit(); 935 936 // flush buffered output, finish log files 937 ostream_abort(); 938 939 // Check for abort hook 940 abort_hook_t abort_hook = Arguments::abort_hook(); 941 if (abort_hook != NULL) { 942 abort_hook(); 943 } 944 } 945 946 947 static BOOL (WINAPI *_MiniDumpWriteDump) ( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION, 948 PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION); 949 950 void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) { 951 HINSTANCE dbghelp; 952 EXCEPTION_POINTERS ep; 953 MINIDUMP_EXCEPTION_INFORMATION mei; 954 MINIDUMP_EXCEPTION_INFORMATION* pmei; 955 956 HANDLE hProcess = 
GetCurrentProcess();
  // --- continuation of the crash-minidump routine (header is above this chunk) ---
  DWORD processId = GetCurrentProcessId();
  HANDLE dumpFile;
  MINIDUMP_TYPE dumpType;
  static const char* cwd;

  // Default is to always create dump for debug builds, on product builds only dump on server versions of Windows.
#ifndef ASSERT
  // If running on a client version of Windows and user has not explicitly enabled dumping
  if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false);
    return;
    // If running on a server version of Windows and user has explicitly disabled dumping
  } else if (os::win32::is_windows_server() && !FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#else
  // Debug build: dump unless the user explicitly disabled it.
  if (!FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#endif

  // dbghelp.dll is loaded lazily; it is not guaranteed to be present on all systems.
  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);

  if (dbghelp == NULL) {
    VMError::report_coredump_status("Failed to load dbghelp.dll", false);
    return;
  }

  // Resolve MiniDumpWriteDump dynamically so we do not link against dbghelp.
  _MiniDumpWriteDump = CAST_TO_FN_PTR(
      BOOL(WINAPI *)( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
                      PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION),
      GetProcAddress(dbghelp, "MiniDumpWriteDump"));

  if (_MiniDumpWriteDump == NULL) {
    VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false);
    return;
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData);

  // Older versions of dbghelp.h doesn't contain all the dumptypes we want, dbghelp.h with
  // API_VERSION_NUMBER 11 or higher contains the ones we want though
#if API_VERSION_NUMBER >= 11
  dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo |
                             MiniDumpWithUnloadedModules);
#endif

  // Dump file is written to the current working directory as hs_err_pid<pid>.mdmp.
  cwd = get_current_directory(NULL, 0);
  jio_snprintf(buffer, bufferSize, "%s\\hs_err_pid%u.mdmp",cwd, current_process_id());
  dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);

  if (dumpFile == INVALID_HANDLE_VALUE) {
    VMError::report_coredump_status("Failed to create file for dumping", false);
    return;
  }
  // If exception details were passed in, include them so the dump captures the faulting context.
  if (exceptionRecord != NULL && contextRecord != NULL) {
    ep.ContextRecord = (PCONTEXT) contextRecord;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }


  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
    DWORD error = GetLastError();
    LPTSTR msgbuf = NULL;

    // Translate the error code into human-readable text for the hs_err report.
    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) {

      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
      LocalFree(msgbuf);
    } else {
      // Call to FormatMessage failed, just include the result from GetLastError
      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
    }
    VMError::report_coredump_status(buffer, false);
  } else {
    VMError::report_coredump_status(buffer, true);
  }

  CloseHandle(dumpFile);
}



// Terminate the VM after running shutdown hooks/cleanup (os::shutdown()).
// The dump_core parameter is ignored here: no core dump is produced on Windows.
void os::abort(bool dump_core)
{
  os::shutdown();
  // no core dump on Windows
  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  _exit(-1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
//  * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

/* Caller must have already run dirname through JVM_NativePath, which removes
   duplicate slashes and converts all instances of '/' into '\\'.
*/ 1073 1074 DIR * 1075 os::opendir(const char *dirname) 1076 { 1077 assert(dirname != NULL, "just checking"); // hotspot change 1078 DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal); 1079 DWORD fattr; // hotspot change 1080 char alt_dirname[4] = { 0, 0, 0, 0 }; 1081 1082 if (dirp == 0) { 1083 errno = ENOMEM; 1084 return 0; 1085 } 1086 1087 /* 1088 * Win32 accepts "\" in its POSIX stat(), but refuses to treat it 1089 * as a directory in FindFirstFile(). We detect this case here and 1090 * prepend the current drive name. 1091 */ 1092 if (dirname[1] == '\0' && dirname[0] == '\\') { 1093 alt_dirname[0] = _getdrive() + 'A' - 1; 1094 alt_dirname[1] = ':'; 1095 alt_dirname[2] = '\\'; 1096 alt_dirname[3] = '\0'; 1097 dirname = alt_dirname; 1098 } 1099 1100 dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal); 1101 if (dirp->path == 0) { 1102 free(dirp, mtInternal); 1103 errno = ENOMEM; 1104 return 0; 1105 } 1106 strcpy(dirp->path, dirname); 1107 1108 fattr = GetFileAttributes(dirp->path); 1109 if (fattr == 0xffffffff) { 1110 free(dirp->path, mtInternal); 1111 free(dirp, mtInternal); 1112 errno = ENOENT; 1113 return 0; 1114 } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) { 1115 free(dirp->path, mtInternal); 1116 free(dirp, mtInternal); 1117 errno = ENOTDIR; 1118 return 0; 1119 } 1120 1121 /* Append "*.*", or possibly "\\*.*", to path */ 1122 if (dirp->path[1] == ':' 1123 && (dirp->path[2] == '\0' 1124 || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) { 1125 /* No '\\' needed for cases like "Z:" or "Z:\" */ 1126 strcat(dirp->path, "*.*"); 1127 } else { 1128 strcat(dirp->path, "\\*.*"); 1129 } 1130 1131 dirp->handle = FindFirstFile(dirp->path, &dirp->find_data); 1132 if (dirp->handle == INVALID_HANDLE_VALUE) { 1133 if (GetLastError() != ERROR_FILE_NOT_FOUND) { 1134 free(dirp->path, mtInternal); 1135 free(dirp, mtInternal); 1136 errno = EACCES; 1137 return 0; 1138 } 1139 } 1140 return dirp; 1141 } 1142 1143 /* parameter dbuf unused on Windows */ 1144 1145 
// Windows emulation of POSIX readdir(3).  Returns the entry captured by the
// previous FindFirstFile/FindNextFile call and advances the search; when the
// search is exhausted the handle is closed and the last entry is still
// returned (subsequent calls return 0).
struct dirent *
os::readdir(DIR *dirp, dirent *dbuf)
{
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    return 0;
  }

  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return 0;
    }
    // No more entries: close the handle now so closedir() becomes a no-op.
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}

// Windows emulation of POSIX closedir(3): closes any live search handle and
// releases the DIR record and its path copy.  Returns 0 on success, -1 with
// errno = EBADF if FindClose fails.
int
os::closedir(DIR *dirp)
{
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path, mtInternal);
  free(dirp, mtInternal);
  return 0;
}

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
// Returns a pointer to a static buffer; empty string on failure.
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf)>0)
    return path_buf;
  else{
    path_buf[0]='\0';
    return path_buf;
  }
}

// True iff 'filename' is a non-empty path naming an existing file/directory.
static bool file_exists(const char* filename) {
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
}

// Build a platform-specific library file name ("<fname>.dll") into 'buffer',
// optionally prefixed by 'pname'.  If 'pname' is a path-separator-delimited
// list, each element is probed in order and the first existing candidate wins.
// Returns false on buffer overflow or when no candidate from a list exists.
bool os::dll_build_name(char *buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  const size_t pnamelen = pname ? strlen(pname) : 0;
  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    // No prefix: just "<fname>.dll".
    jio_snprintf(buffer, buflen, "%s.dll", fname);
    retval = true;
  } else if (c == ':' || c == '\\') {
    // Prefix already ends with a separator (e.g. "C:" or "dir\").
    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // Prefix is a search-path list; probe each element for an existing file.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0 ; i < n ; i++) {
      char* path = pelements[i];
      // Really shouldn't be NULL, but check can't hurt
      size_t plen = (path == NULL) ? 0 : strlen(path);
      if (plen == 0) {
        continue; // skip the empty path values
      }
      const char lastchar = path[plen - 1];
      if (lastchar == ':' || lastchar == '\\') {
        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
      } else {
        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
      }
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0 ; i < n ; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    // Single directory prefix without trailing separator.
    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
    retval = true;
  }
  return retval;
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
// Thin wrapper over _getcwd(); clamps buflen to INT_MAX for the CRT call.
const char* os::get_current_directory(char *buf, size_t buflen) {
  int n = static_cast<int>(buflen);
  if (buflen > INT_MAX)  n = INT_MAX;
  return _getcwd(buf, n);
}

//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if address in
// within the NTDLL address space.
//
static bool _addr_in_ntdll( address addr )
{
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if ( hmod == NULL ) return false;
  if ( !os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod,
                                            &minfo, sizeof(MODULEINFO)) )
    return false;

  // In range [base, base + SizeOfImage)?
  if ( (addr >= minfo.lpBaseOfDll) &&
       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
    return true;
  else
    return false;
}
#endif


// Enumerate all modules for a given process ID
//
// Notice that Windows 95/98/Me and Windows NT/2000/XP have
// different API for doing this. We use PSAPI.DLL on NT based
// Windows and ToolHelp on 95/98/Me.

// Callback function that is called by enumerate_modules() on
// every DLL module.
// Input parameters:
//    int       pid,
//    char*     module_file_name,
//    address   module_base_addr,
//    unsigned  module_size,
//    void*     param
// A non-zero return value stops the enumeration.
typedef int (*EnumModulesCallbackFunc)(int, char *, address, unsigned, void *);

// enumerate_modules for Windows NT, using PSAPI
// Returns the last (non-zero) callback result, or 0 if the enumeration ran to
// completion or could not be started.
static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void * param)
{
  HANDLE   hProcess ;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[ MAX_PATH ];
  int         result = 0;

  if (!os::PSApiDll::PSApiAvailable()) {
    return 0;
  }

  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid ) ;
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
                                        sizeof(modules), &size_needed)) {
    CloseHandle( hProcess );
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  // Cap at MAX_NUM_MODULES: our fixed 'modules' array may be smaller than
  // the number of loaded modules.
  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if(!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
                                          filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
                                            &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = func(pid, filename, (address)modinfo.lpBaseOfDll,
                  modinfo.SizeOfImage, param);
    if (result) break;
  }

  CloseHandle( hProcess ) ;
  return result;
}


// enumerate_modules for Windows 95/98/ME, using TOOLHELP
// Same contract as _enumerate_modules_winnt.
static int _enumerate_modules_windows( int pid, EnumModulesCallbackFunc func, void *param)
{
  HANDLE                hSnapShot ;
  static MODULEENTRY32  modentry ;
  int                   result = 0;

  if (!os::Kernel32Dll::HelpToolsAvailable()) {
    return 0;
  }

  // Get a handle to a Toolhelp snapshot of the system
  hSnapShot = os::Kernel32Dll::CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid ) ;
  if( hSnapShot == INVALID_HANDLE_VALUE ) {
    return FALSE ;
  }

  // iterate through all modules
  modentry.dwSize = sizeof(MODULEENTRY32) ;
  bool not_done = os::Kernel32Dll::Module32First( hSnapShot, &modentry ) != 0;

  while( not_done ) {
    // invoke the callback
    result=func(pid, modentry.szExePath, (address)modentry.modBaseAddr,
                modentry.modBaseSize, param);
    if (result) break;

    modentry.dwSize = sizeof(MODULEENTRY32) ;
    not_done = os::Kernel32Dll::Module32Next( hSnapShot, &modentry ) != 0;
  }

  CloseHandle(hSnapShot);
  return result;
}

// Dispatch module enumeration to the PSAPI (NT) or ToolHelp (9x/Me) variant.
// pid == 0 means "the current process".
int enumerate_modules( int pid, EnumModulesCallbackFunc func, void * param )
{
  // Get current process ID if caller doesn't provide it.
  if (!pid) pid = os::current_process_id();

  if (os::win32::is_nt()) return _enumerate_modules_winnt  (pid, func, param);
  else                    return _enumerate_modules_windows(pid, func, param);
}

// In/out record for _locate_module_by_addr: 'addr' is the lookup key,
// 'full_path'/'base_addr' are filled in when the containing module is found.
struct _modinfo {
  address addr;
  char*   full_path;   // point to a char buffer
  int     buflen;      // size of the buffer
  address base_addr;
};

// enumerate_modules() callback: stop (return 1) at the module whose address
// range contains pmod->addr, recording its path and base address.
static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr,
                                  unsigned size, void * param) {
  struct _modinfo *pmod = (struct _modinfo *)param;
  if (!pmod) return -1;

  if (base_addr     <= pmod->addr &&
      base_addr+size > pmod->addr) {
    // if a buffer is provided, copy path name to the buffer
    if (pmod->full_path) {
      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
    }
    pmod->base_addr = base_addr;
    return 1;
  }
  return 0;
}

// Find the library containing 'addr'; on success 'buf' holds its full path
// and *offset (if given) the offset of addr from the module base.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

// NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
//       return the full path to the DLL file, sometimes it returns path
//       to the corresponding PDB file (debug info); sometimes it only
//       returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr      = addr;
  mi.full_path = buf;
  mi.buflen    = buflen;
  int pid = os::current_process_id();
  if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

// Symbolize 'addr' via the Decoder; on failure clears 'buf' and sets
// *offset (if given) to -1.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset)) {
    return true;
  }
  if (offset != NULL)  *offset  = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
// (uses the address of this very function as a probe for "inside jvm.dll")
static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr,
                           unsigned size, void * param) {
  if (!param) return -1;

  if (base_addr     <= (address)_locate_jvm_dll &&
      base_addr+size > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = base_addr + size;
    return 1;
  }
  return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
// Lazily initializes vm_lib_location on first use.
bool os::address_is_in_vm(address addr) {
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    int pid = os::current_process_id();
    if (!enumerate_modules(pid, _locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
// (callback for enumerate_modules; always returns 0 so the walk continues)
static int _print_module(int pid, char* fname, address base,
                         unsigned size, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  address end_addr = base + size;
  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base, end_addr, fname);
  return 0;
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
// On failure returns NULL with a diagnostic message in 'ebuf'.
void * os::dll_load(const char *name, char *ebuf, int ebuflen)
{
  void * result = LoadLibrary(name);
  if (result != NULL)
  {
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen-1);
    ebuf[ebuflen-1]='\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen-1]='\0';
  int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0);
  if (file_descriptor<0)
  {
    return NULL;
  }

  // Parse the PE header by hand to extract the target machine field.
  uint32_t signature_offset;
  uint16_t lib_arch=0;
  bool failed_to_get_lib_arch=
  (
    //Go to position 3c in the dll
    (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0)
    ||
    // Read location of signature
    (sizeof(signature_offset)!=
      (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset))))
    ||
    //Go to COFF File Header in dll
    //that is located after "signature" (4 bytes long)
    (os::seek_to_file_offset(file_descriptor,
      signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0)
    ||
    //Read field that contains code of architecture
    // that dll was built for
    (sizeof(lib_arch)!=
      (os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch))))
  );

  ::close(file_descriptor);
  if (failed_to_get_lib_arch)
  {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  typedef struct
  {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[]={
    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
  };
#if   (defined _M_IA64)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64;
#elif (defined _M_AMD64)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
  is defined :_M_IA64,_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str=NULL,*lib_arch_str=NULL;
  for (unsigned int i=0;i<ARRAY_SIZE(arch_array);i++)
  {
    if (lib_arch==arch_array[i].arch_code)
      lib_arch_str=arch_array[i].arch_name;
    if (running_arch==arch_array[i].arch_code)
      running_arch_str=arch_array[i].arch_name;
  }

  assert(running_arch_str,
         "Didn't find runing architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch)
  {
    return NULL;
  }

  if (lib_arch_str!=NULL)
  {
    ::_snprintf(ebuf, ebuflen-1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str,running_arch_str);
  }
  else
  {
    // don't know what architecture this dll was built for
    ::_snprintf(ebuf, ebuflen-1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch,running_arch_str);
  }

  return NULL;
}


// List all loaded modules of this process on 'st' (used in hs_err reports).
void os::print_dll_info(outputStream *st) {
  int pid = os::current_process_id();
  st->print_cr("Dynamic libraries:");
  enumerate_modules(pid, _print_module, (void *)st);
}

// On Windows the brief and full OS descriptions are the same.
void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}

void os::print_os_info(outputStream* st) {
  st->print("OS:");

  os::win32::print_windows_version(st);
}

// Decode GetVersionEx() data into a human-readable Windows product name,
// build number and service pack, written to 'st'.
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  SYSTEM_INFO si;

  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);

  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("N/A");
    return;
  }

  // Encode major.minor as a single comparable integer, e.g. 5.2 -> 5002.
  int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;

  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  if (os_vers >= 5002) {
    // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
    // find out whether we are running on 64 bit processor or not.
    if (os::Kernel32Dll::GetNativeSystemInfoAvailable()) {
      os::Kernel32Dll::GetNativeSystemInfo(&si);
    } else {
      GetSystemInfo(&si);
    }
  }

  if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
    switch (os_vers) {
    case 3051: st->print(" Windows NT 3.51"); break;
    case 4000: st->print(" Windows NT 4.0"); break;
    case 5000: st->print(" Windows 2000"); break;
    case 5001: st->print(" Windows XP"); break;
    case 5002:
      // 5.2 is either XP x64 (workstation) or Server 2003.
      if (osvi.wProductType == VER_NT_WORKSTATION &&
          si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
        st->print(" Windows XP x64 Edition");
      } else {
        st->print(" Windows Server 2003 family");
      }
      break;

    case 6000:
      if (osvi.wProductType == VER_NT_WORKSTATION) {
        st->print(" Windows Vista");
      } else {
        st->print(" Windows Server 2008");
      }
      break;

    case 6001:
      if (osvi.wProductType == VER_NT_WORKSTATION) {
        st->print(" Windows 7");
      } else {
        st->print(" Windows Server 2008 R2");
      }
      break;

    case 6002:
      if (osvi.wProductType == VER_NT_WORKSTATION) {
        st->print(" Windows 8");
      } else {
        st->print(" Windows Server 2012");
      }
      break;

    case 6003:
      if (osvi.wProductType == VER_NT_WORKSTATION) {
        st->print(" Windows 8.1");
      } else {
        st->print(" Windows Server 2012 R2");
      }
      break;

    default: // future os
      // Unrecognized windows, print out its major and minor versions
      st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
    }
  } else {
    // Win32 (non-NT) platform: the 9x/Me family.
    switch (os_vers) {
    case 4000: st->print(" Windows 95"); break;
    case 4010: st->print(" Windows 98"); break;
    case 4090: st->print(" Windows Me"); break;
    default: // future windows, print out its major and minor versions
      st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
    }
  }

  if (os_vers >= 6000 && si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
    st->print(" , 64 bit");
  }

  st->print(" Build %d", osvi.dwBuildNumber);
  st->print(" %s", osvi.szCSDVersion);           // service pack
  st->cr();
}

void os::pd_print_cpu_info(outputStream* st) {
  // Nothing to do for now.
}

// Print page size, physical memory and swap figures (in kB) to 'st'.
void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);

  st->print(", physical %uk", os::physical_memory() >> 10);
  st->print("(%uk free)", os::available_memory() >> 10);

  // NOTE(review): ullTotalPageFile/ullAvailPageFile are 64-bit (DWORDLONG)
  // but are printed with %u — values above 4G kB will be truncated; verify
  // against how later JDK versions format these fields.
  st->print(", swap %uk", ms.ullTotalPageFile >> 10);
  st->print("(%uk free)", ms.ullAvailPageFile >> 10);
  st->cr();
}

// Print the Windows EXCEPTION_RECORD passed as 'siginfo' in a form suitable
// for the hs_err file, with special-casing for access violations and for
// in-page errors touching the CDS archive mapping.
void os::print_siginfo(outputStream *st, void *siginfo) {
  EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo;
  st->print("siginfo:");
  st->print(" ExceptionCode=0x%x", er->ExceptionCode);

  if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      er->NumberParameters >= 2) {
    // ExceptionInformation[0]: 0 = read fault, 1 = write fault;
    // ExceptionInformation[1]: the faulting address.
    switch (er->ExceptionInformation[0]) {
    case 0: st->print(", reading address"); break;
    case 1: st->print(", writing address"); break;
    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                       er->ExceptionInformation[0]);
    }
    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR &&
             er->NumberParameters >= 2 && UseSharedSpaces) {
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) {
      st->print("\n\nError accessing class data sharing archive." \
                " Mapped file inaccessible during execution, " \
                " possible disk/network problem.");
    }
  } else {
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}

// Cache for os::jvm_path(); filled on first call.
static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::created_by_gamma_launcher()) {
    // Support for the gamma launcher. Check for an
    // JAVA_HOME environment variable
    // and fix up the path so it looks like
    // libjvm.so is installed there (append a fake suffix
    // hotspot/libjvm.so).
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {

      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      if (0 != _access(buf, 0)) {
        // "jre\bin" does not exist: assume a modules image and use "bin".
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  if(buf[0] == '\0') {
    // Not the gamma launcher (or JAVA_HOME unusable): ask the OS for the
    // path of the loaded jvm.dll module itself.
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  strncpy(saved_jvm_path, buf, MAX_PATH);
}


// On 32-bit Windows, stdcall-exported JNI symbols carry a leading underscore...
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}


// ...and an "@<bytes of arguments>" suffix.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("@%d", args_size * sizeof(int));
#endif
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c
// Copies a message for the last OS (GetLastError) or CRT (errno) failure
// into 'buf'; returns the message length, or 0 if there was no error.
size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
                 FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
                 NULL,
                 errval,
                 0,
                 buf,
                 (DWORD)len,
                 NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}

// Numeric last error: GetLastError() if set, otherwise errno.
int os::get_last_error() {
  DWORD error = GetLastError();
  if (error == 0)
    error = errno;
  return (int)error;
}

// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;

// Handler installed for user-visible signals: records the signal for the
// signal thread and re-installs itself (CRT handlers are one-shot).
static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}

void* os::user_handler() {
  return (void*) UserHandler;
}

// Install 'handler' for 'signal_number', returning the previous handler.
// SIGBREAK is intercepted in a static variable (see note above) instead of
// being given to the CRT, unless -Xrs disabled signal usage.
void* os::signal(int signal_number, void* handler) {
  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
    void (*oldHandler)(int) = sigbreakHandler;
    sigbreakHandler = (void (*)(int)) handler;
    return (void*) oldHandler;
  } else {
    return (void*)::signal(signal_number, (void (*)(int))handler);
  }
}

void os::signal_raise(int signal_number) {
  raise(signal_number);
}

// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events. We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
  switch(event) {
    case CTRL_C_EVENT:
      if (is_error_reported()) {
        // Ctrl-C is pressed during error reporting, likely because the error
        // handler fails to abort. Let VM die immediately.
        os::die();
      }

      os::signal_raise(SIGINT);
      return TRUE;
      break;
    case CTRL_BREAK_EVENT:
      if (sigbreakHandler != NULL) {
        (*sigbreakHandler)(SIGBREAK);
      }
      return TRUE;
      break;
    case CTRL_LOGOFF_EVENT: {
      // Don't terminate JVM if it is running in a non-interactive session,
      // such as a service process.
      USEROBJECTFLAGS flags;
      HANDLE handle = GetProcessWindowStation();
      if (handle != NULL &&
          GetUserObjectInformation(handle, UOI_FLAGS, &flags,
          sizeof( USEROBJECTFLAGS), NULL)) {
        // If it is a non-interactive session, let next handler to deal
        // with it.
        if ((flags.dwFlags & WSF_VISIBLE) == 0) {
          return FALSE;
        }
      }
    }
    // Intentional fall-through: an interactive logoff is treated the same
    // as a close/shutdown event.
    case CTRL_CLOSE_EVENT:
    case CTRL_SHUTDOWN_EVENT:
      os::signal_raise(SIGTERM);
      return TRUE;
      break;
    default:
      break;
  }
  return FALSE;
}

/*
 * The following code is moved from os.cpp for making this
 * code platform specific, which it is by its very nature.
 */

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd(){
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Semaphore released once per pending signal; waited on by signal_wait().
static HANDLE sig_sem = NULL;

void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.  For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified.  This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case.  See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}

// Record one occurrence of 'signal_number' and wake up the signal thread.
void os::signal_notify(int signal_number) {
  BOOL ret;
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[signal_number]);
    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
    assert(ret != 0, "ReleaseSemaphore() failed");
  }
}

// Scan pending_signals[] for a signal to deliver.  If none is pending and
// wait_for_signal is false, return -1; otherwise block on sig_sem (as a
// safepoint-visible blocked thread, honoring external suspension) and rescan.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // CAS-decrement so concurrent scanners cannot both claim the same count.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        // Give the count back so the signal is not lost, then suspend.
        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
        assert(ret != 0, "ReleaseSemaphore() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

// Non-blocking poll for a pending signal; -1 if none.
int os::signal_lookup() {
  return check_pending_signals(false);
}

// Block until a signal is pending and return it.
int os::signal_wait() {
  return check_pending_signals(true);
}

// Implicit OS exception handling

// Redirect a structured exception to 'handler': saves the faulting pc in the
// current JavaThread and rewrites the context's instruction pointer so that
// execution resumes in the handler.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) {
  JavaThread* thread = JavaThread::current();
  // Save pc in thread
#ifdef _M_IA64
  // Do not blow up if no thread info available.
  if (thread) {
    // Saving PRECISE pc (with slot information) in thread.
    uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
    // Convert precise PC into "Unix" format
    precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
    thread->set_saved_exception_pc((address)precise_pc);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
  // Clear out psr.ri (= Restart Instruction) in order to continue
  // at the beginning of the target bundle.
  exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
  assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
#elif _M_AMD64
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
#else
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
#endif

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}


// Used for PostMortemDump
extern "C" void safepoints();
extern "C" void find(int x);
extern "C" void events();

// According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real world experience shows that occasionally
// the execution of an illegal instruction can generate the exception code 0xC000001E. This
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).

#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E

// From "Execution Protection in the Windows Operating System" draft 0.35
// Once a system header becomes available, the "real" define should be
// included or copied here.
#define EXCEPTION_INFO_EXEC_VIOLATION 0x08

// Handle NAT Bit consumption on IA64.
#ifdef _M_IA64
#define EXCEPTION_REG_NAT_CONSUMPTION    STATUS_REG_NAT_CONSUMPTION
#endif

// Windows Vista/2008 heap corruption check
#define EXCEPTION_HEAP_CORRUPTION        0xC0000374

// Expands to a "name", value pair for building exception-code tables.
#define def_excpt(val) #val, val

// Maps a human-readable exception name to its numeric code.
struct siglabel {
  char *name;
  int   number;
};

// All Visual C++ exceptions thrown from code generated by the Microsoft Visual
// C++ compiler contain this error code.
Because this is a compiler-generated 2170 // error, the code is not listed in the Win32 API header files. 2171 // The code is actually a cryptic mnemonic device, with the initial "E" 2172 // standing for "exception" and the final 3 bytes (0x6D7363) representing the 2173 // ASCII values of "msc". 2174 2175 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363 2176 2177 2178 struct siglabel exceptlabels[] = { 2179 def_excpt(EXCEPTION_ACCESS_VIOLATION), 2180 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), 2181 def_excpt(EXCEPTION_BREAKPOINT), 2182 def_excpt(EXCEPTION_SINGLE_STEP), 2183 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), 2184 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND), 2185 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO), 2186 def_excpt(EXCEPTION_FLT_INEXACT_RESULT), 2187 def_excpt(EXCEPTION_FLT_INVALID_OPERATION), 2188 def_excpt(EXCEPTION_FLT_OVERFLOW), 2189 def_excpt(EXCEPTION_FLT_STACK_CHECK), 2190 def_excpt(EXCEPTION_FLT_UNDERFLOW), 2191 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO), 2192 def_excpt(EXCEPTION_INT_OVERFLOW), 2193 def_excpt(EXCEPTION_PRIV_INSTRUCTION), 2194 def_excpt(EXCEPTION_IN_PAGE_ERROR), 2195 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION), 2196 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2), 2197 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION), 2198 def_excpt(EXCEPTION_STACK_OVERFLOW), 2199 def_excpt(EXCEPTION_INVALID_DISPOSITION), 2200 def_excpt(EXCEPTION_GUARD_PAGE), 2201 def_excpt(EXCEPTION_INVALID_HANDLE), 2202 def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION), 2203 def_excpt(EXCEPTION_HEAP_CORRUPTION), 2204 #ifdef _M_IA64 2205 def_excpt(EXCEPTION_REG_NAT_CONSUMPTION), 2206 #endif 2207 NULL, 0 2208 }; 2209 2210 const char* os::exception_name(int exception_code, char *buf, size_t size) { 2211 for (int i = 0; exceptlabels[i].name != NULL; i++) { 2212 if (exceptlabels[i].number == exception_code) { 2213 jio_snprintf(buf, size, "%s", exceptlabels[i].name); 2214 return buf; 2215 } 2216 } 2217 2218 return NULL; 2219 } 2220 2221 
//----------------------------------------------------------------------------- 2222 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2223 // handle exception caused by idiv; should only happen for -MinInt/-1 2224 // (division by zero is handled explicitly) 2225 #ifdef _M_IA64 2226 assert(0, "Fix Handle_IDiv_Exception"); 2227 #elif _M_AMD64 2228 PCONTEXT ctx = exceptionInfo->ContextRecord; 2229 address pc = (address)ctx->Rip; 2230 assert(pc[0] == 0xF7, "not an idiv opcode"); 2231 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2232 assert(ctx->Rax == min_jint, "unexpected idiv exception"); 2233 // set correct result values and continue after idiv instruction 2234 ctx->Rip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2235 ctx->Rax = (DWORD)min_jint; // result 2236 ctx->Rdx = (DWORD)0; // remainder 2237 // Continue the execution 2238 #else 2239 PCONTEXT ctx = exceptionInfo->ContextRecord; 2240 address pc = (address)ctx->Eip; 2241 assert(pc[0] == 0xF7, "not an idiv opcode"); 2242 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2243 assert(ctx->Eax == min_jint, "unexpected idiv exception"); 2244 // set correct result values and continue after idiv instruction 2245 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2246 ctx->Eax = (DWORD)min_jint; // result 2247 ctx->Edx = (DWORD)0; // remainder 2248 // Continue the execution 2249 #endif 2250 return EXCEPTION_CONTINUE_EXECUTION; 2251 } 2252 2253 #ifndef _WIN64 2254 //----------------------------------------------------------------------------- 2255 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2256 // handle exception caused by native method modifying control word 2257 PCONTEXT ctx = exceptionInfo->ContextRecord; 2258 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2259 2260 switch (exception_code) { 2261 case EXCEPTION_FLT_DENORMAL_OPERAND: 2262 case EXCEPTION_FLT_DIVIDE_BY_ZERO: 2263 case 
EXCEPTION_FLT_INEXACT_RESULT: 2264 case EXCEPTION_FLT_INVALID_OPERATION: 2265 case EXCEPTION_FLT_OVERFLOW: 2266 case EXCEPTION_FLT_STACK_CHECK: 2267 case EXCEPTION_FLT_UNDERFLOW: 2268 jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std()); 2269 if (fp_control_word != ctx->FloatSave.ControlWord) { 2270 // Restore FPCW and mask out FLT exceptions 2271 ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0; 2272 // Mask out pending FLT exceptions 2273 ctx->FloatSave.StatusWord &= 0xffffff00; 2274 return EXCEPTION_CONTINUE_EXECUTION; 2275 } 2276 } 2277 2278 if (prev_uef_handler != NULL) { 2279 // We didn't handle this exception so pass it to the previous 2280 // UnhandledExceptionFilter. 2281 return (prev_uef_handler)(exceptionInfo); 2282 } 2283 2284 return EXCEPTION_CONTINUE_SEARCH; 2285 } 2286 #else //_WIN64 2287 /* 2288 On Windows, the mxcsr control bits are non-volatile across calls 2289 See also CR 6192333 2290 If EXCEPTION_FLT_* happened after some native method modified 2291 mxcsr - it is not a jvm fault. 2292 However should we decide to restore of mxcsr after a faulty 2293 native method we can uncomment following code 2294 jint MxCsr = INITIAL_MXCSR; 2295 // we can't use StubRoutines::addr_mxcsr_std() 2296 // because in Win64 mxcsr is not saved there 2297 if (MxCsr != ctx->MxCsr) { 2298 ctx->MxCsr = MxCsr; 2299 return EXCEPTION_CONTINUE_EXECUTION; 2300 } 2301 2302 */ 2303 #endif // _WIN64 2304 2305 2306 static inline void report_error(Thread* t, DWORD exception_code, 2307 address addr, void* siginfo, void* context) { 2308 VMError err(t, exception_code, addr, siginfo, context); 2309 err.report_and_die(); 2310 2311 // If UseOsErrorReporting, this will return here and save the error file 2312 // somewhere where we can find it in the minidump. 
}

//-----------------------------------------------------------------------------
// Top-level structured exception filter for the VM.  Classifies a hardware
// exception and either redirects the faulting thread to a VM continuation
// stub (via Handle_Exception), resumes execution, reports a fatal error, or
// passes the exception on to the next handler.  The order of the checks
// below is significant.
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#ifdef _M_IA64
  // On Itanium, we need the "precise pc", which has the slot number coded
  // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
  address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
  // Convert the pc to "Unix format", which has the slot number coded
  // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
  // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
  // information is saved in the Unix format.
  address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
#elif _M_AMD64
  address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
  Thread* t = ThreadLocalStorage::get_thread_slow();          // slow & steady

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      bool instr_spans_page_boundary =
        (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                         (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start =
            (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          if (PrintMiscellaneous && Verbose) {
            char buf[256];
            jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                         "at " INTPTR_FORMAT
                         ", unguarding " INTPTR_FORMAT ": %s", addr,
                         page_start, (res ? "success" : strerror(errno)));
            tty->print_raw_cr(buf);
          }

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here.  Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM.  Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop.  This condition is probably even more unlikely
          // than the first.
          //
          // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are unnecessary complication:
          // this handler is a best-effort safety net, not a complete solution.
          // It is disabled by default and should only be used as a workaround
          // in case we missed any no-execute-unsafe VM code.

          last_addr = addr;

          return EXCEPTION_CONTINUE_EXECUTION;
        }
      }

      // Last unguard failed or not unguarding
      tty->print_raw_cr("Execution protection violation");
      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    }
  }
#endif // _WIN64

  // Check to see if we caught the safepoint code in the
  // process of write protecting the memory serialization page.
  // It write enables the page immediately after protecting it
  // so just return.
  if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
    JavaThread* thread = (JavaThread*) t;
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];
    if ( os::is_memory_serialize_page(thread, addr) ) {
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  // Fault deliberately provoked by the VM_Version stub to probe whether the
  // OS saves/restores AVX registers across a context switch.
  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
      VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
  }

  if (t != NULL && t->is_Java_thread()) {
    JavaThread* thread = (JavaThread*) t;
    bool in_java = thread->thread_state() == _thread_in_Java;

    // Handle potential stack overflows up front.
    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
      if (os::uses_stack_guard_pages()) {
#ifdef _M_IA64
        // Use guard page for register stack.
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        // Check for a register stack overflow on Itanium
        if (thread->addr_inside_register_stack_red_zone(addr)) {
          // Fatal red zone violation happens if the Java program
          // catches a StackOverflow error and does so much processing
          // that it runs beyond the unprotected yellow guard zone. As
          // a result, we are out of here.
          fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
        } else if(thread->addr_inside_register_stack(addr)) {
          // Disable the yellow zone which sets the state that
          // we've got a stack overflow problem.
          if (thread->stack_yellow_zone_enabled()) {
            thread->disable_stack_yellow_zone();
          }
          // Give us some room to process the exception.
          thread->disable_register_stack_guard();
          // Tracing with +Verbose.
          if (Verbose) {
            tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
            tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
            tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
            tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
                          thread->register_stack_base(),
                          thread->register_stack_base() + thread->stack_size());
          }

          // Reguard the permanent register stack red zone just to be sure.
          // We saw Windows silently disabling this without telling us.
          thread->enable_register_stack_red_zone();

          return Handle_Exception(exceptionInfo,
            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
#endif
        if (thread->stack_yellow_zone_enabled()) {
          // Yellow zone violation.  The o/s has unprotected the first yellow
          // zone page for us.  Note:  must call disable_stack_yellow_zone to
          // update the enabled status, even if the zone contains only one page.
          thread->disable_stack_yellow_zone();
          // If not in java code, return and hope for the best.
          return in_java ? Handle_Exception(exceptionInfo,
            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
            :  EXCEPTION_CONTINUE_EXECUTION;
        } else {
          // Fatal red zone violation.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
          report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                       exceptionInfo->ContextRecord);
          return EXCEPTION_CONTINUE_SEARCH;
        }
      } else if (in_java) {
        // JVM-managed guard pages cannot be used on win95/98.  The o/s provides
        // a one-time-only guard page, which it has released to us.  The next
        // stack overflow on this thread will result in an ACCESS_VIOLATION.
        return Handle_Exception(exceptionInfo,
          SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
      } else {
        // Can only return and hope for the best.  Further stack growth will
        // result in an ACCESS_VIOLATION.
        return EXCEPTION_CONTINUE_EXECUTION;
      }
    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
      // Either stack overflow or null pointer exception.
      if (in_java) {
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        address stack_end = thread->stack_base() - thread->stack_size();
        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
          // Stack overflow.
          assert(!os::uses_stack_guard_pages(),
            "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        //
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        //
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          if (os::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
        {
#ifdef _WIN64
          //
          // If it's a legal stack address map the entire region in
          //
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) {
            addr = (address)((uintptr_t)addr &
                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
            os::commit_memory((char *)addr, thread->stack_base() - addr,
                              !ExecMem);
            return EXCEPTION_CONTINUE_EXECUTION;
          }
          else
#endif
          {
            // Null pointer exception.
#ifdef _M_IA64
            // Process implicit null checks in compiled code. Note: Implicit null checks
            // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
            if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
              CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
              // Handle implicit null check in UEP method entry
              if (cb && (cb->is_frame_complete_at(pc) ||
                  (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
                if (Verbose) {
                  intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
                  tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
                  tty->print_cr("      to addr " INTPTR_FORMAT, addr);
                  tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
                                *(bundle_start + 1), *bundle_start);
                }
                return Handle_Exception(exceptionInfo,
                  SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
              }
            }

            // Implicit null checks were processed above.  Hence, we should not reach
            // here in the usual case => die!
            if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;

#else // !IA64

            // Windows 98 reports faulting addresses incorrectly
            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
                !os::win32::is_nt()) {
              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
            }
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;
#endif
          }
        }
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
        address addr = JNI_FastGetField::find_slowcase_pc(pc);
        if (addr != (address)-1) {
          return Handle_Exception(exceptionInfo, addr);
        }
      }
#endif

      // Stack overflow or null pointer exception in native code.
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#if defined _M_IA64
    else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
              exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
      M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);

      // Compiled method patched to be non entrant? Following conditions must apply:
      // 1. must be first instruction in bundle
      // 2. must be a break instruction with appropriate code
      if((((uint64_t) pc & 0x0F) == 0) &&
         (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
        return Handle_Exception(exceptionInfo,
                                (address)SharedRuntime::get_handle_wrong_method_stub());
      }
    } // /EXCEPTION_ILLEGAL_INSTRUCTION
#endif


    if (in_java) {
      switch (exception_code) {
      // Arithmetic traps in compiled Java code are turned into implicit
      // exceptions / fixed-up results.
      case EXCEPTION_INT_DIVIDE_BY_ZERO:
        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

      case EXCEPTION_INT_OVERFLOW:
        return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }
#ifndef _WIN64
    if (((thread->thread_state() == _thread_in_Java) ||
         (thread->thread_state() == _thread_in_native)) &&
         exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION)
    {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
#endif //_WIN64
  }

  if (exception_code != EXCEPTION_BREAKPOINT) {
    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                 exceptionInfo->ContextRecord);
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

#ifndef _WIN64
// Special care for fast JNI accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
// the heap gets shrunk before the field access.
// Need to install our own structured exception handler since native code may
// install its own.
// SEH filter for the fast JNI accessor wrappers below: if the access
// violation occurred at a known fast-accessor pc, redirect to the
// corresponding slow-case continuation; otherwise keep searching.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Generates a wrapper that invokes the generated fast accessor under a
// structured exception handler (see filter above).  Falls back to returning 0
// if the filter swallowed the fault without redirecting.
#define DEFINE_FAST_GETFIELD(Return,Fieldname,Result) \
Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) { \
  __try { \
    return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, obj, fieldID); \
  } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { \
  } \
  return 0; \
}

DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
DEFINE_FAST_GETFIELD(jchar,    char,   Char)
DEFINE_FAST_GETFIELD(jshort,   short,  Short)
DEFINE_FAST_GETFIELD(jint,     int,    Int)
DEFINE_FAST_GETFIELD(jlong,    long,   Long)
DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
DEFINE_FAST_GETFIELD(jdouble,  double, Double)

// Map a BasicType to the matching wrapper defined above; returns (address)-1
// only on the unreachable default path.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
    case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
    case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
    case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
    case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
    case T_INT:     return (address)jni_fast_GetIntField_wrapper;
    case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
    case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
    case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
    default:        ShouldNotReachHere();
  }
  return (address)-1;
}
#endif

// Runs funcPtr under the VM's top-level SEH filter so a crash in the test
// function produces a normal hs_err dump.
void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
  // Install a win32 structured exception handler around the test
  // function call so the VM can generate an error dump if needed.
  __try {
    (*funcPtr)();
  } __except(topLevelExceptionFilter(
             (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }
}

// Virtual Memory

int os::vm_page_size() { return os::win32::vm_page_size(); }
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}

// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
//   + select Control Panel -> Administrative Tools -> Local Security Policy
//   + select Local Policies -> User Rights Assignment
//   + double click "Lock pages in memory", add users and/or groups
//   + reboot
// Note the above steps are needed for administrator as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment it only uses large page if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future, if so the code below needs to be revisited.

#ifndef MEM_LARGE_PAGES
#define MEM_LARGE_PAGES 0x20000000
#endif

// Process/token handles held only for the duration of large page
// initialization (see request_lock_memory_privilege / cleanup below).
static HANDLE    _hProcess;
static HANDLE    _hToken;

// Container for NUMA node list info
class NUMANodeListHolder {
private:
  int *_numa_used_node_list;  // allocated below
  int _numa_used_node_count;

  void free_node_list() {
    if (_numa_used_node_list != NULL) {
      FREE_C_HEAP_ARRAY(int, _numa_used_node_list, mtInternal);
    }
  }

public:
  NUMANodeListHolder() {
    _numa_used_node_count = 0;
    _numa_used_node_list = NULL;
    // do rest of initialization in build routine (after function pointers are set up)
  }

  ~NUMANodeListHolder() {
    free_node_list();
  }

  // Populate the list with the NUMA nodes covered by this process's
  // affinity mask.  Returns true only when more than one node is in use
  // (interleaving is pointless on a single node).
  bool build() {
    DWORD_PTR proc_aff_mask;
    DWORD_PTR sys_aff_mask;
    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
    ULONG highest_node_number;
    if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false;
    free_node_list();
    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
    for (unsigned int i = 0; i <= highest_node_number; i++) {
      ULONGLONG proc_mask_numa_node;
      if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
      // Keep only nodes whose processors intersect our affinity mask.
      if ((proc_aff_mask & proc_mask_numa_node)!=0) {
        _numa_used_node_list[_numa_used_node_count++] = i;
      }
    }
    return (_numa_used_node_count > 1);
  }

  int get_count() {return _numa_used_node_count;}
  int get_node_list_entry(int n) {
    // for indexes out of range, returns -1
    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
  }

} numa_node_list_holder;



static size_t _large_page_size = 0;

// True when both the kernel32 large-page entry point and the advapi32
// privilege APIs could be resolved.
static bool resolve_functions_for_large_page_init() {
  return os::Kernel32Dll::GetLargePageMinimumAvailable() &&
    os::Advapi32Dll::AdvapiAvailable();
}

// Enable SeLockMemoryPrivilege on this process's token (required for
// MEM_LARGE_PAGES allocations).  Leaves _hProcess/_hToken open for
// cleanup_after_large_page_init().
static bool request_lock_memory_privilege() {
  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
                                os::current_process_id());

  LUID luid;
  if (_hProcess != NULL &&
      os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
      os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

    TOKEN_PRIVILEGES tp;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Luid = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See MSDN document.
    if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
        (GetLastError() == ERROR_SUCCESS)) {
      return true;
    }
  }

  return false;
}

// Release the handles opened by request_lock_memory_privilege().
static void cleanup_after_large_page_init() {
  if (_hProcess) CloseHandle(_hProcess);
  _hProcess = NULL;
  if (_hToken) CloseHandle(_hToken);
  _hToken = NULL;
}

// Decide whether NUMA interleaving can be used: rounds
// NUMAInterleaveGranularity up to a legal value and builds the node list.
// Warns (only if the flag was set explicitly) and returns false otherwise.
static bool numa_interleaving_init() {
  bool success = false;
  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

  // print a warning if UseNUMAInterleaving flag is specified on command line
  bool warn_on_failure = use_numa_interleaving_specified;
# define WARN(msg) if (warn_on_failure) { warning(msg); }

  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);

  if (os::Kernel32Dll::NumaCallsAvailable()) {
    if (numa_node_list_holder.build()) {
      if (PrintMiscellaneous && Verbose) {
        tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
        for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
          tty->print("%d ", numa_node_list_holder.get_node_list_entry(i));
        }
        tty->print("\n");
      }
      success = true;
    } else {
      WARN("Process does not cover multiple NUMA nodes.");
    }
  } else {
    WARN("NUMA Interleaving is not supported by the operating system.");
  }
  if (!success) {
    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
  }
  return success;
#undef WARN
}

// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
//  * UseNUMAInterleaving requires a separate node for each piece
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, DWORD prot,
                                         bool should_inject_error=false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ?
NUMAInterleaveGranularity : page_size; 2897 2898 // first reserve enough address space in advance since we want to be 2899 // able to break a single contiguous virtual address range into multiple 2900 // large page commits but WS2003 does not allow reserving large page space 2901 // so we just use 4K pages for reserve, this gives us a legal contiguous 2902 // address space. then we will deallocate that reservation, and re alloc 2903 // using large pages 2904 const size_t size_of_reserve = bytes + chunk_size; 2905 if (bytes > size_of_reserve) { 2906 // Overflowed. 2907 return NULL; 2908 } 2909 p_buf = (char *) VirtualAlloc(addr, 2910 size_of_reserve, // size of Reserve 2911 MEM_RESERVE, 2912 PAGE_READWRITE); 2913 // If reservation failed, return NULL 2914 if (p_buf == NULL) return NULL; 2915 MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC); 2916 os::release_memory(p_buf, bytes + chunk_size); 2917 2918 // we still need to round up to a page boundary (in case we are using large pages) 2919 // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size) 2920 // instead we handle this in the bytes_to_rq computation below 2921 p_buf = (char *) align_size_up((size_t)p_buf, page_size); 2922 2923 // now go through and allocate one chunk at a time until all bytes are 2924 // allocated 2925 size_t bytes_remaining = bytes; 2926 // An overflow of align_size_up() would have been caught above 2927 // in the calculation of size_of_reserve. 
2928 char * next_alloc_addr = p_buf; 2929 HANDLE hProc = GetCurrentProcess(); 2930 2931 #ifdef ASSERT 2932 // Variable for the failure injection 2933 long ran_num = os::random(); 2934 size_t fail_after = ran_num % bytes; 2935 #endif 2936 2937 int count=0; 2938 while (bytes_remaining) { 2939 // select bytes_to_rq to get to the next chunk_size boundary 2940 2941 size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size)); 2942 // Note allocate and commit 2943 char * p_new; 2944 2945 #ifdef ASSERT 2946 bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after); 2947 #else 2948 const bool inject_error_now = false; 2949 #endif 2950 2951 if (inject_error_now) { 2952 p_new = NULL; 2953 } else { 2954 if (!UseNUMAInterleaving) { 2955 p_new = (char *) VirtualAlloc(next_alloc_addr, 2956 bytes_to_rq, 2957 flags, 2958 prot); 2959 } else { 2960 // get the next node to use from the used_node_list 2961 assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected"); 2962 DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count()); 2963 p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc, 2964 next_alloc_addr, 2965 bytes_to_rq, 2966 flags, 2967 prot, 2968 node); 2969 } 2970 } 2971 2972 if (p_new == NULL) { 2973 // Free any allocated pages 2974 if (next_alloc_addr > p_buf) { 2975 // Some memory was committed so release it. 2976 size_t bytes_to_release = bytes - bytes_remaining; 2977 // NMT has yet to record any individual blocks, so it 2978 // need to create a dummy 'reserve' record to match 2979 // the release. 
2980 MemTracker::record_virtual_memory_reserve((address)p_buf, 2981 bytes_to_release, CALLER_PC); 2982 os::release_memory(p_buf, bytes_to_release); 2983 } 2984 #ifdef ASSERT 2985 if (should_inject_error) { 2986 if (TracePageSizes && Verbose) { 2987 tty->print_cr("Reserving pages individually failed."); 2988 } 2989 } 2990 #endif 2991 return NULL; 2992 } 2993 2994 bytes_remaining -= bytes_to_rq; 2995 next_alloc_addr += bytes_to_rq; 2996 count++; 2997 } 2998 // Although the memory is allocated individually, it is returned as one. 2999 // NMT records it as one block. 3000 if ((flags & MEM_COMMIT) != 0) { 3001 MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC); 3002 } else { 3003 MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC); 3004 } 3005 3006 // made it this far, success 3007 return p_buf; 3008 } 3009 3010 3011 3012 void os::large_page_init() { 3013 if (!UseLargePages) return; 3014 3015 // print a warning if any large page related flag is specified on command line 3016 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) || 3017 !FLAG_IS_DEFAULT(LargePageSizeInBytes); 3018 bool success = false; 3019 3020 # define WARN(msg) if (warn_on_failure) { warning(msg); } 3021 if (resolve_functions_for_large_page_init()) { 3022 if (request_lock_memory_privilege()) { 3023 size_t s = os::Kernel32Dll::GetLargePageMinimum(); 3024 if (s) { 3025 #if defined(IA32) || defined(AMD64) 3026 if (s > 4*M || LargePageSizeInBytes > 4*M) { 3027 WARN("JVM cannot use large pages bigger than 4mb."); 3028 } else { 3029 #endif 3030 if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) { 3031 _large_page_size = LargePageSizeInBytes; 3032 } else { 3033 _large_page_size = s; 3034 } 3035 success = true; 3036 #if defined(IA32) || defined(AMD64) 3037 } 3038 #endif 3039 } else { 3040 WARN("Large page is not supported by the processor."); 3041 } 3042 } else { 3043 WARN("JVM cannot use large page memory because it does not have enough 
privilege to lock pages in memory."); 3044 } 3045 } else { 3046 WARN("Large page is not supported by the operating system."); 3047 } 3048 #undef WARN 3049 3050 const size_t default_page_size = (size_t) vm_page_size(); 3051 if (success && _large_page_size > default_page_size) { 3052 _page_sizes[0] = _large_page_size; 3053 _page_sizes[1] = default_page_size; 3054 _page_sizes[2] = 0; 3055 } 3056 3057 cleanup_after_large_page_init(); 3058 UseLargePages = success; 3059 } 3060 3061 // On win32, one cannot release just a part of reserved memory, it's an 3062 // all or nothing deal. When we split a reservation, we must break the 3063 // reservation into two reservations. 3064 void os::pd_split_reserved_memory(char *base, size_t size, size_t split, 3065 bool realloc) { 3066 if (size > 0) { 3067 release_memory(base, size); 3068 if (realloc) { 3069 reserve_memory(split, base); 3070 } 3071 if (size != split) { 3072 reserve_memory(size - split, base + split); 3073 } 3074 } 3075 } 3076 3077 // Multiple threads can race in this code but it's not possible to unmap small sections of 3078 // virtual space to get requested alignment, like posix-like os's. 3079 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe. 
// Reserve 'size' bytes at an address that is a multiple of 'alignment' by
// over-reserving, releasing, and re-reserving at the aligned address.
// Another thread can grab the gap between release and re-reserve, hence the
// retry loop (see the race comment above).
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);

    os::release_memory(extra_base, extra_size);

    // Re-reserve exactly at the aligned address; NULL here means another
    // thread took the range and we loop to try again.
    aligned_base = os::reserve_memory(size, aligned_base);

  } while (aligned_base == NULL);

  return aligned_base;
}

// Reserve (but do not commit) 'bytes' of address space, optionally at 'addr'.
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if( Verbose && PrintMiscellaneous ) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if( Verbose && PrintMiscellaneous ) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available.
  return reserve_memory(bytes, requested_addr);
}

size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

bool os::can_execute_large_page_memory() {
  return true;
}

// Reserve AND commit a large-page region; falls back (returns NULL) if
// 'bytes'/'alignment' are incompatible with the large page size.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) {
  assert(UseLargePages, "only for large pages");

  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
    return NULL; // Fallback to small pages.
  }

  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;

  // with large pages, there are two cases where we need to use Individual Allocation
  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages individually.");
    }
    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
    if (p_buf == NULL) {
      // give an appropriate warning message
      if (UseNUMAInterleaving) {
        warning("NUMA large page allocation failed, UseLargePages flag ignored");
      }
      if (UseLargePagesIndividualAllocation) {
        warning("Individually allocated large pages failed, "
                "use -XX:-UseLargePagesIndividualAllocation to turn off");
      }
      return NULL;
    }

    return p_buf;

  } else {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages in a single large chunk.");
    }
    // normal policy just allocate it all at once
    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
    if (res != NULL) {
      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
    }

    return res;
  }
}

bool os::release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  return release_memory(base, bytes);
}

void os::print_statistics() {
}

// Emit a diagnostic warning with the OS error text for a failed commit.
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}

// Commit previously reserved pages; with NUMA interleaving the range may span
// several distinct VirtualAlloc reservations and is committed piecewise.
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  }
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
  return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
}

bool os::pd_release_memory(char* addr, size_t bytes) {
  // MEM_RELEASE requires size 0 and the exact base address of the reservation.
  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}

// Set protections specified
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
  case MEM_PROT_READ: p = PAGE_READONLY; break;
  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
  default:
    ShouldNotReachHere();
  }

  DWORD old_status;

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed) {
    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
                          "cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard pages
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
  //
  // Pages in the region become guard pages. Any attempt to access a guard page
  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
  // the guard page status. Guard pages thus act as a one-time access alarm.
  return VirtualProtect(addr, bytes, p, &old_status) != 0;
}

bool os::guard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}

bool os::unguard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}

// NUMA-related hooks; most are no-ops on Windows since interleaving is
// handled at reservation time (allocate_pages_individually).
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
bool os::numa_topology_changed() { return false; }
size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id() { return 0; }

// Fill 'ids' with up to 'size' NUMA node ids; reports a single node 0 on
// UMA systems.  Returns the number of entries written.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // Provide an answer for UMA systems
    ids[0] = 0;
    return 1;
  } else {
    // check for size bigger than actual groups_num
    size = MIN2(size, numa_get_groups_num());
    for (int i = 0; i < (int)size; i++) {
      ids[i] = numa_node_list_holder.get_node_list_entry(i);
    }
    return size;
  }
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}

#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL

void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}

// RAII helper that temporarily raises the multimedia timer resolution to
// 1 ms for a sub-10ms sleep interval; restored in the destructor.
class HighResolutionInterval : public CHeapObj<mtThread> {
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
 private:
  jlong resolution;
 public:
  HighResolutionInterval(jlong ms) {
    resolution = ms % 10L;
    if (resolution != 0) {
      MMRESULT result = timeBeginPeriod(1L);
    }
  }
  ~HighResolutionInterval() {
    if (resolution != 0) {
      MMRESULT result = timeEndPeriod(1L);
    }
    resolution = 0L;
  }
};

// Sleep for 'ms' milliseconds; interruptable sleeps wait on the thread's
// interrupt event and return OS_INTRPT if it fires, OS_TIMEOUT otherwise.
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  // WaitForMultipleObjects/Sleep take a DWORD timeout; loop for longer sleeps.
  while (ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT)
      return res;
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    HighResolutionInterval *phri = NULL;
    if (!ForceTimeHighResolution)
      phri = new HighResolutionInterval( ms );
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}

//
// Short sleep, direct OS call.
3502 // 3503 // ms = 0, means allow others (if any) to run. 3504 // 3505 void os::naked_short_sleep(jlong ms) { 3506 assert(ms < 1000, "Un-interruptable sleep, short time use only"); 3507 Sleep(ms); 3508 } 3509 3510 // Sleep forever; naked call to OS-specific sleep; use with CAUTION 3511 void os::infinite_sleep() { 3512 while (true) { // sleep forever ... 3513 Sleep(100000); // ... 100 seconds at a time 3514 } 3515 } 3516 3517 typedef BOOL (WINAPI * STTSignature)(void) ; 3518 3519 os::YieldResult os::NakedYield() { 3520 // Use either SwitchToThread() or Sleep(0) 3521 // Consider passing back the return value from SwitchToThread(). 3522 if (os::Kernel32Dll::SwitchToThreadAvailable()) { 3523 return SwitchToThread() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY ; 3524 } else { 3525 Sleep(0); 3526 } 3527 return os::YIELD_UNKNOWN ; 3528 } 3529 3530 void os::yield() { os::NakedYield(); } 3531 3532 void os::yield_all(int attempts) { 3533 // Yields to all threads, including threads with lower priorities 3534 Sleep(1); 3535 } 3536 3537 // Win32 only gives you access to seven real priorities at a time, 3538 // so we compress Java's ten down to seven. It would be better 3539 // if we dynamically adjusted relative priorities. 
3540 3541 int os::java_to_os_priority[CriticalPriority + 1] = { 3542 THREAD_PRIORITY_IDLE, // 0 Entry should never be used 3543 THREAD_PRIORITY_LOWEST, // 1 MinPriority 3544 THREAD_PRIORITY_LOWEST, // 2 3545 THREAD_PRIORITY_BELOW_NORMAL, // 3 3546 THREAD_PRIORITY_BELOW_NORMAL, // 4 3547 THREAD_PRIORITY_NORMAL, // 5 NormPriority 3548 THREAD_PRIORITY_NORMAL, // 6 3549 THREAD_PRIORITY_ABOVE_NORMAL, // 7 3550 THREAD_PRIORITY_ABOVE_NORMAL, // 8 3551 THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority 3552 THREAD_PRIORITY_HIGHEST, // 10 MaxPriority 3553 THREAD_PRIORITY_HIGHEST // 11 CriticalPriority 3554 }; 3555 3556 int prio_policy1[CriticalPriority + 1] = { 3557 THREAD_PRIORITY_IDLE, // 0 Entry should never be used 3558 THREAD_PRIORITY_LOWEST, // 1 MinPriority 3559 THREAD_PRIORITY_LOWEST, // 2 3560 THREAD_PRIORITY_BELOW_NORMAL, // 3 3561 THREAD_PRIORITY_BELOW_NORMAL, // 4 3562 THREAD_PRIORITY_NORMAL, // 5 NormPriority 3563 THREAD_PRIORITY_ABOVE_NORMAL, // 6 3564 THREAD_PRIORITY_ABOVE_NORMAL, // 7 3565 THREAD_PRIORITY_HIGHEST, // 8 3566 THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority 3567 THREAD_PRIORITY_TIME_CRITICAL, // 10 MaxPriority 3568 THREAD_PRIORITY_TIME_CRITICAL // 11 CriticalPriority 3569 }; 3570 3571 static int prio_init() { 3572 // If ThreadPriorityPolicy is 1, switch tables 3573 if (ThreadPriorityPolicy == 1) { 3574 int i; 3575 for (i = 0; i < CriticalPriority + 1; i++) { 3576 os::java_to_os_priority[i] = prio_policy1[i]; 3577 } 3578 } 3579 if (UseCriticalJavaThreadPriority) { 3580 os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority] ; 3581 } 3582 return 0; 3583 } 3584 3585 OSReturn os::set_native_priority(Thread* thread, int priority) { 3586 if (!UseThreadPriorities) return OS_OK; 3587 bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0; 3588 return ret ? 
OS_OK : OS_ERR; 3589 } 3590 3591 OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) { 3592 if ( !UseThreadPriorities ) { 3593 *priority_ptr = java_to_os_priority[NormPriority]; 3594 return OS_OK; 3595 } 3596 int os_prio = GetThreadPriority(thread->osthread()->thread_handle()); 3597 if (os_prio == THREAD_PRIORITY_ERROR_RETURN) { 3598 assert(false, "GetThreadPriority failed"); 3599 return OS_ERR; 3600 } 3601 *priority_ptr = os_prio; 3602 return OS_OK; 3603 } 3604 3605 3606 // Hint to the underlying OS that a task switch would not be good. 3607 // Void return because it's a hint and can fail. 3608 void os::hint_no_preempt() {} 3609 3610 void os::interrupt(Thread* thread) { 3611 assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(), 3612 "possibility of dangling Thread pointer"); 3613 3614 OSThread* osthread = thread->osthread(); 3615 osthread->set_interrupted(true); 3616 // More than one thread can get here with the same value of osthread, 3617 // resulting in multiple notifications. We do, however, want the store 3618 // to interrupted() to be visible to other threads before we post 3619 // the interrupt event. 3620 OrderAccess::release(); 3621 SetEvent(osthread->interrupt_event()); 3622 // For JSR166: unpark after setting status 3623 if (thread->is_Java_thread()) 3624 ((JavaThread*)thread)->parker()->unpark(); 3625 3626 ParkEvent * ev = thread->_ParkEvent ; 3627 if (ev != NULL) ev->unpark() ; 3628 3629 } 3630 3631 3632 bool os::is_interrupted(Thread* thread, bool clear_interrupted) { 3633 assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(), 3634 "possibility of dangling Thread pointer"); 3635 3636 OSThread* osthread = thread->osthread(); 3637 // There is no synchronization between the setting of the interrupt 3638 // and it being cleared here. 
It is critical - see 6535709 - that 3639 // we only clear the interrupt state, and reset the interrupt event, 3640 // if we are going to report that we were indeed interrupted - else 3641 // an interrupt can be "lost", leading to spurious wakeups or lost wakeups 3642 // depending on the timing. By checking thread interrupt event to see 3643 // if the thread gets real interrupt thus prevent spurious wakeup. 3644 bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0); 3645 if (interrupted && clear_interrupted) { 3646 osthread->set_interrupted(false); 3647 ResetEvent(osthread->interrupt_event()); 3648 } // Otherwise leave the interrupted state alone 3649 3650 return interrupted; 3651 } 3652 3653 // Get's a pc (hint) for a running thread. Currently used only for profiling. 3654 ExtendedPC os::get_thread_pc(Thread* thread) { 3655 CONTEXT context; 3656 context.ContextFlags = CONTEXT_CONTROL; 3657 HANDLE handle = thread->osthread()->thread_handle(); 3658 #ifdef _M_IA64 3659 assert(0, "Fix get_thread_pc"); 3660 return ExtendedPC(NULL); 3661 #else 3662 if (GetThreadContext(handle, &context)) { 3663 #ifdef _M_AMD64 3664 return ExtendedPC((address) context.Rip); 3665 #else 3666 return ExtendedPC((address) context.Eip); 3667 #endif 3668 } else { 3669 return ExtendedPC(NULL); 3670 } 3671 #endif 3672 } 3673 3674 // GetCurrentThreadId() returns DWORD 3675 intx os::current_thread_id() { return GetCurrentThreadId(); } 3676 3677 static int _initial_pid = 0; 3678 3679 int os::current_process_id() 3680 { 3681 return (_initial_pid ? 
_initial_pid : _getpid()); 3682 } 3683 3684 int os::win32::_vm_page_size = 0; 3685 int os::win32::_vm_allocation_granularity = 0; 3686 int os::win32::_processor_type = 0; 3687 // Processor level is not available on non-NT systems, use vm_version instead 3688 int os::win32::_processor_level = 0; 3689 julong os::win32::_physical_memory = 0; 3690 size_t os::win32::_default_stack_size = 0; 3691 3692 intx os::win32::_os_thread_limit = 0; 3693 volatile intx os::win32::_os_thread_count = 0; 3694 3695 bool os::win32::_is_nt = false; 3696 bool os::win32::_is_windows_2003 = false; 3697 bool os::win32::_is_windows_server = false; 3698 3699 void os::win32::initialize_system_info() { 3700 SYSTEM_INFO si; 3701 GetSystemInfo(&si); 3702 _vm_page_size = si.dwPageSize; 3703 _vm_allocation_granularity = si.dwAllocationGranularity; 3704 _processor_type = si.dwProcessorType; 3705 _processor_level = si.wProcessorLevel; 3706 set_processor_count(si.dwNumberOfProcessors); 3707 3708 MEMORYSTATUSEX ms; 3709 ms.dwLength = sizeof(ms); 3710 3711 // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual, 3712 // dwMemoryLoad (% of memory in use) 3713 GlobalMemoryStatusEx(&ms); 3714 _physical_memory = ms.ullTotalPhys; 3715 3716 OSVERSIONINFOEX oi; 3717 oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); 3718 GetVersionEx((OSVERSIONINFO*)&oi); 3719 switch(oi.dwPlatformId) { 3720 case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break; 3721 case VER_PLATFORM_WIN32_NT: 3722 _is_nt = true; 3723 { 3724 int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion; 3725 if (os_vers == 5002) { 3726 _is_windows_2003 = true; 3727 } 3728 if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER || 3729 oi.wProductType == VER_NT_SERVER) { 3730 _is_windows_server = true; 3731 } 3732 } 3733 break; 3734 default: fatal("Unknown platform"); 3735 } 3736 3737 _default_stack_size = os::current_stack_size(); 3738 assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size"); 3739 
assert((_default_stack_size & (_vm_page_size - 1)) == 0, 3740 "stack size not a multiple of page size"); 3741 3742 initialize_performance_counter(); 3743 3744 // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is 3745 // known to deadlock the system, if the VM issues to thread operations with 3746 // a too high frequency, e.g., such as changing the priorities. 3747 // The 6000 seems to work well - no deadlocks has been notices on the test 3748 // programs that we have seen experience this problem. 3749 if (!os::win32::is_nt()) { 3750 StarvationMonitorInterval = 6000; 3751 } 3752 } 3753 3754 3755 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, int ebuflen) { 3756 char path[MAX_PATH]; 3757 DWORD size; 3758 DWORD pathLen = (DWORD)sizeof(path); 3759 HINSTANCE result = NULL; 3760 3761 // only allow library name without path component 3762 assert(strchr(name, '\\') == NULL, "path not allowed"); 3763 assert(strchr(name, ':') == NULL, "path not allowed"); 3764 if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) { 3765 jio_snprintf(ebuf, ebuflen, 3766 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name); 3767 return NULL; 3768 } 3769 3770 // search system directory 3771 if ((size = GetSystemDirectory(path, pathLen)) > 0) { 3772 strcat(path, "\\"); 3773 strcat(path, name); 3774 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3775 return result; 3776 } 3777 } 3778 3779 // try Windows directory 3780 if ((size = GetWindowsDirectory(path, pathLen)) > 0) { 3781 strcat(path, "\\"); 3782 strcat(path, name); 3783 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3784 return result; 3785 } 3786 } 3787 3788 jio_snprintf(ebuf, ebuflen, 3789 "os::win32::load_windows_dll() cannot load %s from system directories.", name); 3790 return NULL; 3791 } 3792 3793 void os::win32::setmode_streams() { 3794 _setmode(_fileno(stdin), _O_BINARY); 3795 _setmode(_fileno(stdout), 
_O_BINARY); 3796 _setmode(_fileno(stderr), _O_BINARY); 3797 } 3798 3799 3800 bool os::is_debugger_attached() { 3801 return IsDebuggerPresent() ? true : false; 3802 } 3803 3804 3805 void os::wait_for_keypress_at_exit(void) { 3806 if (PauseAtExit) { 3807 fprintf(stderr, "Press any key to continue...\n"); 3808 fgetc(stdin); 3809 } 3810 } 3811 3812 3813 int os::message_box(const char* title, const char* message) { 3814 int result = MessageBox(NULL, message, title, 3815 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY); 3816 return result == IDYES; 3817 } 3818 3819 int os::allocate_thread_local_storage() { 3820 return TlsAlloc(); 3821 } 3822 3823 3824 void os::free_thread_local_storage(int index) { 3825 TlsFree(index); 3826 } 3827 3828 3829 void os::thread_local_storage_at_put(int index, void* value) { 3830 TlsSetValue(index, value); 3831 assert(thread_local_storage_at(index) == value, "Just checking"); 3832 } 3833 3834 3835 void* os::thread_local_storage_at(int index) { 3836 return TlsGetValue(index); 3837 } 3838 3839 3840 #ifndef PRODUCT 3841 #ifndef _WIN64 3842 // Helpers to check whether NX protection is enabled 3843 int nx_exception_filter(_EXCEPTION_POINTERS *pex) { 3844 if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 3845 pex->ExceptionRecord->NumberParameters > 0 && 3846 pex->ExceptionRecord->ExceptionInformation[0] == 3847 EXCEPTION_INFO_EXEC_VIOLATION) { 3848 return EXCEPTION_EXECUTE_HANDLER; 3849 } 3850 return EXCEPTION_CONTINUE_SEARCH; 3851 } 3852 3853 void nx_check_protection() { 3854 // If NX is enabled we'll get an exception calling into code on the stack 3855 char code[] = { (char)0xC3 }; // ret 3856 void *code_ptr = (void *)code; 3857 __try { 3858 __asm call code_ptr 3859 } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) { 3860 tty->print_raw_cr("NX protection detected."); 3861 } 3862 } 3863 #endif // _WIN64 3864 #endif // PRODUCT 3865 3866 // this is called _before_ the global arguments 
// this is called _before_ the global arguments have been parsed
void os::init(void) {
  // Remember the pid as early as possible; current_process_id() returns it.
  _initial_pid = _getpid();

  init_random(1234567);

  // Page size / processor count etc. must be known before anything below
  // that depends on them (init_page_sizes, is_MP).
  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // For better scalability on MP systems (must be called after initialize_system_info)
#ifndef PRODUCT
  if (is_MP()) {
    NoYieldsInMicrolock = true;
  }
#endif
  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
    os::win32::is_windows_2003());

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  // GetCurrentThread() also returns only a pseudo handle; duplicate it into a
  // real handle so other threads can refer to the main thread later.
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();
}
(address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE); 3922 guarantee( mem_serialize_page != NULL, "Reserve Failed for memory serialize page"); 3923 3924 return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE); 3925 guarantee( return_page != NULL, "Commit Failed for memory serialize page"); 3926 3927 os::set_memory_serialize_page( mem_serialize_page ); 3928 3929 #ifndef PRODUCT 3930 if(Verbose && PrintMiscellaneous) 3931 tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page); 3932 #endif 3933 } 3934 3935 // Setup Windows Exceptions 3936 3937 // for debugging float code generation bugs 3938 if (ForceFloatExceptions) { 3939 #ifndef _WIN64 3940 static long fp_control_word = 0; 3941 __asm { fstcw fp_control_word } 3942 // see Intel PPro Manual, Vol. 2, p 7-16 3943 const long precision = 0x20; 3944 const long underflow = 0x10; 3945 const long overflow = 0x08; 3946 const long zero_div = 0x04; 3947 const long denorm = 0x02; 3948 const long invalid = 0x01; 3949 fp_control_word |= invalid; 3950 __asm { fldcw fp_control_word } 3951 #endif 3952 } 3953 3954 // If stack_commit_size is 0, windows will reserve the default size, 3955 // but only commit a small portion of it. 3956 size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size()); 3957 size_t default_reserve_size = os::win32::default_stack_size(); 3958 size_t actual_reserve_size = stack_commit_size; 3959 if (stack_commit_size < default_reserve_size) { 3960 // If stack_commit_size == 0, we want this too 3961 actual_reserve_size = default_reserve_size; 3962 } 3963 3964 // Check minimum allowable stack size for thread creation and to initialize 3965 // the java system classes, including StackOverflowError - depends on page 3966 // size. Add a page for compiler2 recursion in main thread. 
3967 // Add in 2*BytesPerWord times page size to account for VM stack during 3968 // class initialization depending on 32 or 64 bit VM. 3969 size_t min_stack_allowed = 3970 (size_t)(StackYellowPages+StackRedPages+StackShadowPages+ 3971 2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size(); 3972 if (actual_reserve_size < min_stack_allowed) { 3973 tty->print_cr("\nThe stack size specified is too small, " 3974 "Specify at least %dk", 3975 min_stack_allowed / K); 3976 return JNI_ERR; 3977 } 3978 3979 JavaThread::set_stack_size_at_create(stack_commit_size); 3980 3981 // Calculate theoretical max. size of Threads to guard gainst artifical 3982 // out-of-memory situations, where all available address-space has been 3983 // reserved by thread stacks. 3984 assert(actual_reserve_size != 0, "Must have a stack"); 3985 3986 // Calculate the thread limit when we should start doing Virtual Memory 3987 // banging. Currently when the threads will have used all but 200Mb of space. 3988 // 3989 // TODO: consider performing a similar calculation for commit size instead 3990 // as reserve size, since on a 64-bit platform we'll run into that more 3991 // often than running out of virtual memory space. We can use the 3992 // lower value of the two calculations as the os_thread_limit. 3993 size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K); 3994 win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size); 3995 3996 // at exit methods are called in the reverse order of their registration. 3997 // there is no limit to the number of functions registered. atexit does 3998 // not set errno. 3999 4000 if (PerfAllowAtExitRegistration) { 4001 // only register atexit functions if PerfAllowAtExitRegistration is set. 4002 // atexit functions can be delayed until process exit time, which 4003 // can be problematic for embedded VM situations. Embedded VMs should 4004 // call DestroyJavaVM() to assure that VM resources are released. 
// stat() wrapper: converts 'path' to native (backslash) form before calling
// the CRT stat(), and - when UseUTCFileTimestamp is set - compensates
// st_mtime for the current timezone so the same file yields the same
// timestamp regardless of TZ/DST settings.
int os::stat(const char *path, struct stat *sbuf) {
  char pathbuf[MAX_PATH];
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  int ret = ::stat(pathbuf, sbuf);
  if (sbuf != NULL && UseUTCFileTimestamp) {
    // Fix for 6539723.  st_mtime returned from stat() is dependent on
    // the system timezone and so can return different values for the
    // same file if/when daylight savings time changes.  This adjustment
    // makes sure the same timestamp is returned regardless of the TZ.
    //
    // See:
    // http://msdn.microsoft.com/library/
    //   default.asp?url=/library/en-us/sysinfo/base/
    //   time_zone_information_str.asp
    // and
    // http://msdn.microsoft.com/library/default.asp?url=
    //   /library/en-us/sysinfo/base/settimezoneinformation.asp
    //
    // NOTE: there is a insidious bug here:  If the timezone is changed
    // after the call to stat() but before 'GetTimeZoneInformation()', then
    // the adjustment we do here will be wrong and we'll return the wrong
    // value (which will likely end up creating an invalid class data
    // archive).  Absent a better API for this, or some time zone locking
    // mechanism, we'll have to live with this risk.
    TIME_ZONE_INFORMATION tz;
    DWORD tzid = GetTimeZoneInformation(&tz);
    // Bias fields are minutes west of UTC; pick the bias matching the
    // currently-active (standard vs daylight) setting.
    int daylightBias =
      (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias;
    sbuf->st_mtime += (tz.Bias + daylightBias) * 60;  // minutes -> seconds
  }
  return ret;
}
4106 4107 // current_thread_cpu_time() is not optimized for Windows yet 4108 jlong os::current_thread_cpu_time() { 4109 // return user + sys since the cost is the same 4110 return os::thread_cpu_time(Thread::current(), true /* user+sys */); 4111 } 4112 4113 jlong os::thread_cpu_time(Thread* thread) { 4114 // consistent with what current_thread_cpu_time() returns. 4115 return os::thread_cpu_time(thread, true /* user+sys */); 4116 } 4117 4118 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 4119 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 4120 } 4121 4122 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) { 4123 // This code is copy from clasic VM -> hpi::sysThreadCPUTime 4124 // If this function changes, os::is_thread_cpu_time_supported() should too 4125 if (os::win32::is_nt()) { 4126 FILETIME CreationTime; 4127 FILETIME ExitTime; 4128 FILETIME KernelTime; 4129 FILETIME UserTime; 4130 4131 if ( GetThreadTimes(thread->osthread()->thread_handle(), 4132 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0) 4133 return -1; 4134 else 4135 if (user_sys_cpu_time) { 4136 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100; 4137 } else { 4138 return FT2INT64(UserTime) * 100; 4139 } 4140 } else { 4141 return (jlong) timeGetTime() * 1000000; 4142 } 4143 } 4144 4145 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4146 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4147 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4148 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4149 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4150 } 4151 4152 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4153 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4154 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4155 info_ptr->may_skip_forward = false; // GetThreadTimes 
returns absolute time 4156 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4157 } 4158 4159 bool os::is_thread_cpu_time_supported() { 4160 // see os::thread_cpu_time 4161 if (os::win32::is_nt()) { 4162 FILETIME CreationTime; 4163 FILETIME ExitTime; 4164 FILETIME KernelTime; 4165 FILETIME UserTime; 4166 4167 if ( GetThreadTimes(GetCurrentThread(), 4168 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0) 4169 return false; 4170 else 4171 return true; 4172 } else { 4173 return false; 4174 } 4175 } 4176 4177 // Windows does't provide a loadavg primitive so this is stubbed out for now. 4178 // It does have primitives (PDH API) to get CPU usage and run queue length. 4179 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length" 4180 // If we wanted to implement loadavg on Windows, we have a few options: 4181 // 4182 // a) Query CPU usage and run queue length and "fake" an answer by 4183 // returning the CPU usage if it's under 100%, and the run queue 4184 // length otherwise. It turns out that querying is pretty slow 4185 // on Windows, on the order of 200 microseconds on a fast machine. 4186 // Note that on the Windows the CPU usage value is the % usage 4187 // since the last time the API was called (and the first call 4188 // returns 100%), so we'd have to deal with that as well. 4189 // 4190 // b) Sample the "fake" answer using a sampling thread and store 4191 // the answer in a global variable. The call to loadavg would 4192 // just return the value of the global, avoiding the slow query. 4193 // 4194 // c) Sample a better answer using exponential decay to smooth the 4195 // value. This is basically the algorithm used by UNIX kernels. 4196 // 4197 // Note that sampling thread starvation could affect both (b) and (c). 
4198 int os::loadavg(double loadavg[], int nelem) { 4199 return -1; 4200 } 4201 4202 4203 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield() 4204 bool os::dont_yield() { 4205 return DontYieldALot; 4206 } 4207 4208 // This method is a slightly reworked copy of JDK's sysOpen 4209 // from src/windows/hpi/src/sys_api_md.c 4210 4211 int os::open(const char *path, int oflag, int mode) { 4212 char pathbuf[MAX_PATH]; 4213 4214 if (strlen(path) > MAX_PATH - 1) { 4215 errno = ENAMETOOLONG; 4216 return -1; 4217 } 4218 os::native_path(strcpy(pathbuf, path)); 4219 return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); 4220 } 4221 4222 FILE* os::open(int fd, const char* mode) { 4223 return ::_fdopen(fd, mode); 4224 } 4225 4226 // Is a (classpath) directory empty? 4227 bool os::dir_is_empty(const char* path) { 4228 WIN32_FIND_DATA fd; 4229 HANDLE f = FindFirstFile(path, &fd); 4230 if (f == INVALID_HANDLE_VALUE) { 4231 return true; 4232 } 4233 FindClose(f); 4234 return false; 4235 } 4236 4237 // create binary file, rewriting existing file if required 4238 int os::create_binary_file(const char* path, bool rewrite_existing) { 4239 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY; 4240 if (!rewrite_existing) { 4241 oflags |= _O_EXCL; 4242 } 4243 return ::open(path, oflags, _S_IREAD | _S_IWRITE); 4244 } 4245 4246 // return current position of file pointer 4247 jlong os::current_file_offset(int fd) { 4248 return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR); 4249 } 4250 4251 // move file pointer to the specified offset 4252 jlong os::seek_to_file_offset(int fd, jlong offset) { 4253 return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET); 4254 } 4255 4256 4257 jlong os::lseek(int fd, jlong offset, int whence) { 4258 return (jlong) ::_lseeki64(fd, offset, whence); 4259 } 4260 4261 // This method is a slightly reworked copy of JDK's sysNativePath 4262 // from src/windows/hpi/src/path_md.c 4263 4264 /* Convert a pathname to native format. 
On win32, this involves forcing all 4265 separators to be '\\' rather than '/' (both are legal inputs, but Win95 4266 sometimes rejects '/') and removing redundant separators. The input path is 4267 assumed to have been converted into the character encoding used by the local 4268 system. Because this might be a double-byte encoding, care is taken to 4269 treat double-byte lead characters correctly. 4270 4271 This procedure modifies the given path in place, as the result is never 4272 longer than the original. There is no error return; this operation always 4273 succeeds. */ 4274 char * os::native_path(char *path) { 4275 char *src = path, *dst = path, *end = path; 4276 char *colon = NULL; /* If a drive specifier is found, this will 4277 point to the colon following the drive 4278 letter */ 4279 4280 /* Assumption: '/', '\\', ':', and drive letters are never lead bytes */ 4281 assert(((!::IsDBCSLeadByte('/')) 4282 && (!::IsDBCSLeadByte('\\')) 4283 && (!::IsDBCSLeadByte(':'))), 4284 "Illegal lead byte"); 4285 4286 /* Check for leading separators */ 4287 #define isfilesep(c) ((c) == '/' || (c) == '\\') 4288 while (isfilesep(*src)) { 4289 src++; 4290 } 4291 4292 if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') { 4293 /* Remove leading separators if followed by drive specifier. This 4294 hack is necessary to support file URLs containing drive 4295 specifiers (e.g., "file://c:/path"). As a side effect, 4296 "/c:/path" can be used as an alternative to "c:/path". */ 4297 *dst++ = *src++; 4298 colon = dst; 4299 *dst++ = ':'; 4300 src++; 4301 } else { 4302 src = path; 4303 if (isfilesep(src[0]) && isfilesep(src[1])) { 4304 /* UNC pathname: Retain first separator; leave src pointed at 4305 second separator so that further separators will be collapsed 4306 into the second separator. The result will be a pathname 4307 beginning with "\\\\" followed (most likely) by a host name. 
*/ 4308 src = dst = path + 1; 4309 path[0] = '\\'; /* Force first separator to '\\' */ 4310 } 4311 } 4312 4313 end = dst; 4314 4315 /* Remove redundant separators from remainder of path, forcing all 4316 separators to be '\\' rather than '/'. Also, single byte space 4317 characters are removed from the end of the path because those 4318 are not legal ending characters on this operating system. 4319 */ 4320 while (*src != '\0') { 4321 if (isfilesep(*src)) { 4322 *dst++ = '\\'; src++; 4323 while (isfilesep(*src)) src++; 4324 if (*src == '\0') { 4325 /* Check for trailing separator */ 4326 end = dst; 4327 if (colon == dst - 2) break; /* "z:\\" */ 4328 if (dst == path + 1) break; /* "\\" */ 4329 if (dst == path + 2 && isfilesep(path[0])) { 4330 /* "\\\\" is not collapsed to "\\" because "\\\\" marks the 4331 beginning of a UNC pathname. Even though it is not, by 4332 itself, a valid UNC pathname, we leave it as is in order 4333 to be consistent with the path canonicalizer as well 4334 as the win32 APIs, which treat this case as an invalid 4335 UNC pathname rather than as an alias for the root 4336 directory of the current drive. */ 4337 break; 4338 } 4339 end = --dst; /* Path does not denote a root directory, so 4340 remove trailing separator */ 4341 break; 4342 } 4343 end = dst; 4344 } else { 4345 if (::IsDBCSLeadByte(*src)) { /* Copy a double-byte character */ 4346 *dst++ = *src++; 4347 if (*src) *dst++ = *src++; 4348 end = dst; 4349 } else { /* Copy a single-byte character */ 4350 char c = *src++; 4351 *dst++ = c; 4352 /* Space is not a legal ending character */ 4353 if (c != ' ') end = dst; 4354 } 4355 } 4356 } 4357 4358 *end = '\0'; 4359 4360 /* For "z:", add "." 
to work around a bug in the C runtime library */ 4361 if (colon == dst - 1) { 4362 path[2] = '.'; 4363 path[3] = '\0'; 4364 } 4365 4366 return path; 4367 } 4368 4369 // This code is a copy of JDK's sysSetLength 4370 // from src/windows/hpi/src/sys_api_md.c 4371 4372 int os::ftruncate(int fd, jlong length) { 4373 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4374 long high = (long)(length >> 32); 4375 DWORD ret; 4376 4377 if (h == (HANDLE)(-1)) { 4378 return -1; 4379 } 4380 4381 ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN); 4382 if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) { 4383 return -1; 4384 } 4385 4386 if (::SetEndOfFile(h) == FALSE) { 4387 return -1; 4388 } 4389 4390 return 0; 4391 } 4392 4393 4394 // This code is a copy of JDK's sysSync 4395 // from src/windows/hpi/src/sys_api_md.c 4396 // except for the legacy workaround for a bug in Win 98 4397 4398 int os::fsync(int fd) { 4399 HANDLE handle = (HANDLE)::_get_osfhandle(fd); 4400 4401 if ( (!::FlushFileBuffers(handle)) && 4402 (GetLastError() != ERROR_ACCESS_DENIED) ) { 4403 /* from winerror.h */ 4404 return -1; 4405 } 4406 return 0; 4407 } 4408 4409 static int nonSeekAvailable(int, long *); 4410 static int stdinAvailable(int, long *); 4411 4412 #define S_ISCHR(mode) (((mode) & _S_IFCHR) == _S_IFCHR) 4413 #define S_ISFIFO(mode) (((mode) & _S_IFIFO) == _S_IFIFO) 4414 4415 // This code is a copy of JDK's sysAvailable 4416 // from src/windows/hpi/src/sys_api_md.c 4417 4418 int os::available(int fd, jlong *bytes) { 4419 jlong cur, end; 4420 struct _stati64 stbuf64; 4421 4422 if (::_fstati64(fd, &stbuf64) >= 0) { 4423 int mode = stbuf64.st_mode; 4424 if (S_ISCHR(mode) || S_ISFIFO(mode)) { 4425 int ret; 4426 long lpbytes; 4427 if (fd == 0) { 4428 ret = stdinAvailable(fd, &lpbytes); 4429 } else { 4430 ret = nonSeekAvailable(fd, &lpbytes); 4431 } 4432 (*bytes) = (jlong)(lpbytes); 4433 return ret; 4434 } 4435 if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) { 4436 return FALSE; 4437 } else if 
((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) { 4438 return FALSE; 4439 } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) { 4440 return FALSE; 4441 } 4442 *bytes = end - cur; 4443 return TRUE; 4444 } else { 4445 return FALSE; 4446 } 4447 } 4448 4449 // This code is a copy of JDK's nonSeekAvailable 4450 // from src/windows/hpi/src/sys_api_md.c 4451 4452 static int nonSeekAvailable(int fd, long *pbytes) { 4453 /* This is used for available on non-seekable devices 4454 * (like both named and anonymous pipes, such as pipes 4455 * connected to an exec'd process). 4456 * Standard Input is a special case. 4457 * 4458 */ 4459 HANDLE han; 4460 4461 if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) { 4462 return FALSE; 4463 } 4464 4465 if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) { 4466 /* PeekNamedPipe fails when at EOF. In that case we 4467 * simply make *pbytes = 0 which is consistent with the 4468 * behavior we get on Solaris when an fd is at EOF. 4469 * The only alternative is to raise an Exception, 4470 * which isn't really warranted. 
4471 */ 4472 if (::GetLastError() != ERROR_BROKEN_PIPE) { 4473 return FALSE; 4474 } 4475 *pbytes = 0; 4476 } 4477 return TRUE; 4478 } 4479 4480 #define MAX_INPUT_EVENTS 2000 4481 4482 // This code is a copy of JDK's stdinAvailable 4483 // from src/windows/hpi/src/sys_api_md.c 4484 4485 static int stdinAvailable(int fd, long *pbytes) { 4486 HANDLE han; 4487 DWORD numEventsRead = 0; /* Number of events read from buffer */ 4488 DWORD numEvents = 0; /* Number of events in buffer */ 4489 DWORD i = 0; /* Loop index */ 4490 DWORD curLength = 0; /* Position marker */ 4491 DWORD actualLength = 0; /* Number of bytes readable */ 4492 BOOL error = FALSE; /* Error holder */ 4493 INPUT_RECORD *lpBuffer; /* Pointer to records of input events */ 4494 4495 if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) { 4496 return FALSE; 4497 } 4498 4499 /* Construct an array of input records in the console buffer */ 4500 error = ::GetNumberOfConsoleInputEvents(han, &numEvents); 4501 if (error == 0) { 4502 return nonSeekAvailable(fd, pbytes); 4503 } 4504 4505 /* lpBuffer must fit into 64K or else PeekConsoleInput fails */ 4506 if (numEvents > MAX_INPUT_EVENTS) { 4507 numEvents = MAX_INPUT_EVENTS; 4508 } 4509 4510 lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal); 4511 if (lpBuffer == NULL) { 4512 return FALSE; 4513 } 4514 4515 error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead); 4516 if (error == 0) { 4517 os::free(lpBuffer, mtInternal); 4518 return FALSE; 4519 } 4520 4521 /* Examine input records for the number of bytes available */ 4522 for(i=0; i<numEvents; i++) { 4523 if (lpBuffer[i].EventType == KEY_EVENT) { 4524 4525 KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *) 4526 &(lpBuffer[i].Event); 4527 if (keyRecord->bKeyDown == TRUE) { 4528 CHAR *keyPressed = (CHAR *) &(keyRecord->uChar); 4529 curLength++; 4530 if (*keyPressed == '\r') { 4531 actualLength = curLength; 4532 } 4533 } 4534 } 4535 } 4536 4537 if(lpBuffer != 
NULL) { 4538 os::free(lpBuffer, mtInternal); 4539 } 4540 4541 *pbytes = (long) actualLength; 4542 return TRUE; 4543 } 4544 4545 // Map a block of memory. 4546 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 4547 char *addr, size_t bytes, bool read_only, 4548 bool allow_exec) { 4549 HANDLE hFile; 4550 char* base; 4551 4552 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL, 4553 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); 4554 if (hFile == NULL) { 4555 if (PrintMiscellaneous && Verbose) { 4556 DWORD err = GetLastError(); 4557 tty->print_cr("CreateFile() failed: GetLastError->%ld.", err); 4558 } 4559 return NULL; 4560 } 4561 4562 if (allow_exec) { 4563 // CreateFileMapping/MapViewOfFileEx can't map executable memory 4564 // unless it comes from a PE image (which the shared archive is not.) 4565 // Even VirtualProtect refuses to give execute access to mapped memory 4566 // that was not previously executable. 4567 // 4568 // Instead, stick the executable region in anonymous memory. Yuck. 4569 // Penalty is that ~4 pages will not be shareable - in the future 4570 // we might consider DLLizing the shared archive with a proper PE 4571 // header so that mapping executable + sharing is possible. 4572 4573 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE, 4574 PAGE_READWRITE); 4575 if (base == NULL) { 4576 if (PrintMiscellaneous && Verbose) { 4577 DWORD err = GetLastError(); 4578 tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err); 4579 } 4580 CloseHandle(hFile); 4581 return NULL; 4582 } 4583 4584 DWORD bytes_read; 4585 OVERLAPPED overlapped; 4586 overlapped.Offset = (DWORD)file_offset; 4587 overlapped.OffsetHigh = 0; 4588 overlapped.hEvent = NULL; 4589 // ReadFile guarantees that if the return value is true, the requested 4590 // number of bytes were read before returning. 
4591 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; 4592 if (!res) { 4593 if (PrintMiscellaneous && Verbose) { 4594 DWORD err = GetLastError(); 4595 tty->print_cr("ReadFile() failed: GetLastError->%ld.", err); 4596 } 4597 release_memory(base, bytes); 4598 CloseHandle(hFile); 4599 return NULL; 4600 } 4601 } else { 4602 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, 4603 NULL /*file_name*/); 4604 if (hMap == NULL) { 4605 if (PrintMiscellaneous && Verbose) { 4606 DWORD err = GetLastError(); 4607 tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err); 4608 } 4609 CloseHandle(hFile); 4610 return NULL; 4611 } 4612 4613 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; 4614 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, 4615 (DWORD)bytes, addr); 4616 if (base == NULL) { 4617 if (PrintMiscellaneous && Verbose) { 4618 DWORD err = GetLastError(); 4619 tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err); 4620 } 4621 CloseHandle(hMap); 4622 CloseHandle(hFile); 4623 return NULL; 4624 } 4625 4626 if (CloseHandle(hMap) == 0) { 4627 if (PrintMiscellaneous && Verbose) { 4628 DWORD err = GetLastError(); 4629 tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err); 4630 } 4631 CloseHandle(hFile); 4632 return base; 4633 } 4634 } 4635 4636 if (allow_exec) { 4637 DWORD old_protect; 4638 DWORD exec_access = read_only ? 
PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; 4639 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; 4640 4641 if (!res) { 4642 if (PrintMiscellaneous && Verbose) { 4643 DWORD err = GetLastError(); 4644 tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err); 4645 } 4646 // Don't consider this a hard error, on IA32 even if the 4647 // VirtualProtect fails, we should still be able to execute 4648 CloseHandle(hFile); 4649 return base; 4650 } 4651 } 4652 4653 if (CloseHandle(hFile) == 0) { 4654 if (PrintMiscellaneous && Verbose) { 4655 DWORD err = GetLastError(); 4656 tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err); 4657 } 4658 return base; 4659 } 4660 4661 return base; 4662 } 4663 4664 4665 // Remap a block of memory. 4666 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4667 char *addr, size_t bytes, bool read_only, 4668 bool allow_exec) { 4669 // This OS does not allow existing memory maps to be remapped so we 4670 // have to unmap the memory before we remap it. 4671 if (!os::unmap_memory(addr, bytes)) { 4672 return NULL; 4673 } 4674 4675 // There is a very small theoretical window between the unmap_memory() 4676 // call above and the map_memory() call below where a thread in native 4677 // code may be able to access an address that is no longer mapped. 4678 4679 return os::map_memory(fd, file_name, file_offset, addr, bytes, 4680 read_only, allow_exec); 4681 } 4682 4683 4684 // Unmap a block of memory. 4685 // Returns true=success, otherwise false. 
4686 4687 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4688 BOOL result = UnmapViewOfFile(addr); 4689 if (result == 0) { 4690 if (PrintMiscellaneous && Verbose) { 4691 DWORD err = GetLastError(); 4692 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err); 4693 } 4694 return false; 4695 } 4696 return true; 4697 } 4698 4699 void os::pause() { 4700 char filename[MAX_PATH]; 4701 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4702 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4703 } else { 4704 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4705 } 4706 4707 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4708 if (fd != -1) { 4709 struct stat buf; 4710 ::close(fd); 4711 while (::stat(filename, &buf) == 0) { 4712 Sleep(100); 4713 } 4714 } else { 4715 jio_fprintf(stderr, 4716 "Could not open pause file '%s', continuing immediately.\n", filename); 4717 } 4718 } 4719 4720 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() { 4721 assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread"); 4722 } 4723 4724 /* 4725 * See the caveats for this class in os_windows.hpp 4726 * Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4727 * into this method and returns false. If no OS EXCEPTION was raised, returns 4728 * true. 4729 * The callback is supposed to provide the method that should be protected. 
4730 */ 4731 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) { 4732 assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread"); 4733 assert(!WatcherThread::watcher_thread()->has_crash_protection(), 4734 "crash_protection already set?"); 4735 4736 bool success = true; 4737 __try { 4738 WatcherThread::watcher_thread()->set_crash_protection(this); 4739 cb.call(); 4740 } __except(EXCEPTION_EXECUTE_HANDLER) { 4741 // only for protection, nothing to do 4742 success = false; 4743 } 4744 WatcherThread::watcher_thread()->set_crash_protection(NULL); 4745 return success; 4746 } 4747 4748 // An Event wraps a win32 "CreateEvent" kernel handle. 4749 // 4750 // We have a number of choices regarding "CreateEvent" win32 handle leakage: 4751 // 4752 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle 4753 // field, and call CloseHandle() on the win32 event handle. Unpark() would 4754 // need to be modified to tolerate finding a NULL (invalid) win32 event handle. 4755 // In addition, an unpark() operation might fetch the handle field, but the 4756 // event could recycle between the fetch and the SetEvent() operation. 4757 // SetEvent() would either fail because the handle was invalid, or inadvertently work, 4758 // as the win32 handle value had been recycled. In an ideal world calling SetEvent() 4759 // on an stale but recycled handle would be harmless, but in practice this might 4760 // confuse other non-Sun code, so it's not a viable approach. 4761 // 4762 // 2: Once a win32 event handle is associated with an Event, it remains associated 4763 // with the Event. The event handle is never closed. This could be construed 4764 // as handle leakage, but only up to the maximum # of threads that have been extant 4765 // at any one time. This shouldn't be an issue, as windows platforms typically 4766 // permit a process to have hundreds of thousands of open handles. 
//
// 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
//     and release unused handles.
//
// 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
//     It's not clear, however, that we wouldn't be trading one type of leak for another.
//
// 5.  Use an RCU-like mechanism (Read-Copy Update).
//     Or perhaps something similar to Maged Michael's "Hazard pointers".
//
// We use (2).
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
// 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
//     to recover from (or at least detect) the dreaded Windows 841176 bug.
// 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
//     into a single win32 CreateEvent() handle.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block
//
// _Event serves as a restricted-range semaphore :
//    -1 : thread is blocked
//     0 : neutral  - thread is running or ready
//     1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be
// with explicit "PARKED" and "SIGNALED" bits.

// Timed park: block the associated thread for at most Millis milliseconds,
// unless a permit is (or becomes) available.  Returns OS_OK if awoken by
// unpark(), OS_TIMEOUT if the full interval elapsed.
int os::PlatformEvent::park (jlong Millis) {
    guarantee (_ParkHandle != NULL , "Invariant") ;
    guarantee (Millis > 0          , "Invariant") ;
    int v ;

    // CONSIDER: defer assigning a CreateEvent() handle to the Event until
    // the initial park() operation.

    // Atomically decrement _Event; consumes a permit (1 -> 0) or records
    // that this thread is blocking (0 -> -1).
    for (;;) {
        v = _Event ;
        if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
    }
    guarantee ((v == 0) || (v == 1), "invariant") ;
    if (v != 0) return OS_OK ;     // a permit was available - no need to block

    // Do this the hard way by blocking ...
    // TODO: consider a brief spin here, gated on the success of recent
    // spin attempts by this thread.
    //
    // We decompose long timeouts into series of shorter timed waits.
    // Evidently large timo values passed in WaitForSingleObject() are problematic on some
    // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
    // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
    // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
    // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
    // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
    // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
    // for the already waited time.  This policy does not admit any new outcomes.
    // In the future, however, we might want to track the accumulated wait time and
    // adjust Millis accordingly if we encounter a spurious wakeup.

    const int MAXTIMEOUT = 0x10000000 ;
    DWORD rv = WAIT_TIMEOUT ;
    while (_Event < 0 && Millis > 0) {
       DWORD prd = Millis ;     // set prd = MIN (Millis, MAXTIMEOUT)
       if (Millis > MAXTIMEOUT) {
          prd = MAXTIMEOUT ;
       }
       rv = ::WaitForSingleObject (_ParkHandle, prd) ;
       assert (rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed") ;
       if (rv == WAIT_TIMEOUT) {
           Millis -= prd ;
       }
    }
    v = _Event ;
    _Event = 0 ;
    // see comment at end of os::PlatformEvent::park() below:
    OrderAccess::fence() ;
    // If we encounter a nearly simultaneous timeout expiry and unpark()
    // we return OS_OK indicating we awoke via unpark().
    // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
    return (v >= 0) ? OS_OK : OS_TIMEOUT ;
}

// Untimed park: block until a permit is available.
void os::PlatformEvent::park () {
    guarantee (_ParkHandle != NULL, "Invariant") ;
    // Invariant: Only the thread associated with the Event/PlatformEvent
    // may call park().
    int v ;
    for (;;) {
        v = _Event ;
        if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ;
    }
    guarantee ((v == 0) || (v == 1), "invariant") ;
    if (v != 0) return ;           // consumed an existing permit

    // Do this the hard way by blocking ...
    // TODO: consider a brief spin here, gated on the success of recent
    // spin attempts by this thread.
    // Re-check _Event after every wakeup to filter out spurious returns.
    while (_Event < 0) {
       DWORD rv = ::WaitForSingleObject (_ParkHandle, INFINITE) ;
       assert (rv == WAIT_OBJECT_0, "WaitForSingleObject failed") ;
    }

    // Usually we'll find _Event == 0 at this point, but as an optional
    // optimization we clear it, in case multiple unpark() operations
    // drove _Event up to 1.
    _Event = 0 ;
    OrderAccess::fence() ;
    guarantee (_Event >= 0, "invariant") ;
}

void os::PlatformEvent::unpark() {
  guarantee (_ParkHandle != NULL, "Invariant") ;

  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1. Forcing 1 is slightly more efficient for back-to-back
  //         unpark() calls.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // Only signal the kernel event if the previous value was -1 (a blocked
  // waiter); otherwise the store of 1 alone is sufficient.
  if (Atomic::xchg(1, &_Event) >= 0) return;

  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

/*
 * The Windows implementation of Park is very straightforward: Basic
 * operations on Win32 Events turn out to have the right semantics to
 * use them directly. We opportunistically reuse the event inherited
 * from Monitor.
 */


void Parker::park(bool isAbsolute, jlong time) {
  guarantee (_ParkEvent != NULL, "invariant") ;
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  }
  else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  }
  else if  (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) // already elapsed
      return;
  }
  else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0)   // Wait for the minimal time unit if zero
      time = 1;
  }

  JavaThread* thread = (JavaThread*)(Thread::current());
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
    WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    ResetEvent(_ParkEvent);
    return;
  }
  else {
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jt->set_suspend_equivalent();

    // NOTE(review): a single timed wait is used here (no re-check loop), so
    // an early return counts as a permitted spurious wakeup of park().
    WaitForSingleObject(_ParkEvent,  time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (jt->handle_special_suspend_equivalent_condition()) {
      jt->java_suspend_self();
    }
  }
}

void Parker::unpark() {
  guarantee (_ParkEvent != NULL, "invariant") ;
  SetEvent(_ParkEvent);
}

// Run the
specified command in a separate process. Return its exit value, 4967 // or -1 on failure (e.g. can't create a new process). 4968 int os::fork_and_exec(char* cmd) { 4969 STARTUPINFO si; 4970 PROCESS_INFORMATION pi; 4971 4972 memset(&si, 0, sizeof(si)); 4973 si.cb = sizeof(si); 4974 memset(&pi, 0, sizeof(pi)); 4975 BOOL rslt = CreateProcess(NULL, // executable name - use command line 4976 cmd, // command line 4977 NULL, // process security attribute 4978 NULL, // thread security attribute 4979 TRUE, // inherits system handles 4980 0, // no creation flags 4981 NULL, // use parent's environment block 4982 NULL, // use parent's starting directory 4983 &si, // (in) startup information 4984 &pi); // (out) process information 4985 4986 if (rslt) { 4987 // Wait until child process exits. 4988 WaitForSingleObject(pi.hProcess, INFINITE); 4989 4990 DWORD exit_code; 4991 GetExitCodeProcess(pi.hProcess, &exit_code); 4992 4993 // Close process and thread handles. 4994 CloseHandle(pi.hProcess); 4995 CloseHandle(pi.hThread); 4996 4997 return (int)exit_code; 4998 } else { 4999 return -1; 5000 } 5001 } 5002 5003 //-------------------------------------------------------------------------------------------------- 5004 // Non-product code 5005 5006 static int mallocDebugIntervalCounter = 0; 5007 static int mallocDebugCounter = 0; 5008 bool os::check_heap(bool force) { 5009 if (++mallocDebugCounter < MallocVerifyStart && !force) return true; 5010 if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) { 5011 // Note: HeapValidate executes two hardware breakpoints when it finds something 5012 // wrong; at these points, eax contains the address of the offending block (I think). 5013 // To get to the exlicit error message(s) below, just continue twice. 
5014 HANDLE heap = GetProcessHeap(); 5015 { HeapLock(heap); 5016 PROCESS_HEAP_ENTRY phe; 5017 phe.lpData = NULL; 5018 while (HeapWalk(heap, &phe) != 0) { 5019 if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) && 5020 !HeapValidate(heap, 0, phe.lpData)) { 5021 tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter); 5022 tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData); 5023 fatal("corrupted C heap"); 5024 } 5025 } 5026 DWORD err = GetLastError(); 5027 if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) { 5028 fatal(err_msg("heap walk aborted with error %d", err)); 5029 } 5030 HeapUnlock(heap); 5031 } 5032 mallocDebugIntervalCounter = 0; 5033 } 5034 return true; 5035 } 5036 5037 5038 bool os::find(address addr, outputStream* st) { 5039 // Nothing yet 5040 return false; 5041 } 5042 5043 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) { 5044 DWORD exception_code = e->ExceptionRecord->ExceptionCode; 5045 5046 if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) { 5047 JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow(); 5048 PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord; 5049 address addr = (address) exceptionRecord->ExceptionInformation[1]; 5050 5051 if (os::is_memory_serialize_page(thread, addr)) 5052 return EXCEPTION_CONTINUE_EXECUTION; 5053 } 5054 5055 return EXCEPTION_CONTINUE_SEARCH; 5056 } 5057 5058 // We don't build a headless jre for Windows 5059 bool os::is_headless_jre() { return false; } 5060 5061 static jint initSock() { 5062 WSADATA wsadata; 5063 5064 if (!os::WinSock2Dll::WinSock2Available()) { 5065 jio_fprintf(stderr, "Could not load Winsock (error: %d)\n", 5066 ::GetLastError()); 5067 return JNI_ERR; 5068 } 5069 5070 if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) { 5071 jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n", 5072 ::GetLastError()); 5073 return JNI_ERR; 5074 } 5075 return JNI_OK; 
5076 } 5077 5078 struct hostent* os::get_host_by_name(char* name) { 5079 return (struct hostent*)os::WinSock2Dll::gethostbyname(name); 5080 } 5081 5082 int os::socket_close(int fd) { 5083 return ::closesocket(fd); 5084 } 5085 5086 int os::socket_available(int fd, jint *pbytes) { 5087 int ret = ::ioctlsocket(fd, FIONREAD, (u_long*)pbytes); 5088 return (ret < 0) ? 0 : 1; 5089 } 5090 5091 int os::socket(int domain, int type, int protocol) { 5092 return ::socket(domain, type, protocol); 5093 } 5094 5095 int os::listen(int fd, int count) { 5096 return ::listen(fd, count); 5097 } 5098 5099 int os::connect(int fd, struct sockaddr* him, socklen_t len) { 5100 return ::connect(fd, him, len); 5101 } 5102 5103 int os::accept(int fd, struct sockaddr* him, socklen_t* len) { 5104 return ::accept(fd, him, len); 5105 } 5106 5107 int os::sendto(int fd, char* buf, size_t len, uint flags, 5108 struct sockaddr* to, socklen_t tolen) { 5109 5110 return ::sendto(fd, buf, (int)len, flags, to, tolen); 5111 } 5112 5113 int os::recvfrom(int fd, char *buf, size_t nBytes, uint flags, 5114 sockaddr* from, socklen_t* fromlen) { 5115 5116 return ::recvfrom(fd, buf, (int)nBytes, flags, from, fromlen); 5117 } 5118 5119 int os::recv(int fd, char* buf, size_t nBytes, uint flags) { 5120 return ::recv(fd, buf, (int)nBytes, flags); 5121 } 5122 5123 int os::send(int fd, char* buf, size_t nBytes, uint flags) { 5124 return ::send(fd, buf, (int)nBytes, flags); 5125 } 5126 5127 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) { 5128 return ::send(fd, buf, (int)nBytes, flags); 5129 } 5130 5131 int os::timeout(int fd, long timeout) { 5132 fd_set tbl; 5133 struct timeval t; 5134 5135 t.tv_sec = timeout / 1000; 5136 t.tv_usec = (timeout % 1000) * 1000; 5137 5138 tbl.fd_count = 1; 5139 tbl.fd_array[0] = fd; 5140 5141 return ::select(1, &tbl, 0, 0, &t); 5142 } 5143 5144 int os::get_host_name(char* name, int namelen) { 5145 return ::gethostname(name, namelen); 5146 } 5147 5148 int 
os::socket_shutdown(int fd, int howto) {
  return ::shutdown(fd, howto);
}

int os::bind(int fd, struct sockaddr* him, socklen_t len) {
  return ::bind(fd, him, len);
}

int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
  return ::getsockname(fd, him, len);
}

int os::get_sock_opt(int fd, int level, int optname,
                     char* optval, socklen_t* optlen) {
  return ::getsockopt(fd, level, optname, optval, optlen);
}

int os::set_sock_opt(int fd, int level, int optname,
                     const char* optval, socklen_t optlen) {
  return ::setsockopt(fd, level, optname, optval, optlen);
}

// WINDOWS CONTEXT Flags for THREAD_SAMPLING
// NOTE(review): no #else fallback - on CPUs other than IA32/AMD64 the macro
// is left undefined and any use of it will fail to compile.
#if defined(IA32)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif

// returns true if thread could be suspended,
// false otherwise
static bool do_suspend(HANDLE* h) {
  if (h != NULL) {
    // SuspendThread returns the previous suspend count, or (DWORD)-1 on failure.
    if (SuspendThread(*h) != ~0) {
      return true;
    }
  }
  return false;
}

// resume the thread
// calling resume on an active thread is a no-op
static void do_resume(HANDLE* h) {
  if (h != NULL) {
    ResumeThread(*h);
  }
}

// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value.
// Open a thread handle with the access rights needed for suspend/resume and
// context capture.  On failure *h is NULL or INVALID_HANDLE_VALUE.
void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

//
// Thread sampling implementation
//
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    // NOTE(review): the GetThreadContext return value is not checked; on
    // failure do_task() would see a stale/uninitialized context.
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}


// Kernel32 API - function-pointer types for symbols that may be absent on
// older Windows releases and are therefore resolved at runtime.
typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG);
typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG);
typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG);

GetLargePageMinimum_Fn os::Kernel32Dll::_GetLargePageMinimum = NULL;
VirtualAllocExNuma_Fn os::Kernel32Dll::_VirtualAllocExNuma = NULL;
GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL;


BOOL os::Kernel32Dll::initialized = FALSE;

// Callers must first check GetLargePageMinimumAvailable().
SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
  assert(initialized && _GetLargePageMinimum != NULL,
    "GetLargePageMinimumAvailable() not yet called");
  return _GetLargePageMinimum();
}

BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetLargePageMinimum != NULL;
}

BOOL os::Kernel32Dll::NumaCallsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _VirtualAllocExNuma != NULL;
}

LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr, SIZE_T bytes, DWORD flags, DWORD prot, DWORD node) {
  assert(initialized && _VirtualAllocExNuma != NULL,
    "NUMACallsAvailable() not yet called");

  return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
}

BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
  assert(initialized && _GetNumaHighestNodeNumber != NULL,
    "NUMACallsAvailable() not yet called");

  return _GetNumaHighestNodeNumber(ptr_highest_node_number);
}

BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node, PULONGLONG proc_mask) {
  assert(initialized && _GetNumaNodeProcessorMask != NULL,
    "NUMACallsAvailable() not yet called");

  return _GetNumaNodeProcessorMask(node, proc_mask);
}

// Returns 0 (no frames captured) when the symbol is unavailable.
USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
  ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) {
  if (!initialized) {
    initialize();
  }

  if (_RtlCaptureStackBackTrace != NULL) {
    return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
      BackTrace, BackTraceHash);
  } else {
    return 0;
  }
}

// Resolve the Kernel32 symbols that must be looked up dynamically on every
// supported Windows release.  GetProcAddress may legitimately return NULL for
// symbols missing on older systems; callers test the pointers before use.
void os::Kernel32Dll::initializeCommon() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");
    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
    _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
    _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
    _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
    _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace");
    initialized = TRUE;
  }
}



#ifndef JDK6_OR_EARLIER
// On JDK7+ builds these APIs are guaranteed present, so the wrappers call the
// Windows functions directly instead of going through GetProcAddress.

void os::Kernel32Dll::initialize() {
  initializeCommon();
}


// Kernel32 API
inline BOOL os::Kernel32Dll::SwitchToThread() {
  return ::SwitchToThread();
}

inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  return true;
}

// Help tools
inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
  return true;
}

inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
  return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32First(hSnapshot, lpme);
}

inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32Next(hSnapshot, lpme);
}


inline BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
  return true;
}

inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  ::GetNativeSystemInfo(lpSystemInfo);
}

// PSAPI API
inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
  return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
  return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
  return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

inline BOOL os::PSApiDll::PSApiAvailable() {
  return true;
}


// WinSock2 API
inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
  return ::WSAStartup(wVersionRequested, lpWSAData);
}

inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  return ::gethostbyname(name);
}

inline BOOL os::WinSock2Dll::WinSock2Available() {
  return true;
}

// Advapi API
inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
   BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
   PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
     return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
       BufferLength, PreviousState, ReturnLength);
}

inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
   PHANDLE TokenHandle) {
    return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}

inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
  return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}

inline BOOL os::Advapi32Dll::AdvapiAvailable() {
  return true;
}

void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
5422 // Parameters: 5423 // sym_name: Symbol in library we are looking for 5424 // lib_name: Name of library to look in, NULL for shared libs. 5425 // is_absolute_path == true if lib_name is absolute path to agent 5426 // such as "C:/a/b/L.dll" 5427 // == false if only the base name of the library is passed in 5428 // such as "L" 5429 char* os::build_agent_function_name(const char *sym_name, const char *lib_name, 5430 bool is_absolute_path) { 5431 char *agent_entry_name; 5432 size_t len; 5433 size_t name_len; 5434 size_t prefix_len = strlen(JNI_LIB_PREFIX); 5435 size_t suffix_len = strlen(JNI_LIB_SUFFIX); 5436 const char *start; 5437 5438 if (lib_name != NULL) { 5439 len = name_len = strlen(lib_name); 5440 if (is_absolute_path) { 5441 // Need to strip path, prefix and suffix 5442 if ((start = strrchr(lib_name, *os::file_separator())) != NULL) { 5443 lib_name = ++start; 5444 } else { 5445 // Need to check for drive prefix 5446 if ((start = strchr(lib_name, ':')) != NULL) { 5447 lib_name = ++start; 5448 } 5449 } 5450 if (len <= (prefix_len + suffix_len)) { 5451 return NULL; 5452 } 5453 lib_name += prefix_len; 5454 name_len = strlen(lib_name) - suffix_len; 5455 } 5456 } 5457 len = (lib_name != NULL ? 
name_len : 0) + strlen(sym_name) + 2; 5458 agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread); 5459 if (agent_entry_name == NULL) { 5460 return NULL; 5461 } 5462 if (lib_name != NULL) { 5463 const char *p = strrchr(sym_name, '@'); 5464 if (p != NULL && p != sym_name) { 5465 // sym_name == _Agent_OnLoad@XX 5466 strncpy(agent_entry_name, sym_name, (p - sym_name)); 5467 agent_entry_name[(p-sym_name)] = '\0'; 5468 // agent_entry_name == _Agent_OnLoad 5469 strcat(agent_entry_name, "_"); 5470 strncat(agent_entry_name, lib_name, name_len); 5471 strcat(agent_entry_name, p); 5472 // agent_entry_name == _Agent_OnLoad_lib_name@XX 5473 } else { 5474 strcpy(agent_entry_name, sym_name); 5475 strcat(agent_entry_name, "_"); 5476 strncat(agent_entry_name, lib_name, name_len); 5477 } 5478 } else { 5479 strcpy(agent_entry_name, sym_name); 5480 } 5481 return agent_entry_name; 5482 } 5483 5484 #else 5485 // Kernel32 API 5486 typedef BOOL (WINAPI* SwitchToThread_Fn)(void); 5487 typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD,DWORD); 5488 typedef BOOL (WINAPI* Module32First_Fn)(HANDLE,LPMODULEENTRY32); 5489 typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE,LPMODULEENTRY32); 5490 typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO); 5491 5492 SwitchToThread_Fn os::Kernel32Dll::_SwitchToThread = NULL; 5493 CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL; 5494 Module32First_Fn os::Kernel32Dll::_Module32First = NULL; 5495 Module32Next_Fn os::Kernel32Dll::_Module32Next = NULL; 5496 GetNativeSystemInfo_Fn os::Kernel32Dll::_GetNativeSystemInfo = NULL; 5497 5498 void os::Kernel32Dll::initialize() { 5499 if (!initialized) { 5500 HMODULE handle = ::GetModuleHandle("Kernel32.dll"); 5501 assert(handle != NULL, "Just check"); 5502 5503 _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread"); 5504 _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn) 5505 ::GetProcAddress(handle, "CreateToolhelp32Snapshot"); 
5506 _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First"); 5507 _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next"); 5508 _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo"); 5509 initializeCommon(); // resolve the functions that always need resolving 5510 5511 initialized = TRUE; 5512 } 5513 } 5514 5515 BOOL os::Kernel32Dll::SwitchToThread() { 5516 assert(initialized && _SwitchToThread != NULL, 5517 "SwitchToThreadAvailable() not yet called"); 5518 return _SwitchToThread(); 5519 } 5520 5521 5522 BOOL os::Kernel32Dll::SwitchToThreadAvailable() { 5523 if (!initialized) { 5524 initialize(); 5525 } 5526 return _SwitchToThread != NULL; 5527 } 5528 5529 // Help tools 5530 BOOL os::Kernel32Dll::HelpToolsAvailable() { 5531 if (!initialized) { 5532 initialize(); 5533 } 5534 return _CreateToolhelp32Snapshot != NULL && 5535 _Module32First != NULL && 5536 _Module32Next != NULL; 5537 } 5538 5539 HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) { 5540 assert(initialized && _CreateToolhelp32Snapshot != NULL, 5541 "HelpToolsAvailable() not yet called"); 5542 5543 return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId); 5544 } 5545 5546 BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) { 5547 assert(initialized && _Module32First != NULL, 5548 "HelpToolsAvailable() not yet called"); 5549 5550 return _Module32First(hSnapshot, lpme); 5551 } 5552 5553 inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) { 5554 assert(initialized && _Module32Next != NULL, 5555 "HelpToolsAvailable() not yet called"); 5556 5557 return _Module32Next(hSnapshot, lpme); 5558 } 5559 5560 5561 BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() { 5562 if (!initialized) { 5563 initialize(); 5564 } 5565 return _GetNativeSystemInfo != NULL; 5566 } 5567 5568 void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) { 5569 
assert(initialized && _GetNativeSystemInfo != NULL, 5570 "GetNativeSystemInfoAvailable() not yet called"); 5571 5572 _GetNativeSystemInfo(lpSystemInfo); 5573 } 5574 5575 // PSAPI API 5576 5577 5578 typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD); 5579 typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);; 5580 typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD); 5581 5582 EnumProcessModules_Fn os::PSApiDll::_EnumProcessModules = NULL; 5583 GetModuleFileNameEx_Fn os::PSApiDll::_GetModuleFileNameEx = NULL; 5584 GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL; 5585 BOOL os::PSApiDll::initialized = FALSE; 5586 5587 void os::PSApiDll::initialize() { 5588 if (!initialized) { 5589 HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0); 5590 if (handle != NULL) { 5591 _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle, 5592 "EnumProcessModules"); 5593 _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle, 5594 "GetModuleFileNameExA"); 5595 _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle, 5596 "GetModuleInformation"); 5597 } 5598 initialized = TRUE; 5599 } 5600 } 5601 5602 5603 5604 BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) { 5605 assert(initialized && _EnumProcessModules != NULL, 5606 "PSApiAvailable() not yet called"); 5607 return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded); 5608 } 5609 5610 DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) { 5611 assert(initialized && _GetModuleFileNameEx != NULL, 5612 "PSApiAvailable() not yet called"); 5613 return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize); 5614 } 5615 5616 BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) { 5617 assert(initialized && 
_GetModuleInformation != NULL, 5618 "PSApiAvailable() not yet called"); 5619 return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb); 5620 } 5621 5622 BOOL os::PSApiDll::PSApiAvailable() { 5623 if (!initialized) { 5624 initialize(); 5625 } 5626 return _EnumProcessModules != NULL && 5627 _GetModuleFileNameEx != NULL && 5628 _GetModuleInformation != NULL; 5629 } 5630 5631 5632 // WinSock2 API 5633 typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA); 5634 typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...); 5635 5636 WSAStartup_Fn os::WinSock2Dll::_WSAStartup = NULL; 5637 gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL; 5638 BOOL os::WinSock2Dll::initialized = FALSE; 5639 5640 void os::WinSock2Dll::initialize() { 5641 if (!initialized) { 5642 HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0); 5643 if (handle != NULL) { 5644 _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup"); 5645 _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname"); 5646 } 5647 initialized = TRUE; 5648 } 5649 } 5650 5651 5652 BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) { 5653 assert(initialized && _WSAStartup != NULL, 5654 "WinSock2Available() not yet called"); 5655 return _WSAStartup(wVersionRequested, lpWSAData); 5656 } 5657 5658 struct hostent* os::WinSock2Dll::gethostbyname(const char *name) { 5659 assert(initialized && _gethostbyname != NULL, 5660 "WinSock2Available() not yet called"); 5661 return _gethostbyname(name); 5662 } 5663 5664 BOOL os::WinSock2Dll::WinSock2Available() { 5665 if (!initialized) { 5666 initialize(); 5667 } 5668 return _WSAStartup != NULL && 5669 _gethostbyname != NULL; 5670 } 5671 5672 typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD); 5673 typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE); 5674 typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, LPCTSTR, PLUID); 5675 
5676 AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL; 5677 OpenProcessToken_Fn os::Advapi32Dll::_OpenProcessToken = NULL; 5678 LookupPrivilegeValue_Fn os::Advapi32Dll::_LookupPrivilegeValue = NULL; 5679 BOOL os::Advapi32Dll::initialized = FALSE; 5680 5681 void os::Advapi32Dll::initialize() { 5682 if (!initialized) { 5683 HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0); 5684 if (handle != NULL) { 5685 _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle, 5686 "AdjustTokenPrivileges"); 5687 _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle, 5688 "OpenProcessToken"); 5689 _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle, 5690 "LookupPrivilegeValueA"); 5691 } 5692 initialized = TRUE; 5693 } 5694 } 5695 5696 BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle, 5697 BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength, 5698 PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) { 5699 assert(initialized && _AdjustTokenPrivileges != NULL, 5700 "AdvapiAvailable() not yet called"); 5701 return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState, 5702 BufferLength, PreviousState, ReturnLength); 5703 } 5704 5705 BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess, 5706 PHANDLE TokenHandle) { 5707 assert(initialized && _OpenProcessToken != NULL, 5708 "AdvapiAvailable() not yet called"); 5709 return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle); 5710 } 5711 5712 BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) { 5713 assert(initialized && _LookupPrivilegeValue != NULL, 5714 "AdvapiAvailable() not yet called"); 5715 return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid); 5716 } 5717 5718 BOOL os::Advapi32Dll::AdvapiAvailable() { 5719 if (!initialized) { 5720 initialize(); 5721 } 5722 return _AdjustTokenPrivileges != NULL && 5723 
_OpenProcessToken != NULL && 5724 _LookupPrivilegeValue != NULL; 5725 } 5726 5727 #endif 5728 5729 #ifndef PRODUCT 5730 5731 // test the code path in reserve_memory_special() that tries to allocate memory in a single 5732 // contiguous memory block at a particular address. 5733 // The test first tries to find a good approximate address to allocate at by using the same 5734 // method to allocate some memory at any address. The test then tries to allocate memory in 5735 // the vicinity (not directly after it to avoid possible by-chance use of that location) 5736 // This is of course only some dodgy assumption, there is no guarantee that the vicinity of 5737 // the previously allocated memory is available for allocation. The only actual failure 5738 // that is reported is when the test tries to allocate at a particular location but gets a 5739 // different valid one. A NULL return value at this point is not considered an error but may 5740 // be legitimate. 5741 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages. 5742 void TestReserveMemorySpecial_test() { 5743 if (!UseLargePages) { 5744 if (VerboseInternalVMTests) { 5745 gclog_or_tty->print("Skipping test because large pages are disabled"); 5746 } 5747 return; 5748 } 5749 // save current value of globals 5750 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; 5751 bool old_use_numa_interleaving = UseNUMAInterleaving; 5752 5753 // set globals to make sure we hit the correct code path 5754 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; 5755 5756 // do an allocation at an address selected by the OS to get a good one. 5757 const size_t large_allocation_size = os::large_page_size() * 4; 5758 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); 5759 if (result == NULL) { 5760 if (VerboseInternalVMTests) { 5761 gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". 
Skipping remainder of test.", 5762 large_allocation_size); 5763 } 5764 } else { 5765 os::release_memory_special(result, large_allocation_size); 5766 5767 // allocate another page within the recently allocated memory area which seems to be a good location. At least 5768 // we managed to get it once. 5769 const size_t expected_allocation_size = os::large_page_size(); 5770 char* expected_location = result + os::large_page_size(); 5771 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); 5772 if (actual_location == NULL) { 5773 if (VerboseInternalVMTests) { 5774 gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.", 5775 expected_location, large_allocation_size); 5776 } 5777 } else { 5778 // release memory 5779 os::release_memory_special(actual_location, expected_allocation_size); 5780 // only now check, after releasing any memory to avoid any leaks. 5781 assert(actual_location == expected_location, 5782 err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead", 5783 expected_location, expected_allocation_size, actual_location)); 5784 } 5785 } 5786 5787 // restore globals 5788 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; 5789 UseNUMAInterleaving = old_use_numa_interleaving; 5790 } 5791 #endif // PRODUCT 5792