/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

// Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
#define _WIN32_WINNT 0x0600

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_windows.h"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_windows.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_share_windows.hpp"
#include "os_windows.inline.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

#ifdef _DEBUG
#include <crtdbg.h>
#endif


#include <windows.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/timeb.h>
#include <objidl.h>
#include <shlobj.h>

#include <malloc.h>
#include <signal.h>
#include <direct.h>
#include <errno.h>
#include <fcntl.h>
#include <io.h>
#include <process.h>              // For _beginthreadex(), _endthreadex()
#include <imagehlp.h>             // For os::dll_address_to_function_name
/* for enumerating dll libraries */
#include <vdmdbg.h>

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)

// For DLL loading/load error detection
// Values of PE COFF
#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
#define IMAGE_FILE_SIGNATURE_LENGTH 4

// Handles/ids of the primordial process and thread, captured during startup.
static HANDLE main_process;
static HANDLE main_thread;
static int    main_thread_id;

// Process FILETIMEs cached for time reporting.
static FILETIME process_creation_time;
static FILETIME process_exit_time;
static FILETIME process_user_time;
static FILETIME process_kernel_time;

#ifdef _M_IA64
  #define __CPU__ ia64
#elif _M_AMD64
  #define __CPU__ amd64
#else
  #define __CPU__ i486
#endif

// save DLL module handle, used by GetModuleFileName

HINSTANCE vm_lib_handle;

// DLL entry point: records the jvm.dll module handle and, when
// ForceTimeHighResolution is set, raises/restores the OS timer resolution
// to 1ms for the lifetime of the process.
BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
  switch (reason) {
    case DLL_PROCESS_ATTACH:
      vm_lib_handle = hinst;
      if (ForceTimeHighResolution)
        timeBeginPeriod(1L);
      break;
    case DLL_PROCESS_DETACH:
      if (ForceTimeHighResolution)
        timeEndPeriod(1L);

      break;
    default:
      break;
  }
  return true;
}

// Converts a FILETIME (64-bit count of 100ns intervals, split into two
// 32-bit halves) into a double count of seconds.
static inline double fileTimeAsDouble(FILETIME* time) {
  const double high  = (double) ((unsigned int) ~0);   // 2^32 - 1
  const double split = 10000000.0;                     // 100ns units per second
  double result = (time->dwLowDateTime / split) +
                   time->dwHighDateTime * (high/split);
  return result;
}

// Implementation of os

// Reads environment variable 'name' into 'buffer' (capacity 'len').
// Returns true only if the variable exists and fits in the buffer.
bool os::getenv(const char* name, char* buffer, int len) {
 int result = GetEnvironmentVariable(name, buffer, len);
 // GetEnvironmentVariable returns the required size (> len) on truncation,
 // and 0 if the variable does not exist.
 return result > 0 && result < len;
}

// Removes 'name' from the process environment.
bool os::unsetenv(const char* name) {
  assert(name != NULL, "Null pointer");
  return (SetEnvironmentVariable(name, NULL) == TRUE);
}

// No setuid programs under Windows.
bool os::have_special_privileges() {
  return false;
}


// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here.
// For Windows at the moment does nothing
void os::run_periodic_checks() {
  return;
}

// previous UnhandledExceptionFilter, if there is one
static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;

LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);

// Computes and registers the system properties that depend on the install
// layout: java_home, dll_dir, boot class path, library_path, ext and
// endorsed directories. Also installs the FP-exception filter on 32-bit.
void os::init_system_properties_values() {
  /* sysclasspath, java_home, dll_dir */
  {
    char *home_path;
    char *dll_path;
    char *pslash;
    char *bin = "\\bin";
    char home_dir[MAX_PATH];

    if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) {
      os::jvm_path(home_dir, sizeof(home_dir));
      // Found the full path to jvm.dll.
      // Now cut the path to <java_home>/jre if we can.
      *(strrchr(home_dir, '\\')) = '\0';  /* get rid of \jvm.dll */
      pslash = strrchr(home_dir, '\\');
      if (pslash != NULL) {
        *pslash = '\0';                   /* get rid of \{client|server} */
        pslash = strrchr(home_dir, '\\');
        if (pslash != NULL)
          *pslash = '\0';                 /* get rid of \bin */
      }
    }

    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
    if (home_path == NULL)
      return;
    strcpy(home_path, home_dir);
    Arguments::set_java_home(home_path);

    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal);
    if (dll_path == NULL)
      return;
    strcpy(dll_path, home_dir);
    strcat(dll_path, bin);
    Arguments::set_dll_dir(dll_path);

    if (!set_boot_path('\\', ';'))
      return;
  }

  /* library_path */
#define EXT_DIR "\\lib\\ext"
#define BIN_DIR "\\bin"
#define PACKAGE_DIR "\\Sun\\Java"
  {
    /* Win32 library search order (See the documentation for LoadLibrary):
     *
     * 1. The directory from which application is loaded.
     * 2. The system wide Java Extensions directory (Java only)
     * 3. System directory (GetSystemDirectory)
     * 4. Windows directory (GetWindowsDirectory)
     * 5. The PATH environment variable
     * 6. The current directory
     */

    char *library_path;
    char tmp[MAX_PATH];
    char *path_str = ::getenv("PATH");

    // Sized for five MAX_PATH components plus the PATH value, separators
    // and terminator; slight over-allocation (+10) for safety.
    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
        sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);

    library_path[0] = '\0';

    GetModuleFileName(NULL, tmp, sizeof(tmp));
    *(strrchr(tmp, '\\')) = '\0';   // strip the executable name, keep its dir
    strcat(library_path, tmp);

    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);
    strcat(library_path, PACKAGE_DIR BIN_DIR);

    GetSystemDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    if (path_str) {
      strcat(library_path, ";");
      strcat(library_path, path_str);
    }

    strcat(library_path, ";.");

    Arguments::set_library_path(library_path);
    FREE_C_HEAP_ARRAY(char, library_path, mtInternal);
  }

  /* Default extensions directory */
  {
    char path[MAX_PATH];
    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
    GetWindowsDirectory(path, MAX_PATH);
    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
        path, PACKAGE_DIR, EXT_DIR);
    Arguments::set_ext_dirs(buf);
  }
#undef EXT_DIR
#undef BIN_DIR
#undef PACKAGE_DIR

  /* Default endorsed standards directory. */
  {
#define ENDORSED_DIR "\\lib\\endorsed"
    size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR);
    char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal);
    sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR);
    Arguments::set_endorsed_dirs(buf);
#undef ENDORSED_DIR
  }

#ifndef _WIN64
  // set our UnhandledExceptionFilter and save any previous one
  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif

  // Done
  return;
}

void os::breakpoint() {
  DebugBreak();
}

// Invoked from the BREAKPOINT Macro
extern "C" void breakpoint() {
  os::breakpoint();
}

/*
 * RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 * So far, this method is only used by Native Memory Tracking, which is
 * only supported on Windows XP or later.
 */
int os::get_native_stack(address* stack, int frames, int toSkip) {
#ifdef _NMT_NOINLINE_
  toSkip ++;
#endif
  // +1 skips this frame itself; unfilled slots are NULLed out below.
  int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames,
    (PVOID*)stack, NULL);
  for (int index = captured; index < frames; index ++) {
    stack[index] = NULL;
  }
  return captured;
}


// os::current_stack_base()
//
// Returns the base of the stack, which is the stack's
// starting address. This function must be called
// while running on the stack of the thread being queried.

address os::current_stack_base() {
  MEMORY_BASIC_INFORMATION minfo;
  address stack_bottom;
  size_t stack_size;

  // Querying the address of a local gives the region of the current stack.
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  stack_bottom = (address)minfo.AllocationBase;
  stack_size = minfo.RegionSize;

  // Add up the sizes of all the regions with the same
  // AllocationBase.
  while (1)
  {
    VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
    if (stack_bottom == (address)minfo.AllocationBase)
      stack_size += minfo.RegionSize;
    else
      break;
  }

#ifdef _M_IA64
  // IA64 has memory and register stacks
  //
  // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit
  // at thread creation (1MB backing store growing upwards, 1MB memory stack
  // growing downwards, 2MB summed up)
  //
  // ...
  // ------- top of stack (high address) -----
  // |
  // |      1MB
  // |      Backing Store (Register Stack)
  // |
  // |         / \
  // |          |
  // |          |
  // |          |
  // ------------------------ stack base -----
  // |      1MB
  // |      Memory Stack
  // |
  // |          |
  // |          |
  // |          |
  // |         \ /
  // |
  // ----- bottom of stack (low address) -----
  // ...

  stack_size = stack_size / 2;
#endif
  return stack_bottom + stack_size;
}

size_t os::current_stack_size() {
  size_t sz;
  MEMORY_BASIC_INFORMATION minfo;
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
  return sz;
}

// Thread-safe replacement for localtime(): copies the result into
// caller-provided storage 'res'. Returns NULL on conversion failure.
// NOTE(review): relies on the CRT's per-thread localtime buffer on this
// platform -- confirm before reusing elsewhere.
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  const struct tm* time_struct_ptr = localtime(clock);
  if (time_struct_ptr != NULL) {
    *res = *time_struct_ptr;
    return res;
  }
  return NULL;
}

LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);

// Thread start routine for all new Java threads
static unsigned __stdcall java_start(Thread* thread) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  _alloca(((pid ^ counter++) & 7) * 128);

  OSThread* osthr = thread->osthread();
  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Diagnostic code to investigate JDK-6573254
  int res = 90115; // non-java thread
  if (thread->is_Java_thread()) {
    res = 60115;   // java thread
  }

  // Install a win32 structured exception handler around every thread created
  // by VM, so VM can generate error dump when an exception occurred in non-
  // Java thread (e.g. VM thread).
  __try {
    thread->run();
  } __except(topLevelExceptionFilter(
             (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::add code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
  }

  // Thread must not return from exit_process_or_thread(), but if it does,
  // let it proceed to exit normally
  return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
}

// Wraps an existing Win32 thread (handle + id) in a new OSThread in state
// INITIALIZED. Returns NULL if allocation of the OSThread or its interrupt
// event fails.
static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Initialize support for Java interrupts
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return NULL;
  }
  osthread->set_interrupt_event(interrupt_event);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}


// Attaches the calling (externally created) thread to the VM by wrapping its
// duplicated handle in an OSThread in state RUNNABLE.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  HANDLE thread_h;
  // Duplicate the pseudo-handle so the OSThread owns a real handle.
  if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
                       &thread_h, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  OSThread* osthread = create_os_thread(thread, thread_h,
                                        (int)current_thread_id());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);
  return true;
}

// Binds the primordial thread (captured in main_thread/main_thread_id at
// startup) to the given JavaThread. Idempotent via _starting_thread.
bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start)
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);
  return true;
}

// Allocate and initialize a new OSThread
bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
  unsigned thread_id;

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Initialize support for Java interrupts
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    // NOTE(review): NULL from a bool-returning function -- evaluates to
    // false, but should read 'return false;' for consistency.
    return NULL;
  }
  osthread->set_interrupt_event(interrupt_event);
  osthread->set_interrupted(false);

  thread->set_osthread(osthread);

  if (stack_size == 0) {
    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0)
        stack_size = JavaThread::stack_size_at_create();
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }

  // Create the Win32 thread
  //
  // Contrary to what MSDN document says, "stack_size" in _beginthreadex()
  // does not specify stack size. Instead, it specifies the size of
  // initially committed space. The stack size is determined by
  // PE header in the executable. If the committed "stack_size" is larger
  // than default value in the PE header, the stack is rounded up to the
  // nearest multiple of 1MB. For example if the launcher has default
  // stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all, it only affects the initial
  // commitment. On the other hand, specifying 'stack_size' larger than
  // default value may cause significant increase in memory usage, because
  // not only the stack space will be rounded up to MB, but also the
  // entire space is committed upfront.
  //
  // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
  // for CreateThread() that can treat 'stack_size' as stack size. However we
  // are not supposed to call CreateThread() directly according to MSDN
  // document because JVM uses C runtime library. The good news is that the
  // flag appears to work with _beginthredex() as well.

#ifndef STACK_SIZE_PARAM_IS_A_RESERVATION
#define STACK_SIZE_PARAM_IS_A_RESERVATION  (0x10000)
#endif

  HANDLE thread_handle =
    (HANDLE)_beginthreadex(NULL,
                           (unsigned)stack_size,
                           (unsigned (__stdcall *)(void*)) java_start,
                           thread,
                           CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION,
                           &thread_id);
  if (thread_handle == NULL) {
    // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again
    // without the flag.
    thread_handle =
      (HANDLE)_beginthreadex(NULL,
                             (unsigned)stack_size,
                             (unsigned (__stdcall *)(void*)) java_start,
                             thread,
                             CREATE_SUSPENDED,
                             &thread_id);
  }
  if (thread_handle == NULL) {
    // Need to clean up stuff we've allocated so far
    CloseHandle(osthread->interrupt_event());
    thread->set_osthread(NULL);
    delete osthread;
    // NOTE(review): NULL from a bool-returning function -- see above.
    return NULL;
  }

  Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}


// Free Win32 resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");
  CloseHandle(osthread->thread_handle());
  CloseHandle(osthread->interrupt_event());
  delete osthread;
}

// Baselines for os::elapsed_counter(), set by initialize_performance_counter().
static jlong first_filetime;
static jlong initial_performance_count;
static jlong performance_frequency;


// Reassembles a LARGE_INTEGER's two 32-bit halves into a jlong.
jlong as_long(LARGE_INTEGER x) {
  jlong result = 0; // initialization to avoid warning
  set_high(&result, x.HighPart);
  set_low(&result,  x.LowPart);
  return result;
}


661 jlong os::elapsed_counter() { 662 LARGE_INTEGER count; 663 if (win32::_has_performance_count) { 664 QueryPerformanceCounter(&count); 665 return as_long(count) - initial_performance_count; 666 } else { 667 FILETIME wt; 668 GetSystemTimeAsFileTime(&wt); 669 return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime); 670 } 671 } 672 673 674 jlong os::elapsed_frequency() { 675 if (win32::_has_performance_count) { 676 return performance_frequency; 677 } else { 678 // the FILETIME time is the number of 100-nanosecond intervals since January 1,1601. 679 return 10000000; 680 } 681 } 682 683 684 julong os::available_memory() { 685 return win32::available_memory(); 686 } 687 688 julong os::win32::available_memory() { 689 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 690 // value if total memory is larger than 4GB 691 MEMORYSTATUSEX ms; 692 ms.dwLength = sizeof(ms); 693 GlobalMemoryStatusEx(&ms); 694 695 return (julong)ms.ullAvailPhys; 696 } 697 698 julong os::physical_memory() { 699 return win32::physical_memory(); 700 } 701 702 bool os::has_allocatable_memory_limit(julong* limit) { 703 MEMORYSTATUSEX ms; 704 ms.dwLength = sizeof(ms); 705 GlobalMemoryStatusEx(&ms); 706 #ifdef _LP64 707 *limit = (julong)ms.ullAvailVirtual; 708 return true; 709 #else 710 // Limit to 1400m because of the 2gb address space wall 711 *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual); 712 return true; 713 #endif 714 } 715 716 // VC6 lacks DWORD_PTR 717 #if _MSC_VER < 1300 718 typedef UINT_PTR DWORD_PTR; 719 #endif 720 721 int os::active_processor_count() { 722 DWORD_PTR lpProcessAffinityMask = 0; 723 DWORD_PTR lpSystemAffinityMask = 0; 724 int proc_count = processor_count(); 725 if (proc_count <= sizeof(UINT_PTR) * BitsPerByte && 726 GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) { 727 // Nof active processors is number of bits in process affinity mask 728 int bitcount = 0; 729 while 
(lpProcessAffinityMask != 0) { 730 lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1); 731 bitcount++; 732 } 733 return bitcount; 734 } else { 735 return proc_count; 736 } 737 } 738 739 void os::set_native_thread_name(const char *name) { 740 // Not yet implemented. 741 return; 742 } 743 744 bool os::distribute_processes(uint length, uint* distribution) { 745 // Not yet implemented. 746 return false; 747 } 748 749 bool os::bind_to_processor(uint processor_id) { 750 // Not yet implemented. 751 return false; 752 } 753 754 void os::win32::initialize_performance_counter() { 755 LARGE_INTEGER count; 756 if (QueryPerformanceFrequency(&count)) { 757 win32::_has_performance_count = 1; 758 performance_frequency = as_long(count); 759 QueryPerformanceCounter(&count); 760 initial_performance_count = as_long(count); 761 } else { 762 win32::_has_performance_count = 0; 763 FILETIME wt; 764 GetSystemTimeAsFileTime(&wt); 765 first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 766 } 767 } 768 769 770 double os::elapsedTime() { 771 return (double) elapsed_counter() / (double) elapsed_frequency(); 772 } 773 774 775 // Windows format: 776 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601. 
// Java format:
//   Java standards require the number of milliseconds since 1/1/1970

// Constant offset - calculated using offset()
// (100ns intervals between 1601-01-01 and 1970-01-01)
static jlong  _offset   = 116444736000000000;
// Fake time counter for reproducible results when debugging
static jlong  fake_time = 0;

#ifdef ASSERT
// Just to be safe, recalculate the offset in debug mode
static jlong _calculated_offset = 0;
static int   _has_calculated_offset = 0;

jlong offset() {
  if (_has_calculated_offset) return _calculated_offset;
  SYSTEMTIME java_origin;
  java_origin.wYear          = 1970;
  java_origin.wMonth         = 1;
  java_origin.wDayOfWeek     = 0; // ignored
  java_origin.wDay           = 1;
  java_origin.wHour          = 0;
  java_origin.wMinute        = 0;
  java_origin.wSecond        = 0;
  java_origin.wMilliseconds  = 0;
  FILETIME jot;
  if (!SystemTimeToFileTime(&java_origin, &jot)) {
    fatal(err_msg("Error = %d\nWindows error", GetLastError()));
  }
  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
  _has_calculated_offset = 1;
  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
  return _calculated_offset;
}
#else
jlong offset() {
  return _offset;
}
#endif

// FILETIME (100ns since 1601) -> Java millis (since 1970).
jlong windows_to_java_time(FILETIME wt) {
  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  return (a - offset()) / 10000;
}

// Java millis (since 1970) -> FILETIME (100ns since 1601).
FILETIME java_to_windows_time(jlong l) {
  jlong a = (l * 10000) + offset();
  FILETIME result;
  result.dwHighDateTime = high(a);
  result.dwLowDateTime  = low(a);
  return result;
}

bool os::supports_vtime() { return true; }
bool os::enable_vtime() { return false; }
bool os::vtime_enabled() { return false; }

// Per-thread CPU time (kernel + user) in seconds; falls back to
// wall-clock elapsed time if GetThreadTimes fails.
double os::elapsedVTime() {
  FILETIME created;
  FILETIME exited;
  FILETIME kernel;
  FILETIME user;
  if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
    // the resolution of windows_to_java_time() should be sufficient (ms)
    return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
  } else {
    return elapsedTime();
  }
}

jlong os::javaTimeMillis() {
  if (UseFakeTimers) {
    return fake_time++;
  } else {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    return windows_to_java_time(wt);
  }
}

// Monotonic nanosecond clock based on the performance counter; degrades
// to millisecond resolution when no performance counter exists.
jlong os::javaTimeNanos() {
  if (!win32::_has_performance_count) {
    return javaTimeMillis() * NANOSECS_PER_MILLISEC; // the best we can do.
  } else {
    LARGE_INTEGER current_count;
    QueryPerformanceCounter(&current_count);
    double current = as_long(current_count);
    double freq = performance_frequency;
    jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
    return time;
  }
}

// Describes the precision/overflow characteristics of javaTimeNanos()
// for JVMTI.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  if (!win32::_has_performance_count) {
    // javaTimeMillis() doesn't have much precision,
    // but it is not going to wrap -- so all 64 bits
    info_ptr->max_value = ALL_64_BITS;

    // this is a wall clock timer, so may skip
    info_ptr->may_skip_backward = true;
    info_ptr->may_skip_forward = true;
  } else {
    jlong freq = performance_frequency;
    if (freq < NANOSECS_PER_SEC) {
      // the performance counter is 64 bits and we will
      // be multiplying it -- so no wrap in 64 bits
      info_ptr->max_value = ALL_64_BITS;
    } else if (freq > NANOSECS_PER_SEC) {
      // use the max value the counter can reach to
      // determine the max value which could be returned
      julong max_counter = (julong)ALL_64_BITS;
      info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
    } else {
      // the performance counter is 64 bits and we will
      // be using it directly -- so no wrap in 64 bits
      info_ptr->max_value = ALL_64_BITS;
    }

    // using a counter, so no skipping
    info_ptr->may_skip_backward = false;
    info_ptr->may_skip_forward = false;
  }
  info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
}

// Formats the current local time as "YYYY-MM-DD HH:MM:SS" into buf.
char* os::local_time_string(char *buf, size_t buflen) {
  SYSTEMTIME st;
  GetLocalTime(&st);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
  return buf;
}

// Fills in real/user/system time for the current process, in seconds.
// Returns false if GetProcessTimes fails.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  HANDLE h_process = GetCurrentProcess();
  FILETIME create_time, exit_time, kernel_time, user_time;
  BOOL result = GetProcessTimes(h_process,
                                &create_time,
                                &exit_time,
                                &kernel_time,
                                &user_time);
  if (result != 0) {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    jlong rtc_millis = windows_to_java_time(wt);
    jlong user_millis = windows_to_java_time(user_time);
    jlong system_millis = windows_to_java_time(kernel_time);
    *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
    *process_user_time = ((double) user_millis) / ((double) MILLIUNITS);
    *process_system_time = ((double) system_millis) / ((double) MILLIUNITS);
    return true;
  } else {
    return false;
  }
}

void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}


// Resolved lazily from DBGHELP.DLL in check_or_create_dump().
static BOOL  (WINAPI *_MiniDumpWriteDump)  ( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
                                            PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION);

// Writes a minidump (hs_err_pid<pid>.mdmp) for the current crash, subject
// to the CreateMinidumpOnCrash flag and the Windows edition. 'buffer' is
// reused both for the dump path and for any error message reported via
// VMError::report_coredump_status().
void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) {
  HINSTANCE dbghelp;
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  HANDLE dumpFile;
  MINIDUMP_TYPE dumpType;
  // NOTE(review): 'cwd' is a function-local static -- presumably harmless
  // since this runs once during error reporting; confirm before reuse.
  static const char* cwd;

// Default is to always create dump for debug builds, on product builds only dump on server versions of Windows.
#ifndef ASSERT
  // If running on a client version of Windows and user has not explicitly enabled dumping
  if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false);
    return;
    // If running on a server version of Windows and user has explictly disabled dumping
  } else if (os::win32::is_windows_server() && !FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#else
  if (!FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#endif

  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);

  if (dbghelp == NULL) {
    VMError::report_coredump_status("Failed to load dbghelp.dll", false);
    return;
  }

  _MiniDumpWriteDump = CAST_TO_FN_PTR(
      BOOL(WINAPI *)( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
                      PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION),
      GetProcAddress(dbghelp, "MiniDumpWriteDump"));

  if (_MiniDumpWriteDump == NULL) {
    VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false);
    return;
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData);

// Older versions of dbghelp.h doesn't contain all the dumptypes we want, dbghelp.h with
// API_VERSION_NUMBER 11 or higher contains the ones we want though
#if API_VERSION_NUMBER >= 11
  dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo |
                             MiniDumpWithUnloadedModules);
#endif

  cwd = get_current_directory(NULL, 0);
  jio_snprintf(buffer, bufferSize, "%s\\hs_err_pid%u.mdmp", cwd, current_process_id());
  dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);

  if (dumpFile == INVALID_HANDLE_VALUE) {
    VMError::report_coredump_status("Failed to create file for dumping", false);
    return;
  }
  if (exceptionRecord != NULL && contextRecord != NULL) {
    ep.ContextRecord = (PCONTEXT) contextRecord;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }


  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
    DWORD error = GetLastError();
    LPTSTR msgbuf = NULL;

    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) {

      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
      LocalFree(msgbuf);
    } else {
      // Call to FormatMessage failed, just include the result from GetLastError
      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
    }
    VMError::report_coredump_status(buffer, false);
  } else {
    VMError::report_coredump_status(buffer, true);
  }

  CloseHandle(dumpFile);
}


void os::abort(bool dump_core) {
  os::shutdown();
  // no core dump on Windows
  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
//  * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

/* Caller must have already run dirname through JVM_NativePath, which removes
   duplicate slashes and converts all instances of '/' into '\\'.
*/ 1075 1076 DIR * 1077 os::opendir(const char *dirname) 1078 { 1079 assert(dirname != NULL, "just checking"); // hotspot change 1080 DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal); 1081 DWORD fattr; // hotspot change 1082 char alt_dirname[4] = { 0, 0, 0, 0 }; 1083 1084 if (dirp == 0) { 1085 errno = ENOMEM; 1086 return 0; 1087 } 1088 1089 /* 1090 * Win32 accepts "\" in its POSIX stat(), but refuses to treat it 1091 * as a directory in FindFirstFile(). We detect this case here and 1092 * prepend the current drive name. 1093 */ 1094 if (dirname[1] == '\0' && dirname[0] == '\\') { 1095 alt_dirname[0] = _getdrive() + 'A' - 1; 1096 alt_dirname[1] = ':'; 1097 alt_dirname[2] = '\\'; 1098 alt_dirname[3] = '\0'; 1099 dirname = alt_dirname; 1100 } 1101 1102 dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal); 1103 if (dirp->path == 0) { 1104 free(dirp, mtInternal); 1105 errno = ENOMEM; 1106 return 0; 1107 } 1108 strcpy(dirp->path, dirname); 1109 1110 fattr = GetFileAttributes(dirp->path); 1111 if (fattr == 0xffffffff) { 1112 free(dirp->path, mtInternal); 1113 free(dirp, mtInternal); 1114 errno = ENOENT; 1115 return 0; 1116 } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) { 1117 free(dirp->path, mtInternal); 1118 free(dirp, mtInternal); 1119 errno = ENOTDIR; 1120 return 0; 1121 } 1122 1123 /* Append "*.*", or possibly "\\*.*", to path */ 1124 if (dirp->path[1] == ':' 1125 && (dirp->path[2] == '\0' 1126 || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) { 1127 /* No '\\' needed for cases like "Z:" or "Z:\" */ 1128 strcat(dirp->path, "*.*"); 1129 } else { 1130 strcat(dirp->path, "\\*.*"); 1131 } 1132 1133 dirp->handle = FindFirstFile(dirp->path, &dirp->find_data); 1134 if (dirp->handle == INVALID_HANDLE_VALUE) { 1135 if (GetLastError() != ERROR_FILE_NOT_FOUND) { 1136 free(dirp->path, mtInternal); 1137 free(dirp, mtInternal); 1138 errno = EACCES; 1139 return 0; 1140 } 1141 } 1142 return dirp; 1143 } 1144 1145 /* parameter dbuf unused on Windows */ 1146 1147 
struct dirent * 1148 os::readdir(DIR *dirp, dirent *dbuf) 1149 { 1150 assert(dirp != NULL, "just checking"); // hotspot change 1151 if (dirp->handle == INVALID_HANDLE_VALUE) { 1152 return 0; 1153 } 1154 1155 strcpy(dirp->dirent.d_name, dirp->find_data.cFileName); 1156 1157 if (!FindNextFile(dirp->handle, &dirp->find_data)) { 1158 if (GetLastError() == ERROR_INVALID_HANDLE) { 1159 errno = EBADF; 1160 return 0; 1161 } 1162 FindClose(dirp->handle); 1163 dirp->handle = INVALID_HANDLE_VALUE; 1164 } 1165 1166 return &dirp->dirent; 1167 } 1168 1169 int 1170 os::closedir(DIR *dirp) 1171 { 1172 assert(dirp != NULL, "just checking"); // hotspot change 1173 if (dirp->handle != INVALID_HANDLE_VALUE) { 1174 if (!FindClose(dirp->handle)) { 1175 errno = EBADF; 1176 return -1; 1177 } 1178 dirp->handle = INVALID_HANDLE_VALUE; 1179 } 1180 free(dirp->path, mtInternal); 1181 free(dirp, mtInternal); 1182 return 0; 1183 } 1184 1185 // This must be hard coded because it's the system's temporary 1186 // directory not the java application's temp directory, ala java.io.tmpdir. 1187 const char* os::get_temp_directory() { 1188 static char path_buf[MAX_PATH]; 1189 if (GetTempPath(MAX_PATH, path_buf)>0) 1190 return path_buf; 1191 else{ 1192 path_buf[0]='\0'; 1193 return path_buf; 1194 } 1195 } 1196 1197 static bool file_exists(const char* filename) { 1198 if (filename == NULL || strlen(filename) == 0) { 1199 return false; 1200 } 1201 return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES; 1202 } 1203 1204 bool os::dll_build_name(char *buffer, size_t buflen, 1205 const char* pname, const char* fname) { 1206 bool retval = false; 1207 const size_t pnamelen = pname ? strlen(pname) : 0; 1208 const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0; 1209 1210 // Return error on buffer overflow. 
1211 if (pnamelen + strlen(fname) + 10 > buflen) { 1212 return retval; 1213 } 1214 1215 if (pnamelen == 0) { 1216 jio_snprintf(buffer, buflen, "%s.dll", fname); 1217 retval = true; 1218 } else if (c == ':' || c == '\\') { 1219 jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname); 1220 retval = true; 1221 } else if (strchr(pname, *os::path_separator()) != NULL) { 1222 int n; 1223 char** pelements = split_path(pname, &n); 1224 if (pelements == NULL) { 1225 return false; 1226 } 1227 for (int i = 0; i < n; i++) { 1228 char* path = pelements[i]; 1229 // Really shouldn't be NULL, but check can't hurt 1230 size_t plen = (path == NULL) ? 0 : strlen(path); 1231 if (plen == 0) { 1232 continue; // skip the empty path values 1233 } 1234 const char lastchar = path[plen - 1]; 1235 if (lastchar == ':' || lastchar == '\\') { 1236 jio_snprintf(buffer, buflen, "%s%s.dll", path, fname); 1237 } else { 1238 jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname); 1239 } 1240 if (file_exists(buffer)) { 1241 retval = true; 1242 break; 1243 } 1244 } 1245 // release the storage 1246 for (int i = 0; i < n; i++) { 1247 if (pelements[i] != NULL) { 1248 FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal); 1249 } 1250 } 1251 if (pelements != NULL) { 1252 FREE_C_HEAP_ARRAY(char*, pelements, mtInternal); 1253 } 1254 } else { 1255 jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname); 1256 retval = true; 1257 } 1258 return retval; 1259 } 1260 1261 // Needs to be in os specific directory because windows requires another 1262 // header file <direct.h> 1263 const char* os::get_current_directory(char *buf, size_t buflen) { 1264 int n = static_cast<int>(buflen); 1265 if (buflen > INT_MAX) n = INT_MAX; 1266 return _getcwd(buf, n); 1267 } 1268 1269 //----------------------------------------------------------- 1270 // Helper functions for fatal error handler 1271 #ifdef _WIN64 1272 // Helper routine which returns true if address in 1273 // within the NTDLL address space. 
//
// True iff 'addr' lies inside the loaded NTDLL.DLL image.
static bool _addr_in_ntdll( address addr )
{
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if (hmod == NULL) return false;
  if (!os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod,
                                           &minfo, sizeof(MODULEINFO)) )
    return false;

  if ((addr >= minfo.lpBaseOfDll) &&
      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
    return true;
  else
    return false;
}
#endif

// Parameter block for _locate_module_by_addr: 'addr' is the input; on a hit
// 'full_path' (if non-NULL) and 'base_addr' are filled in.
struct _modinfo {
  address addr;
  char*   full_path;  // point to a char buffer
  int     buflen;     // size of the buffer
  address base_addr;
};

// get_loaded_modules_info callback: stop (return 1) at the module whose
// [base, top) range contains pmod->addr.
static int _locate_module_by_addr(const char * mod_fname, address base_addr,
                                  address top_address, void * param) {
  struct _modinfo *pmod = (struct _modinfo *)param;
  if (!pmod) return -1;

  if (base_addr <= pmod->addr &&
      top_address > pmod->addr) {
    // if a buffer is provided, copy path name to the buffer
    if (pmod->full_path) {
      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
    }
    pmod->base_addr = base_addr;
    return 1;
  }
  return 0;
}

// Map a code address to the full path of the library containing it; 'offset'
// (optional) receives the offset of addr from the library base, or -1.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
  // return the full path to the DLL file, sometimes it returns path
  // to the corresponding PDB file (debug info); sometimes it only
  // returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr      = addr;
  mi.full_path = buf;
  mi.buflen    = buflen;
  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

// Map a code address to a symbol name via the Decoder; on failure the buffer
// is emptied and *offset (if given) is set to -1.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset)) {
    return true;
  }
  if (offset != NULL) *offset = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
// (the address of this function itself is used as a probe inside jvm.dll)
static int _locate_jvm_dll(const char* mod_fname, address base_addr,
                           address top_address, void * param) {
  if (!param) return -1;

  if (base_addr <= (address)_locate_jvm_dll &&
      top_address > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = top_address;
    return 1;
  }
  return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll (range is resolved lazily on first call)
bool os::address_is_in_vm(address addr) {
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(const char* fname, address base_address,
                         address top_address, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
  return 0;
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
void * os::dll_load(const char *name, char *ebuf, int ebuflen)
{
  void * result = LoadLibrary(name);
  if (result != NULL)
  {
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen-1);
    ebuf[ebuflen-1]='\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen-1]='\0';
  int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0);
  if (file_descriptor<0)
  {
    return NULL;
  }

  // Parse the PE header by hand to learn the target machine of the library.
  uint32_t signature_offset;
  uint16_t lib_arch=0;
  bool failed_to_get_lib_arch=
    (
     // Go to position 3c in the dll
     (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0)
     ||
     // Read location of signature
     (sizeof(signature_offset)!=
      (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset))))
     ||
     // Go to COFF File Header in dll
     // that is located after "signature" (4 bytes long)
     (os::seek_to_file_offset(file_descriptor,
      signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0)
     ||
     // Read field that contains code of architecture
     // that dll was built for
     (sizeof(lib_arch)!=
      (os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch))))
    );

  ::close(file_descriptor);
  if (failed_to_get_lib_arch)
  {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  typedef struct
  {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[]={
    {IMAGE_FILE_MACHINE_I386,  (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_IA64,  (char*)"IA 64"}
  };
#if   (defined _M_IA64)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64;
#elif (defined _M_AMD64)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
         is defined :_M_IA64,_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str=NULL,*lib_arch_str=NULL;
  for (unsigned int i=0;i<ARRAY_SIZE(arch_array);i++)
  {
    if (lib_arch==arch_array[i].arch_code)
      lib_arch_str=arch_array[i].arch_name;
    if (running_arch==arch_array[i].arch_code)
      running_arch_str=arch_array[i].arch_name;
  }

  assert(running_arch_str,
         "Didn't find runing architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch)
  {
    return NULL;
  }

  if (lib_arch_str!=NULL)
  {
    ::_snprintf(ebuf, ebuflen-1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str,running_arch_str);
  }
  else
  {
    // don't know what architecture this dll was built for
    ::_snprintf(ebuf, ebuflen-1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch,running_arch_str);
  }

  return NULL;
}

// Dump every loaded module's address range and path to 'st'.
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  get_loaded_modules_info(_print_module, (void *)st);
}

// Enumerate the loaded modules (via PSAPI, up to MAX_NUM_MODULES) and invoke
// 'callback' for each; stops early when the callback returns non-zero.
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  HANDLE hProcess;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[MAX_PATH];
  int         result = 0;

  if (!os::PSApiDll::PSApiAvailable()) {
    return 0;
  }

  int pid = os::current_process_id();
  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
                                        sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
                                           filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
                                            &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = callback(filename, (address)modinfo.lpBaseOfDll,
                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
}

void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}

void os::print_os_info(outputStream* st) {
  st->print("OS:");

  os::win32::print_windows_version(st);
}

// Decode the GetVersionEx result into a human-readable Windows product name.
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  SYSTEM_INFO si;

  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);

  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("N/A");
    return;
  }

  // Encode major.minor as major*1000+minor for the switches below.
  int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;

  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  if (os_vers >= 5002) {
    // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
    // find out whether we are running on 64 bit processor or not.
    if (os::Kernel32Dll::GetNativeSystemInfoAvailable()) {
      os::Kernel32Dll::GetNativeSystemInfo(&si);
    } else {
      GetSystemInfo(&si);
    }
  }

  if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
    switch (os_vers) {
    case 3051: st->print(" Windows NT 3.51"); break;
    case 4000: st->print(" Windows NT 4.0"); break;
    case 5000: st->print(" Windows 2000"); break;
    case 5001: st->print(" Windows XP"); break;
    case 5002:
      // 5.2 is shared by XP x64 Edition and Server 2003; distinguish by
      // product type + processor architecture.
      if (osvi.wProductType == VER_NT_WORKSTATION &&
          si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
        st->print(" Windows XP x64 Edition");
      } else {
        st->print(" Windows Server 2003 family");
      }
      break;

    case 6000:
      if (osvi.wProductType == VER_NT_WORKSTATION) {
        st->print(" Windows Vista");
      } else {
        st->print(" Windows Server 2008");
      }
      break;

    case 6001:
      if (osvi.wProductType == VER_NT_WORKSTATION) {
        st->print(" Windows 7");
      } else {
        st->print(" Windows Server 2008 R2");
      }
      break;

    case 6002:
      if (osvi.wProductType == VER_NT_WORKSTATION) {
        st->print(" Windows 8");
      } else {
        st->print(" Windows Server 2012");
      }
      break;

    case 6003:
      if (osvi.wProductType == VER_NT_WORKSTATION) {
        st->print(" Windows 8.1");
      } else {
        st->print(" Windows Server 2012 R2");
      }
      break;

    default: // future os
      // Unrecognized windows, print out its major and minor versions
      st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
    }
  } else {
    switch (os_vers) {
    case 4000: st->print(" Windows 95"); break;
    case 4010: st->print(" Windows 98"); break;
    case 4090: st->print(" Windows Me"); break;
    default: // future windows, print out its major and minor versions
      st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
    }
  }

  if (os_vers >= 6000 &&
si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) { 1673 st->print(" , 64 bit"); 1674 } 1675 1676 st->print(" Build %d", osvi.dwBuildNumber); 1677 st->print(" %s", osvi.szCSDVersion); // service pack 1678 st->cr(); 1679 } 1680 1681 void os::pd_print_cpu_info(outputStream* st) { 1682 // Nothing to do for now. 1683 } 1684 1685 void os::print_memory_info(outputStream* st) { 1686 st->print("Memory:"); 1687 st->print(" %dk page", os::vm_page_size()>>10); 1688 1689 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 1690 // value if total memory is larger than 4GB 1691 MEMORYSTATUSEX ms; 1692 ms.dwLength = sizeof(ms); 1693 GlobalMemoryStatusEx(&ms); 1694 1695 st->print(", physical %uk", os::physical_memory() >> 10); 1696 st->print("(%uk free)", os::available_memory() >> 10); 1697 1698 st->print(", swap %uk", ms.ullTotalPageFile >> 10); 1699 st->print("(%uk free)", ms.ullAvailPageFile >> 10); 1700 st->cr(); 1701 } 1702 1703 void os::print_siginfo(outputStream *st, void *siginfo) { 1704 EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo; 1705 st->print("siginfo:"); 1706 st->print(" ExceptionCode=0x%x", er->ExceptionCode); 1707 1708 if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 1709 er->NumberParameters >= 2) { 1710 switch (er->ExceptionInformation[0]) { 1711 case 0: st->print(", reading address"); break; 1712 case 1: st->print(", writing address"); break; 1713 default: st->print(", ExceptionInformation=" INTPTR_FORMAT, 1714 er->ExceptionInformation[0]); 1715 } 1716 st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]); 1717 } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR && 1718 er->NumberParameters >= 2 && UseSharedSpaces) { 1719 FileMapInfo* mapinfo = FileMapInfo::current_info(); 1720 if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) { 1721 st->print("\n\nError accessing class data sharing archive." 
                " Mapped file inaccessible during execution, "
                " possible disk/network problem.");
    }
  } else {
    // Generic case: dump whatever exception parameters are present.
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}

// Cached result of os::jvm_path(); empty until the first call resolves it.
static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like jvm.dll is installed there (append a fake suffix
    // hotspot/jvm.dll).
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {

      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      // _access == 0 means the "\jre\bin\" layout exists (legacy image).
      if (0 != _access(buf, 0)) {
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  // Fall back to asking the OS where this module was actually loaded from.
  if (buf[0] == '\0') {
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  strncpy(saved_jvm_path, buf, MAX_PATH);
}


void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}


void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("@%d", args_size * sizeof(int));
#endif
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c

// Format the last Win32 (or, failing that, CRT) error message into buf;
// returns the message length, or 0 when there was no pending error.
size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
          FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
          NULL,
          errval,
          0,
          buf,
          (DWORD)len,
          NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}

// Last Win32 error, or the CRT errno when no Win32 error is pending.
int os::get_last_error() {
  DWORD error = GetLastError();
  if (error == 0)
    error = errno;
  return (int)error;
}

// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;

// Java-level signal dispatcher hook: queue the signal and re-arm the handler.
static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}

void* os::user_handler() {
  return (void*) UserHandler;
}

// Install 'handler' for 'signal_number', returning the previous handler.
// SIGBREAK is handled specially (see the kernel-bug note above): it is
// recorded here and delivered from consoleHandler, not via the CRT.
void* os::signal(int signal_number, void* handler) {
  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
    void (*oldHandler)(int) = sigbreakHandler;
    sigbreakHandler = (void (*)(int)) handler;
    return (void*) oldHandler;
  } else {
    return (void*)::signal(signal_number, (void (*)(int))handler);
  }
}

void os::signal_raise(int signal_number) {
  raise(signal_number);
}

// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events. We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
  switch (event) {
  case CTRL_C_EVENT:
    if (is_error_reported()) {
      // Ctrl-C is pressed during error reporting, likely because the error
      // handler fails to abort. Let VM die immediately.
      os::die();
    }

    os::signal_raise(SIGINT);
    return TRUE;
    break;
  case CTRL_BREAK_EVENT:
    if (sigbreakHandler != NULL) {
      (*sigbreakHandler)(SIGBREAK);
    }
    return TRUE;
    break;
  case CTRL_LOGOFF_EVENT: {
    // Don't terminate JVM if it is running in a non-interactive session,
    // such as a service process.
    USEROBJECTFLAGS flags;
    HANDLE handle = GetProcessWindowStation();
    if (handle != NULL &&
        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
        sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let next handler to deal
      // with it.
      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
        return FALSE;
      }
    }
  }
  // Interactive logoff: deliberately falls through to the SIGTERM cases.
  case CTRL_CLOSE_EVENT:
  case CTRL_SHUTDOWN_EVENT:
    os::signal_raise(SIGTERM);
    return TRUE;
    break;
  default:
    break;
  }
  return FALSE;
}

/*
 * The following code is moved from os.cpp for making this
 * code platform specific, which it is by its very nature.
 */

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Semaphore released once per queued signal; waited on in check_pending_signals.
static HANDLE sig_sem = NULL;

void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3. For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified. This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case. See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}

// Record one occurrence of 'signal_number' and wake the signal thread.
void os::signal_notify(int signal_number) {
  BOOL ret;
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[signal_number]);
    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
    assert(ret != 0, "ReleaseSemaphore() failed");
  }
}

// Find (and consume) the lowest pending signal. When wait_for_signal is true,
// block on sig_sem until one arrives, cooperating with external suspension;
// otherwise return -1 immediately if none is pending.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // CAS-decrement claims one pending occurrence of signal i.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
        assert(ret != 0, "ReleaseSemaphore() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

int os::signal_lookup() {
  return check_pending_signals(false);
}

int os::signal_wait() {
  return check_pending_signals(true);
}

// Implicit OS exception handling

// Redirect execution to 'handler': stash the faulting pc in the thread (for
// the stub to find) and rewrite the context's instruction pointer, then
// resume execution.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) {
  JavaThread* thread = JavaThread::current();
  // Save pc in thread
#ifdef _M_IA64
  // Do not blow up if no thread info available.
  if (thread) {
    // Saving PRECISE pc (with slot information) in thread.
    uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
    // Convert precise PC into "Unix" format
    precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
    thread->set_saved_exception_pc((address)precise_pc);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
  // Clear out psr.ri (= Restart Instruction) in order to continue
  // at the beginning of the target bundle.
  exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
  assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
#elif _M_AMD64
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
#else
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
#endif

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}


// Used for PostMortemDump
extern "C" void safepoints();
extern "C" void find(int x);
extern "C" void events();

// According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real world experience shows that occasionally
// the execution of an illegal instruction can generate the exception code 0xC000001E. This
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).

#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E

// From "Execution Protection in the Windows Operating System" draft 0.35
// Once a system header becomes available, the "real" define should be
// included or copied here.
#define EXCEPTION_INFO_EXEC_VIOLATION 0x08

// Handle NAT Bit consumption on IA64.
#ifdef _M_IA64
  #define EXCEPTION_REG_NAT_CONSUMPTION STATUS_REG_NAT_CONSUMPTION
#endif

// Windows Vista/2008 heap corruption check
#define EXCEPTION_HEAP_CORRUPTION 0xC0000374

// Expands to the pair: stringified name, numeric code.
#define def_excpt(val) #val, val

struct siglabel {
  char *name;
  int   number;
};

// All Visual C++ exceptions thrown from code generated by the Microsoft Visual
// C++ compiler contain this error code. Because this is a compiler-generated
// error, the code is not listed in the Win32 API header files.
// The code is actually a cryptic mnemonic device, with the initial "E"
// standing for "exception" and the final 3 bytes (0x6D7363) representing the
// ASCII values of "msc".

#define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363


// NULL-name-terminated table mapping exception codes to their symbolic names.
struct siglabel exceptlabels[] = {
    def_excpt(EXCEPTION_ACCESS_VIOLATION),
    def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
    def_excpt(EXCEPTION_BREAKPOINT),
    def_excpt(EXCEPTION_SINGLE_STEP),
    def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
    def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
    def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
    def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
    def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
    def_excpt(EXCEPTION_FLT_OVERFLOW),
    def_excpt(EXCEPTION_FLT_STACK_CHECK),
    def_excpt(EXCEPTION_FLT_UNDERFLOW),
    def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
    def_excpt(EXCEPTION_INT_OVERFLOW),
    def_excpt(EXCEPTION_PRIV_INSTRUCTION),
    def_excpt(EXCEPTION_IN_PAGE_ERROR),
    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
    def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
    def_excpt(EXCEPTION_STACK_OVERFLOW),
    def_excpt(EXCEPTION_INVALID_DISPOSITION),
    def_excpt(EXCEPTION_GUARD_PAGE),
    def_excpt(EXCEPTION_INVALID_HANDLE),
    def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
    def_excpt(EXCEPTION_HEAP_CORRUPTION),
#ifdef _M_IA64
    def_excpt(EXCEPTION_REG_NAT_CONSUMPTION),
#endif
    NULL, 0
};

// Copy the symbolic name of 'exception_code' into buf and return buf, or
// return NULL when the code is not in the table above.
const char* os::exception_name(int exception_code, char *buf, size_t size) {
  for (int i = 0; exceptlabels[i].name != NULL; i++) {
    if (exceptlabels[i].number == exception_code) {
      jio_snprintf(buf, size, "%s", exceptlabels[i].name);
      return buf;
    }
  }

  return NULL;
}
//----------------------------------------------------------------------------- 2155 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2156 // handle exception caused by idiv; should only happen for -MinInt/-1 2157 // (division by zero is handled explicitly) 2158 #ifdef _M_IA64 2159 assert(0, "Fix Handle_IDiv_Exception"); 2160 #elif _M_AMD64 2161 PCONTEXT ctx = exceptionInfo->ContextRecord; 2162 address pc = (address)ctx->Rip; 2163 assert(pc[0] == 0xF7, "not an idiv opcode"); 2164 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2165 assert(ctx->Rax == min_jint, "unexpected idiv exception"); 2166 // set correct result values and continue after idiv instruction 2167 ctx->Rip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2168 ctx->Rax = (DWORD)min_jint; // result 2169 ctx->Rdx = (DWORD)0; // remainder 2170 // Continue the execution 2171 #else 2172 PCONTEXT ctx = exceptionInfo->ContextRecord; 2173 address pc = (address)ctx->Eip; 2174 assert(pc[0] == 0xF7, "not an idiv opcode"); 2175 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2176 assert(ctx->Eax == min_jint, "unexpected idiv exception"); 2177 // set correct result values and continue after idiv instruction 2178 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2179 ctx->Eax = (DWORD)min_jint; // result 2180 ctx->Edx = (DWORD)0; // remainder 2181 // Continue the execution 2182 #endif 2183 return EXCEPTION_CONTINUE_EXECUTION; 2184 } 2185 2186 //----------------------------------------------------------------------------- 2187 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2188 PCONTEXT ctx = exceptionInfo->ContextRecord; 2189 #ifndef _WIN64 2190 // handle exception caused by native method modifying control word 2191 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2192 2193 switch (exception_code) { 2194 case EXCEPTION_FLT_DENORMAL_OPERAND: 2195 case EXCEPTION_FLT_DIVIDE_BY_ZERO: 2196 case 
EXCEPTION_FLT_INEXACT_RESULT: 2197 case EXCEPTION_FLT_INVALID_OPERATION: 2198 case EXCEPTION_FLT_OVERFLOW: 2199 case EXCEPTION_FLT_STACK_CHECK: 2200 case EXCEPTION_FLT_UNDERFLOW: 2201 jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std()); 2202 if (fp_control_word != ctx->FloatSave.ControlWord) { 2203 // Restore FPCW and mask out FLT exceptions 2204 ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0; 2205 // Mask out pending FLT exceptions 2206 ctx->FloatSave.StatusWord &= 0xffffff00; 2207 return EXCEPTION_CONTINUE_EXECUTION; 2208 } 2209 } 2210 2211 if (prev_uef_handler != NULL) { 2212 // We didn't handle this exception so pass it to the previous 2213 // UnhandledExceptionFilter. 2214 return (prev_uef_handler)(exceptionInfo); 2215 } 2216 #else // !_WIN64 2217 /* 2218 On Windows, the mxcsr control bits are non-volatile across calls 2219 See also CR 6192333 2220 */ 2221 jint MxCsr = INITIAL_MXCSR; 2222 // we can't use StubRoutines::addr_mxcsr_std() 2223 // because in Win64 mxcsr is not saved there 2224 if (MxCsr != ctx->MxCsr) { 2225 ctx->MxCsr = MxCsr; 2226 return EXCEPTION_CONTINUE_EXECUTION; 2227 } 2228 #endif // !_WIN64 2229 2230 return EXCEPTION_CONTINUE_SEARCH; 2231 } 2232 2233 static inline void report_error(Thread* t, DWORD exception_code, 2234 address addr, void* siginfo, void* context) { 2235 VMError err(t, exception_code, addr, siginfo, context); 2236 err.report_and_die(); 2237 2238 // If UseOsErrorReporting, this will return here and save the error file 2239 // somewhere where we can find it in the minidump. 
}

//-----------------------------------------------------------------------------
// Top-level Windows structured exception filter for the VM. Installed around
// VM-managed code; classifies the hardware exception and either fixes up the
// context and resumes (implicit null checks, stack overflow, safepoint polls,
// FPU state repair, idiv overflow, SafeFetch), or reports a fatal error.
// Returns EXCEPTION_CONTINUE_EXECUTION after a successful fix-up, otherwise
// EXCEPTION_CONTINUE_SEARCH to let the next handler run.
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // -XX:+InterceptOSException: let a debugger see the raw exception.
  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#ifdef _M_IA64
  // On Itanium, we need the "precise pc", which has the slot number coded
  // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
  address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
  // Convert the pc to "Unix format", which has the slot number coded
  // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
  // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
  // information is saved in the Unix format.
  address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
#elif _M_AMD64
  address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
  // May be NULL for exceptions raised on non-VM threads.
  Thread* t = ThreadLocalStorage::get_thread_slow();          // slow & steady

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      bool instr_spans_page_boundary =
        (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                         (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start =
            (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          if (PrintMiscellaneous && Verbose) {
            char buf[256];
            jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                         "at " INTPTR_FORMAT
                         ", unguarding " INTPTR_FORMAT ": %s", addr,
                         page_start, (res ? "success" : strerror(errno)));
            tty->print_raw_cr(buf);
          }

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here.  Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM.  Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop.  This condition is probably even more unlikely
          // than the first.
          //
          // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are unnecessary complication:
          // this handler is a best-effort safety net, not a complete solution.
          // It is disabled by default and should only be used as a workaround
          // in case we missed any no-execute-unsafe VM code.

          last_addr = addr;

          return EXCEPTION_CONTINUE_EXECUTION;
        }
      }

      // Last unguard failed or not unguarding
      tty->print_raw_cr("Execution protection violation");
      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    }
  }
#endif // _WIN64

  // Check to see if we caught the safepoint code in the
  // process of write protecting the memory serialization page.
  // It write enables the page immediately after protecting it
  // so just return.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    JavaThread* thread = (JavaThread*) t;
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];
    if (os::is_memory_serialize_page(thread, addr)) {
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  // Deliberate SEGV used by VM_Version during CPU feature probing.
  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
      VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
  }

  if (t != NULL && t->is_Java_thread()) {
    JavaThread* thread = (JavaThread*) t;
    bool in_java = thread->thread_state() == _thread_in_Java;

    // Handle potential stack overflows up front.
    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
      if (os::uses_stack_guard_pages()) {
#ifdef _M_IA64
        // Use guard page for register stack.
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        // Check for a register stack overflow on Itanium
        if (thread->addr_inside_register_stack_red_zone(addr)) {
          // Fatal red zone violation happens if the Java program
          // catches a StackOverflow error and does so much processing
          // that it runs beyond the unprotected yellow guard zone. As
          // a result, we are out of here.
          fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
        } else if(thread->addr_inside_register_stack(addr)) {
          // Disable the yellow zone which sets the state that
          // we've got a stack overflow problem.
          if (thread->stack_yellow_zone_enabled()) {
            thread->disable_stack_yellow_zone();
          }
          // Give us some room to process the exception.
          thread->disable_register_stack_guard();
          // Tracing with +Verbose.
          if (Verbose) {
            tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
            tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
            tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
            tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
                          thread->register_stack_base(),
                          thread->register_stack_base() + thread->stack_size());
          }

          // Reguard the permanent register stack red zone just to be sure.
          // We saw Windows silently disabling this without telling us.
          thread->enable_register_stack_red_zone();

          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
#endif
        if (thread->stack_yellow_zone_enabled()) {
          // Yellow zone violation.  The o/s has unprotected the first yellow
          // zone page for us.  Note:  must call disable_stack_yellow_zone to
          // update the enabled status, even if the zone contains only one page.
          thread->disable_stack_yellow_zone();
          // If not in java code, return and hope for the best.
          return in_java ? Handle_Exception(exceptionInfo,
                                            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
                         : EXCEPTION_CONTINUE_EXECUTION;
        } else {
          // Fatal red zone violation.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
          report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                       exceptionInfo->ContextRecord);
          return EXCEPTION_CONTINUE_SEARCH;
        }
      } else if (in_java) {
        // JVM-managed guard pages cannot be used on win95/98.  The o/s provides
        // a one-time-only guard page, which it has released to us.  The next
        // stack overflow on this thread will result in an ACCESS_VIOLATION.
        return Handle_Exception(exceptionInfo,
                                SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
      } else {
        // Can only return and hope for the best.  Further stack growth will
        // result in an ACCESS_VIOLATION.
        return EXCEPTION_CONTINUE_EXECUTION;
      }
    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
      // Either stack overflow or null pointer exception.
      if (in_java) {
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        address stack_end = thread->stack_base() - thread->stack_size();
        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
          // Stack overflow.
          assert(!os::uses_stack_guard_pages(),
                 "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        //
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        //
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          if (os::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
        {
#ifdef _WIN64
          //
          // If it's a legal stack address map the entire region in
          //
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base()) {
            // Page-align down and commit everything up to the stack base.
            addr = (address)((uintptr_t)addr &
                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
            os::commit_memory((char *)addr, thread->stack_base() - addr,
                              !ExecMem);
            return EXCEPTION_CONTINUE_EXECUTION;
          }
          else
#endif
          {
            // Null pointer exception.
#ifdef _M_IA64
            // Process implicit null checks in compiled code. Note: Implicit null checks
            // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
            if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
              CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
              // Handle implicit null check in UEP method entry
              if (cb && (cb->is_frame_complete_at(pc) ||
                         (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
                if (Verbose) {
                  intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
                  tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
                  tty->print_cr("      to addr " INTPTR_FORMAT, addr);
                  tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
                                *(bundle_start + 1), *bundle_start);
                }
                return Handle_Exception(exceptionInfo,
                                        SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
              }
            }

            // Implicit null checks were processed above.  Hence, we should not reach
            // here in the usual case => die!
            if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;

#else // !IA64

            // Windows 98 reports faulting addresses incorrectly
            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
                !os::win32::is_nt()) {
              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
            }
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;
#endif
          }
        }
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
        address addr = JNI_FastGetField::find_slowcase_pc(pc);
        if (addr != (address)-1) {
          return Handle_Exception(exceptionInfo, addr);
        }
      }
#endif

      // Stack overflow or null pointer exception in native code.
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#if defined _M_IA64
    else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
              exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
      M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);

      // Compiled method patched to be non entrant? Following conditions must apply:
      // 1. must be first instruction in bundle
      // 2. must be a break instruction with appropriate code
      if ((((uint64_t) pc & 0x0F) == 0) &&
          (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
        return Handle_Exception(exceptionInfo,
                                (address)SharedRuntime::get_handle_wrong_method_stub());
      }
    } // /EXCEPTION_ILLEGAL_INSTRUCTION
#endif


    if (in_java) {
      switch (exception_code) {
      // Integer divide by zero from compiled Java code: dispatch the
      // ArithmeticException via the shared-runtime continuation.
      case EXCEPTION_INT_DIVIDE_BY_ZERO:
        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

      // min_jint / -1 overflow trap: fix up the result in the context.
      case EXCEPTION_INT_OVERFLOW:
        return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }
    // FPU control-state repair applies in Java or native state, but never to
    // C++ exceptions (EXCEPTION_UNCAUGHT_CXX_EXCEPTION), which must search on.
    if (((thread->thread_state() == _thread_in_Java) ||
         (thread->thread_state() == _thread_in_native)) &&
        exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION)
    {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
  }

  // Anything unhandled (except debugger breakpoints) is a fatal VM error.
  if (exception_code != EXCEPTION_BREAKPOINT) {
    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                 exceptionInfo->ContextRecord);
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

#ifndef _WIN64
// Special care for fast JNI accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
// the heap gets shrunk before the field access.
// Need to install our own structured exception handler since native code may
// install its own.
// SEH filter wrapped around fast JNI field accessor calls (win32 only).
// If the accessor faulted (e.g. the heap moved under it), resume at the
// accessor's registered slow-case pc; otherwise keep searching.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    // find_slowcase_pc returns (address)-1 when pc is not inside an accessor.
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Generates one wrapper per primitive type that invokes the generated fast
// accessor under a __try/__except guarded by the filter above. If the filter
// declines to resume, the wrapper falls through and returns 0.
// (No comments inside the macro body: they would break the line continuations.)
#define DEFINE_FAST_GETFIELD(Return,Fieldname,Result) \
Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) { \
  __try { \
    return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, obj, fieldID); \
  } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { \
  } \
  return 0; \
}

DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
DEFINE_FAST_GETFIELD(jchar,    char,   Char)
DEFINE_FAST_GETFIELD(jshort,   short,  Short)
DEFINE_FAST_GETFIELD(jint,     int,    Int)
DEFINE_FAST_GETFIELD(jlong,    long,   Long)
DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
DEFINE_FAST_GETFIELD(jdouble,  double, Double)

// Map a BasicType to the matching SEH-guarded accessor wrapper above.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
    case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
    case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
    case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
    case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
    case T_INT:     return (address)jni_fast_GetIntField_wrapper;
    case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
    case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
    case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
    default:        ShouldNotReachHere();
  }
  // Unreachable; keeps the compiler's all-paths-return check happy.
  return (address)-1;
}
#endif

void
os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
  // Install a win32 structured exception handler around the test
  // function call so the VM can generate an error dump if needed.
  __try {
    (*funcPtr)();
  } __except(topLevelExceptionFilter(
                                     (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }
}

// Virtual Memory

// Platform-neutral page-size/granularity queries delegating to the cached
// values gathered by os::win32 at startup (from GetSystemInfo).
int os::vm_page_size() { return os::win32::vm_page_size(); }
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}

// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
//   + select Control Panel -> Administrative Tools -> Local Security Policy
//   + select Local Policies -> User Rights Assignment
//   + double click "Lock pages in memory", add users and/or groups
//   + reboot
// Note the above steps are needed for administrator as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment it only uses large page if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future, if so the code below needs to be revisited.

#ifndef MEM_LARGE_PAGES
#define MEM_LARGE_PAGES 0x20000000
#endif

// Process/token handles used only during large_page_init(); released by
// cleanup_after_large_page_init().
static HANDLE    _hProcess;
static HANDLE    _hToken;

// Container for NUMA node list info
class NUMANodeListHolder {
 private:
  int *_numa_used_node_list;  // allocated below
  int _numa_used_node_count;

  void free_node_list() {
    if (_numa_used_node_list != NULL) {
      FREE_C_HEAP_ARRAY(int, _numa_used_node_list, mtInternal);
    }
  }

 public:
  NUMANodeListHolder() {
    _numa_used_node_count = 0;
    _numa_used_node_list = NULL;
    // do rest of initialization in build routine (after function pointers are set up)
  }

  ~NUMANodeListHolder() {
    free_node_list();
  }

  // Populate the node list with the NUMA nodes whose processors intersect
  // this process's affinity mask. Returns true only when more than one
  // usable node was found (interleaving is pointless otherwise).
  bool build() {
    DWORD_PTR proc_aff_mask;
    DWORD_PTR sys_aff_mask;
    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
    ULONG highest_node_number;
    if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false;
    free_node_list();
    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
    for (unsigned int i = 0; i <= highest_node_number; i++) {
      ULONGLONG proc_mask_numa_node;
      if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
      if ((proc_aff_mask & proc_mask_numa_node)!=0) {
        _numa_used_node_list[_numa_used_node_count++] = i;
      }
    }
    return (_numa_used_node_count > 1);
  }

  int get_count() { return _numa_used_node_count; }
  int get_node_list_entry(int n) {
    // for indexes out of range, returns -1
    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
  }

} numa_node_list_holder;



static size_t _large_page_size = 0;

// True when both the kernel32 large-page entry point and the advapi32
// privilege APIs were resolved at startup.
static bool resolve_functions_for_large_page_init() {
  return os::Kernel32Dll::GetLargePageMinimumAvailable() &&
    os::Advapi32Dll::AdvapiAvailable();
}

// Try to enable SeLockMemoryPrivilege on this process's token, which Windows
// requires before MEM_LARGE_PAGES allocations succeed. Handles opened here
// are released later by cleanup_after_large_page_init().
static bool request_lock_memory_privilege() {
  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
                          os::current_process_id());

  LUID luid;
  if (_hProcess != NULL &&
      os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
      os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

    TOKEN_PRIVILEGES tp;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Luid = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege.  Check GetLastError() too. See MSDN document.
    if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
        (GetLastError() == ERROR_SUCCESS)) {
      return true;
    }
  }

  return false;
}

static void cleanup_after_large_page_init() {
  if (_hProcess) CloseHandle(_hProcess);
  _hProcess = NULL;
  if (_hToken) CloseHandle(_hToken);
  _hToken = NULL;
}

// Decide whether NUMA interleaving can be used: requires the NUMA kernel32
// entry points and a process spanning more than one NUMA node. Also rounds
// NUMAInterleaveGranularity up to a legal allocation granularity.
static bool numa_interleaving_init() {
  bool success = false;
  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

  // print a warning if UseNUMAInterleaving flag is specified on command line
  bool warn_on_failure = use_numa_interleaving_specified;
# define WARN(msg) if (warn_on_failure) { warning(msg); }

  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);

  if (os::Kernel32Dll::NumaCallsAvailable()) {
    if (numa_node_list_holder.build()) {
      if (PrintMiscellaneous && Verbose) {
        tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
        for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
          tty->print("%d ", numa_node_list_holder.get_node_list_entry(i));
        }
        tty->print("\n");
      }
      success = true;
    } else {
      WARN("Process does not cover multiple NUMA nodes.");
    }
  } else {
    WARN("NUMA Interleaving is not supported by the operating system.");
  }
  if (!success) {
    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
  }
  return success;
#undef WARN
}

// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
//  * UseNUMAInterleaving requires a separate node for each piece
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, DWORD prot,
                                         bool should_inject_error=false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) VirtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  // Release the probe reservation immediately: we only wanted to learn a
  // contiguous base address; the pieces are re-allocated chunk by chunk below.
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = (char *) align_size_up((size_t)p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t  bytes_remaining = bytes;
  // An overflow of align_size_up() would have been caught above
  // in the calculation of size_of_reserve.
  char * next_alloc_addr = p_buf;
  HANDLE hProc = GetCurrentProcess();

#ifdef ASSERT
  // Variable for the failure injection
  long ran_num = os::random();
  size_t fail_after = ran_num % bytes;
#endif

  int count=0;
  while (bytes_remaining) {
    // select bytes_to_rq to get to the next chunk_size boundary

    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
    // Note allocate and commit
    char * p_new;

#ifdef ASSERT
    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
#else
    const bool inject_error_now = false;
#endif

    if (inject_error_now) {
      p_new = NULL;
    } else {
      if (!UseNUMAInterleaving) {
        p_new = (char *) VirtualAlloc(next_alloc_addr,
                                      bytes_to_rq,
                                      flags,
                                      prot);
      } else {
        // get the next node to use from the used_node_list
        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
        p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc,
                                                            next_alloc_addr,
                                                            bytes_to_rq,
                                                            flags,
                                                            prot,
                                                            node);
      }
    }

    if (p_new == NULL) {
      // Free any allocated pages
      if (next_alloc_addr > p_buf) {
        // Some memory was committed so release it.
        size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // need to create a dummy 'reserve' record to match
        // the release.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        if (TracePageSizes && Verbose) {
          tty->print_cr("Reserving pages individually failed.");
        }
      }
#endif
      return NULL;
    }

    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}



// Probe OS large-page support: resolve the required APIs, acquire the lock
// pages privilege, and determine _large_page_size. Clears UseLargePages on
// any failure (warning only if the user set the flags explicitly).
void os::large_page_init() {
  if (!UseLargePages) return;

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  bool success = false;

# define WARN(msg) if (warn_on_failure) { warning(msg); }
  if (resolve_functions_for_large_page_init()) {
    if (request_lock_memory_privilege()) {
      size_t s = os::Kernel32Dll::GetLargePageMinimum();
      if (s) {
#if defined(IA32) || defined(AMD64)
        if (s > 4*M || LargePageSizeInBytes > 4*M) {
          WARN("JVM cannot use large pages bigger than 4mb.");
        } else {
#endif
          if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
            _large_page_size = LargePageSizeInBytes;
          } else {
            _large_page_size = s;
          }
          success = true;
#if defined(IA32) || defined(AMD64)
        }
#endif
      } else {
        WARN("Large page is not supported by the processor.");
      }
    } else {
      WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
    }
  } else {
    WARN("Large page is not supported by the operating system.");
  }
#undef WARN

  const size_t default_page_size = (size_t) vm_page_size();
  if (success && _large_page_size > default_page_size) {
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = default_page_size;
    _page_sizes[2] = 0;
  }

  cleanup_after_large_page_init();
  UseLargePages = success;
}

// On win32, one cannot release just a part of reserved memory, it's an
// all or nothing deal.  When we split a reservation, we must break the
// reservation into two reservations.
void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
                                  bool realloc) {
  if (size > 0) {
    release_memory(base, size);
    if (realloc) {
      reserve_memory(split, base);
    }
    if (size != split) {
      reserve_memory(size - split, base + split);
    }
  }
}

// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
// Reserve 'size' bytes at an address aligned to 'alignment'.  Strategy:
// over-reserve by 'alignment', release the whole thing, then re-reserve
// exactly at the computed aligned address.  Another thread may grab the
// gap between release and re-reserve, in which case the re-reserve returns
// NULL and we simply retry.
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);

    os::release_memory(extra_base, extra_size);

    aligned_base = os::reserve_memory(size, aligned_base);

  } while (aligned_base == NULL);

  return aligned_base;
}

// Reserve (but do not commit) 'bytes' of address space, optionally at 'addr'.
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if (Verbose && PrintMiscellaneous) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if (Verbose && PrintMiscellaneous) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available.
  return reserve_memory(bytes, requested_addr);
}

size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

bool os::can_execute_large_page_memory() {
  return true;
}

// Reserve-and-commit 'bytes' backed by large pages.  Returns NULL (caller
// falls back to small pages) if 'bytes' is not large-page aligned or the
// requested alignment exceeds the large page size.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, bool exec) {
  assert(UseLargePages, "only for large pages");

  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
    return NULL; // Fallback to small pages.
  }

  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;

  // with large pages, there are two cases where we need to use Individual Allocation
  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages individually.");
    }
    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
    if (p_buf == NULL) {
      // give an appropriate warning message
      if (UseNUMAInterleaving) {
        warning("NUMA large page allocation failed, UseLargePages flag ignored");
      }
      if (UseLargePagesIndividualAllocation) {
        warning("Individually allocated large pages failed, "
                "use -XX:-UseLargePagesIndividualAllocation to turn off");
      }
      return NULL;
    }

    return p_buf;

  } else {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages in a single large chunk.");
    }
    // normal policy just allocate it all at once
    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
    if (res != NULL) {
      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
    }

    return res;
  }
}

bool os::release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  return release_memory(base, bytes);
}

void os::print_statistics() {
}

// Emit a diagnostic (non-product builds) when a commit fails, including the
// OS error text and code.  Kept out of the failure path's hot code.
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}

// Commit [addr, addr+bytes); optionally make the range executable.
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

// Commit or abort the VM with an out-of-memory error carrying 'mesg'.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  }
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
  return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
}

bool os::pd_release_memory(char* addr, size_t bytes) {
  // MEM_RELEASE requires size 0 and releases the entire original reservation.
  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
}

bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}

// Set protections specified
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
  case MEM_PROT_READ: p = PAGE_READONLY; break;
  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
  default:
    ShouldNotReachHere();
  }

  DWORD old_status;

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed) {
    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
                          "cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard page
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
  //
  // Pages in the region become guard pages. Any attempt to access a guard page
  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
  // the guard page status. Guard pages thus act as a one-time access alarm.
  return VirtualProtect(addr, bytes, p, &old_status) != 0;
}

bool os::guard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}

bool os::unguard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}

// NUMA/paging hints below are no-ops on Windows.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes)    { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
bool os::numa_topology_changed()                       { return false; }
size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id()                            { return 0; }
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // Provide an answer for UMA systems
    ids[0] = 0;
    return 1;
  } else {
    // check for size bigger than actual groups_num
    size = MIN2(size, numa_get_groups_num());
    for (int i = 0; i < (int)size; i++) {
      ids[i] = numa_node_list_holder.get_node_list_entry(i);
    }
    return size;
  }
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}

#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL

void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}

// RAII helper: raises the multimedia timer resolution to 1 ms for the
// lifetime of the interval when the sleep duration is not a multiple of
// the default ~10 ms tick, and restores it on destruction.
class HighResolutionInterval : public CHeapObj<mtThread> {
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
 private:
  jlong resolution;
 public:
  HighResolutionInterval(jlong ms) {
    resolution = ms % 10L;
    if (resolution != 0) {
      MMRESULT result = timeBeginPeriod(1L);
    }
  }
  ~HighResolutionInterval() {
    if (resolution != 0) {
      MMRESULT result = timeEndPeriod(1L);
    }
    resolution = 0L;
  }
};

// Sleep for 'ms' milliseconds.  Interruptible sleeps (Java threads only)
// wait on the thread's interrupt event and return OS_INTRPT when it fires;
// non-interruptible sleeps use a plain Sleep() and always return OS_TIMEOUT.
// Durations beyond MAXDWORD are handled by looping in MAXDWORD slices.
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  while (ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT)
      return res;
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    HighResolutionInterval *phri=NULL;
    if (!ForceTimeHighResolution)
      phri = new HighResolutionInterval(ms);
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}

//
// Short sleep, direct OS call.
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    Sleep(100000);  // ... 100 seconds at a time
  }
}

typedef BOOL (WINAPI * STTSignature)(void);

void os::naked_yield() {
  // Use either SwitchToThread() or Sleep(0)
  // Consider passing back the return value from SwitchToThread().
  if (os::Kernel32Dll::SwitchToThreadAvailable()) {
    SwitchToThread();
  } else {
    Sleep(0);
  }
}

// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.

int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};

// Alternate mapping used when ThreadPriorityPolicy == 1: spreads the upper
// Java priorities across ABOVE_NORMAL..TIME_CRITICAL.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};

static int prio_init() {
  // If ThreadPriorityPolicy is 1, switch tables
  if (ThreadPriorityPolicy == 1) {
    int i;
    for (i = 0; i < CriticalPriority + 1; i++) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  if (UseCriticalJavaThreadPriority) {
    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
  }
  return 0;
}

OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) return OS_OK;
  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
  return ret ? OS_OK : OS_ERR;
}

OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) {
  if (!UseThreadPriorities) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread())
    ((JavaThread*)thread)->parker()->unpark();

  ParkEvent * ev = thread->_ParkEvent;
  if (ev != NULL) ev->unpark();

}


bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. By checking thread interrupt event to see
  // if the thread gets real interrupt thus prevent spurious wakeup.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}

// Gets a pc (hint) for a running thread. Currently used only for profiling.
ExtendedPC os::get_thread_pc(Thread* thread) {
  CONTEXT context;
  context.ContextFlags = CONTEXT_CONTROL;
  HANDLE handle = thread->osthread()->thread_handle();
#ifdef _M_IA64
  assert(0, "Fix get_thread_pc");
  return ExtendedPC(NULL);
#else
  if (GetThreadContext(handle, &context)) {
#ifdef _M_AMD64
    return ExtendedPC((address) context.Rip);
#else
    return ExtendedPC((address) context.Eip);
#endif
  } else {
    return ExtendedPC(NULL);
  }
#endif
}

// GetCurrentThreadId() returns DWORD
intx os::current_thread_id()          { return GetCurrentThreadId(); }

static int _initial_pid = 0;

int os::current_process_id()
{
  return (_initial_pid ? _initial_pid : _getpid());
}

int    os::win32::_vm_page_size              = 0;
int    os::win32::_vm_allocation_granularity = 0;
int    os::win32::_processor_type            = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int    os::win32::_processor_level           = 0;
julong os::win32::_physical_memory           = 0;
size_t os::win32::_default_stack_size        = 0;

intx          os::win32::_os_thread_limit    = 0;
volatile intx os::win32::_os_thread_count    = 0;

bool   os::win32::_is_nt                     = false;
bool   os::win32::_is_windows_2003           = false;
bool   os::win32::_is_windows_server         = false;

// 6573254
// Currently, the bug is observed across all the supported Windows releases,
// including the latest one (as of this writing - Windows Server 2012 R2)
bool   os::win32::_has_exit_bug              = true;
bool   os::win32::_has_performance_count     = 0;

// Query page size, allocation granularity, processor count, physical memory
// and the OS version, caching the results in the os::win32 statics above.
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size    = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type  = si.dwProcessorType;
  _processor_level = si.wProcessorLevel;
  set_processor_count(si.dwNumberOfProcessors);

  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);

  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatusEx(&ms);
  _physical_memory = ms.ullTotalPhys;

  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch (oi.dwPlatformId) {
  case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
  case VER_PLATFORM_WIN32_NT:
    _is_nt = true;
    {
      // 5002 == Windows Server 2003 / XP x64 (major 5, minor 2).
      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
      if (os_vers == 5002) {
        _is_windows_2003 = true;
      }
      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
          oi.wProductType == VER_NT_SERVER) {
        _is_windows_server = true;
      }
    }
    break;
  default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
         "stack size not a multiple of page size");

  initialize_performance_counter();

  // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is
  // known to deadlock the system, if the VM issues to thread operations with
  // a too high frequency, e.g., such as changing the priorities.
  // The 6000 seems to work well - no deadlocks has been notices on the test
  // programs that we have seen experience this problem.
  if (!os::win32::is_nt()) {
    StarvationMonitorInterval = 6000;
  }
}


// Load a system DLL by bare name, looking only in the system and Windows
// directories (never the application directory or PATH).  On failure an
// explanatory message is written to ebuf and NULL is returned.
// NOTE(review): path is built with strcat into a MAX_PATH buffer; a name
// near MAX_PATH could overflow — presumably callers only pass short,
// well-known DLL names.  Verify against callers.
HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, int ebuflen) {
  char path[MAX_PATH];
  DWORD size;
  DWORD pathLen = (DWORD)sizeof(path);
  HINSTANCE result = NULL;

  // only allow library name without path component
  assert(strchr(name, '\\') == NULL, "path not allowed");
  assert(strchr(name, ':') == NULL, "path not allowed");
  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
    jio_snprintf(ebuf, ebuflen,
                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
    return NULL;
  }

  // search system directory
  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
    strcat(path, "\\");
    strcat(path, name);
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // try Windows directory
  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
    strcat(path, "\\");
    strcat(path, name);
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  jio_snprintf(ebuf, ebuflen,
               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
  return NULL;
}

#define MIN_EXIT_MUTEXES 1
#define MAX_EXIT_MUTEXES 16

struct ExitMutexes {
  DWORD count;
  HANDLE handles[MAX_EXIT_MUTEXES];
};

// InitOnceExecuteOnce callback: lazily create the set of exit-serialization
// mutexes (one per processor, clamped to [MIN,MAX]_EXIT_MUTEXES).
static BOOL CALLBACK init_muts_call(PINIT_ONCE, PVOID ppmuts, PVOID*) {
  static ExitMutexes muts;

  muts.count = os::processor_count();
  if (muts.count < MIN_EXIT_MUTEXES) {
    muts.count = MIN_EXIT_MUTEXES;
  } else if (muts.count > MAX_EXIT_MUTEXES) {
    muts.count = MAX_EXIT_MUTEXES;
  }

  for (DWORD i = 0; i < muts.count; ++i) {
    muts.handles[i] = CreateMutex(NULL, FALSE, NULL);
    if (muts.handles[i] == NULL) {
      return FALSE;
    }
  }
  *((ExitMutexes**)ppmuts) = &muts;
  return TRUE;
}

// Exit the current thread or the whole process.  When the Windows exit bug
// workaround is active (see _has_exit_bug), exits are first serialized
// through the lazily-created mutex set: a thread exit acquires any one
// mutex, a process exit waits for all of them.
int os::win32::exit_process_or_thread(Ept what, int exit_code) {
  if (os::win32::has_exit_bug()) {
    static INIT_ONCE init_once_muts = INIT_ONCE_STATIC_INIT;
    static ExitMutexes* pmuts;

    if (!InitOnceExecuteOnce(&init_once_muts, init_muts_call, &pmuts, NULL)) {
      warning("ExitMutex initialization failed in %s: %d\n", __FILE__, __LINE__);
    } else if (WaitForMultipleObjects(pmuts->count, pmuts->handles,
                                      (what != EPT_THREAD), // exiting process waits for all mutexes
                                      INFINITE) == WAIT_FAILED) {
      warning("ExitMutex acquisition failed in %s: %d\n", __FILE__, __LINE__);
    }
  }

  switch (what) {
  case EPT_THREAD:
    _endthreadex((unsigned)exit_code);
    break;

  case EPT_PROCESS:
    ::exit(exit_code);
    break;

  case EPT_PROCESS_DIE:
    _exit(exit_code);
    break;
  }

  // should not reach here
  return exit_code;
}

#undef MIN_EXIT_MUTEXES
#undef MAX_EXIT_MUTEXES

// Put the standard streams into binary mode so no CR/LF translation occurs.
void os::win32::setmode_streams() {
  _setmode(_fileno(stdin), _O_BINARY);
  _setmode(_fileno(stdout), _O_BINARY);
  _setmode(_fileno(stderr), _O_BINARY);
}


bool os::is_debugger_attached() {
  return IsDebuggerPresent() ? true : false;
}


void os::wait_for_keypress_at_exit(void) {
  if (PauseAtExit) {
    fprintf(stderr, "Press any key to continue...\n");
    fgetc(stdin);
  }
}


int os::message_box(const char* title, const char* message) {
  int result = MessageBox(NULL, message, title,
                          MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
  return result == IDYES;
}

// Thread-local storage wrappers over the Win32 TLS API.
int os::allocate_thread_local_storage() {
  return TlsAlloc();
}


void os::free_thread_local_storage(int index) {
  TlsFree(index);
}


void os::thread_local_storage_at_put(int index, void* value) {
  TlsSetValue(index, value);
  assert(thread_local_storage_at(index) == value, "Just checking");
}


void* os::thread_local_storage_at(int index) {
  return TlsGetValue(index);
}


#ifndef PRODUCT
#ifndef _WIN64
// Helpers to check whether NX protection is enabled
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      pex->ExceptionRecord->NumberParameters > 0 &&
      pex->ExceptionRecord->ExceptionInformation[0] ==
      EXCEPTION_INFO_EXEC_VIOLATION) {
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 }; // ret
  void *code_ptr = (void *)code;
  __try {
    __asm call code_ptr
  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
    tty->print_raw_cr("NX protection detected.");
  }
}
#endif // _WIN64
#endif // PRODUCT

// this is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = _getpid();

  init_random(1234567);
  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
                os::win32::is_windows_2003());

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();
}

// To install functions for atexit processing
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

static jint initSock();

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // Allocate a single page and mark it as readable for safepoint polling
  address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
  guarantee(polling_page != NULL, "Reserve Failed for polling page");

  address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
  guarantee(return_page != NULL, "Commit Failed for polling page");

  os::set_polling_page(polling_page);

#ifndef PRODUCT
  if (Verbose && PrintMiscellaneous)
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
    guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page");

    return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
    guarantee(return_page != NULL, "Commit Failed for memory serialize page");

    os::set_memory_serialize_page(mem_serialize_page);

#ifndef PRODUCT
    if (Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  // Setup Windows Exceptions

  // for debugging float code generation bugs
  if (ForceFloatExceptions) {
#ifndef _WIN64
    static long fp_control_word = 0;
    __asm { fstcw fp_control_word }
    // see Intel PPro Manual, Vol. 2, p 7-16
    const long precision = 0x20;
    const long underflow = 0x10;
    const long overflow  = 0x08;
    const long zero_div  = 0x04;
    const long denorm    = 0x02;
    const long invalid   = 0x01;
    fp_control_word |= invalid;
    __asm { fldcw fp_control_word }
#endif
  }

  // If stack_commit_size is 0, windows will reserve the default size,
  // but only commit a small portion of it.
  size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
  size_t default_reserve_size = os::win32::default_stack_size();
  size_t actual_reserve_size = stack_commit_size;
  if (stack_commit_size < default_reserve_size) {
    // If stack_commit_size == 0, we want this too
    actual_reserve_size = default_reserve_size;
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size.  Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  size_t min_stack_allowed =
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
                     2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
  if (actual_reserve_size < min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, "
                  "Specify at least %dk",
                  min_stack_allowed / K);
    return JNI_ERR;
  }

  JavaThread::set_stack_size_at_create(stack_commit_size);

  // Calculate theoretical max. size of Threads to guard against artificial
  // out-of-memory situations, where all available address-space has been
  // reserved by thread stacks.
  assert(actual_reserve_size != 0, "Must have a stack");

  // Calculate the thread limit when we should start doing Virtual Memory
  // banging. Currently when the threads will have used all but 200Mb of space.
  //
  // TODO: consider performing a similar calculation for commit size instead
  // as reserve size, since on a 64-bit platform we'll run into that more
  // often than running out of virtual memory space.  We can use the
  // lower value of the two calculations as the os_thread_limit.
  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);

  // at exit methods are called in the reverse order of their registration.
  // there is no limit to the number of functions registered. atexit does
  // not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.
3985 3986 // note: perfMemory_exit_helper atexit function may be removed in 3987 // the future if the appropriate cleanup code can be added to the 3988 // VM_Exit VMOperation's doit method. 3989 if (atexit(perfMemory_exit_helper) != 0) { 3990 warning("os::init_2 atexit(perfMemory_exit_helper) failed"); 3991 } 3992 } 3993 3994 #ifndef _WIN64 3995 // Print something if NX is enabled (win32 on AMD64) 3996 NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection()); 3997 #endif 3998 3999 // initialize thread priority policy 4000 prio_init(); 4001 4002 if (UseNUMA && !ForceNUMA) { 4003 UseNUMA = false; // We don't fully support this yet 4004 } 4005 4006 if (UseNUMAInterleaving) { 4007 // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag 4008 bool success = numa_interleaving_init(); 4009 if (!success) UseNUMAInterleaving = false; 4010 } 4011 4012 if (initSock() != JNI_OK) { 4013 return JNI_ERR; 4014 } 4015 4016 return JNI_OK; 4017 } 4018 4019 void os::init_3(void) { 4020 return; 4021 } 4022 4023 // Mark the polling page as unreadable 4024 void os::make_polling_page_unreadable(void) { 4025 DWORD old_status; 4026 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status)) 4027 fatal("Could not disable polling page"); 4028 }; 4029 4030 // Mark the polling page as readable 4031 void os::make_polling_page_readable(void) { 4032 DWORD old_status; 4033 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status)) 4034 fatal("Could not enable polling page"); 4035 }; 4036 4037 4038 int os::stat(const char *path, struct stat *sbuf) { 4039 char pathbuf[MAX_PATH]; 4040 if (strlen(path) > MAX_PATH - 1) { 4041 errno = ENAMETOOLONG; 4042 return -1; 4043 } 4044 os::native_path(strcpy(pathbuf, path)); 4045 int ret = ::stat(pathbuf, sbuf); 4046 if (sbuf != NULL && UseUTCFileTimestamp) { 4047 // Fix for 6539723. 
st_mtime returned from stat() is dependent on 4048 // the system timezone and so can return different values for the 4049 // same file if/when daylight savings time changes. This adjustment 4050 // makes sure the same timestamp is returned regardless of the TZ. 4051 // 4052 // See: 4053 // http://msdn.microsoft.com/library/ 4054 // default.asp?url=/library/en-us/sysinfo/base/ 4055 // time_zone_information_str.asp 4056 // and 4057 // http://msdn.microsoft.com/library/default.asp?url= 4058 // /library/en-us/sysinfo/base/settimezoneinformation.asp 4059 // 4060 // NOTE: there is a insidious bug here: If the timezone is changed 4061 // after the call to stat() but before 'GetTimeZoneInformation()', then 4062 // the adjustment we do here will be wrong and we'll return the wrong 4063 // value (which will likely end up creating an invalid class data 4064 // archive). Absent a better API for this, or some time zone locking 4065 // mechanism, we'll have to live with this risk. 4066 TIME_ZONE_INFORMATION tz; 4067 DWORD tzid = GetTimeZoneInformation(&tz); 4068 int daylightBias = 4069 (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias; 4070 sbuf->st_mtime += (tz.Bias + daylightBias) * 60; 4071 } 4072 return ret; 4073 } 4074 4075 4076 #define FT2INT64(ft) \ 4077 ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime)) 4078 4079 4080 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) 4081 // are used by JVM M&M and JVMTI to get user+sys or user CPU time 4082 // of a thread. 4083 // 4084 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns 4085 // the fast estimate available on the platform. 
4086 4087 // current_thread_cpu_time() is not optimized for Windows yet 4088 jlong os::current_thread_cpu_time() { 4089 // return user + sys since the cost is the same 4090 return os::thread_cpu_time(Thread::current(), true /* user+sys */); 4091 } 4092 4093 jlong os::thread_cpu_time(Thread* thread) { 4094 // consistent with what current_thread_cpu_time() returns. 4095 return os::thread_cpu_time(thread, true /* user+sys */); 4096 } 4097 4098 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 4099 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 4100 } 4101 4102 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) { 4103 // This code is copy from clasic VM -> hpi::sysThreadCPUTime 4104 // If this function changes, os::is_thread_cpu_time_supported() should too 4105 if (os::win32::is_nt()) { 4106 FILETIME CreationTime; 4107 FILETIME ExitTime; 4108 FILETIME KernelTime; 4109 FILETIME UserTime; 4110 4111 if (GetThreadTimes(thread->osthread()->thread_handle(), 4112 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0) 4113 return -1; 4114 else 4115 if (user_sys_cpu_time) { 4116 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100; 4117 } else { 4118 return FT2INT64(UserTime) * 100; 4119 } 4120 } else { 4121 return (jlong) timeGetTime() * 1000000; 4122 } 4123 } 4124 4125 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4126 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4127 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4128 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4129 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4130 } 4131 4132 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4133 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4134 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4135 info_ptr->may_skip_forward = false; // GetThreadTimes 
returns absolute time 4136 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4137 } 4138 4139 bool os::is_thread_cpu_time_supported() { 4140 // see os::thread_cpu_time 4141 if (os::win32::is_nt()) { 4142 FILETIME CreationTime; 4143 FILETIME ExitTime; 4144 FILETIME KernelTime; 4145 FILETIME UserTime; 4146 4147 if (GetThreadTimes(GetCurrentThread(), 4148 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0) 4149 return false; 4150 else 4151 return true; 4152 } else { 4153 return false; 4154 } 4155 } 4156 4157 // Windows does't provide a loadavg primitive so this is stubbed out for now. 4158 // It does have primitives (PDH API) to get CPU usage and run queue length. 4159 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length" 4160 // If we wanted to implement loadavg on Windows, we have a few options: 4161 // 4162 // a) Query CPU usage and run queue length and "fake" an answer by 4163 // returning the CPU usage if it's under 100%, and the run queue 4164 // length otherwise. It turns out that querying is pretty slow 4165 // on Windows, on the order of 200 microseconds on a fast machine. 4166 // Note that on the Windows the CPU usage value is the % usage 4167 // since the last time the API was called (and the first call 4168 // returns 100%), so we'd have to deal with that as well. 4169 // 4170 // b) Sample the "fake" answer using a sampling thread and store 4171 // the answer in a global variable. The call to loadavg would 4172 // just return the value of the global, avoiding the slow query. 4173 // 4174 // c) Sample a better answer using exponential decay to smooth the 4175 // value. This is basically the algorithm used by UNIX kernels. 4176 // 4177 // Note that sampling thread starvation could affect both (b) and (c). 
4178 int os::loadavg(double loadavg[], int nelem) { 4179 return -1; 4180 } 4181 4182 4183 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield() 4184 bool os::dont_yield() { 4185 return DontYieldALot; 4186 } 4187 4188 // This method is a slightly reworked copy of JDK's sysOpen 4189 // from src/windows/hpi/src/sys_api_md.c 4190 4191 int os::open(const char *path, int oflag, int mode) { 4192 char pathbuf[MAX_PATH]; 4193 4194 if (strlen(path) > MAX_PATH - 1) { 4195 errno = ENAMETOOLONG; 4196 return -1; 4197 } 4198 os::native_path(strcpy(pathbuf, path)); 4199 return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); 4200 } 4201 4202 FILE* os::open(int fd, const char* mode) { 4203 return ::_fdopen(fd, mode); 4204 } 4205 4206 // Is a (classpath) directory empty? 4207 bool os::dir_is_empty(const char* path) { 4208 WIN32_FIND_DATA fd; 4209 HANDLE f = FindFirstFile(path, &fd); 4210 if (f == INVALID_HANDLE_VALUE) { 4211 return true; 4212 } 4213 FindClose(f); 4214 return false; 4215 } 4216 4217 // create binary file, rewriting existing file if required 4218 int os::create_binary_file(const char* path, bool rewrite_existing) { 4219 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY; 4220 if (!rewrite_existing) { 4221 oflags |= _O_EXCL; 4222 } 4223 return ::open(path, oflags, _S_IREAD | _S_IWRITE); 4224 } 4225 4226 // return current position of file pointer 4227 jlong os::current_file_offset(int fd) { 4228 return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR); 4229 } 4230 4231 // move file pointer to the specified offset 4232 jlong os::seek_to_file_offset(int fd, jlong offset) { 4233 return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET); 4234 } 4235 4236 4237 jlong os::lseek(int fd, jlong offset, int whence) { 4238 return (jlong) ::_lseeki64(fd, offset, whence); 4239 } 4240 4241 // This method is a slightly reworked copy of JDK's sysNativePath 4242 // from src/windows/hpi/src/path_md.c 4243 4244 /* Convert a pathname to native format. 
On win32, this involves forcing all 4245 separators to be '\\' rather than '/' (both are legal inputs, but Win95 4246 sometimes rejects '/') and removing redundant separators. The input path is 4247 assumed to have been converted into the character encoding used by the local 4248 system. Because this might be a double-byte encoding, care is taken to 4249 treat double-byte lead characters correctly. 4250 4251 This procedure modifies the given path in place, as the result is never 4252 longer than the original. There is no error return; this operation always 4253 succeeds. */ 4254 char * os::native_path(char *path) { 4255 char *src = path, *dst = path, *end = path; 4256 char *colon = NULL; /* If a drive specifier is found, this will 4257 point to the colon following the drive 4258 letter */ 4259 4260 /* Assumption: '/', '\\', ':', and drive letters are never lead bytes */ 4261 assert(((!::IsDBCSLeadByte('/')) 4262 && (!::IsDBCSLeadByte('\\')) 4263 && (!::IsDBCSLeadByte(':'))), 4264 "Illegal lead byte"); 4265 4266 /* Check for leading separators */ 4267 #define isfilesep(c) ((c) == '/' || (c) == '\\') 4268 while (isfilesep(*src)) { 4269 src++; 4270 } 4271 4272 if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') { 4273 /* Remove leading separators if followed by drive specifier. This 4274 hack is necessary to support file URLs containing drive 4275 specifiers (e.g., "file://c:/path"). As a side effect, 4276 "/c:/path" can be used as an alternative to "c:/path". */ 4277 *dst++ = *src++; 4278 colon = dst; 4279 *dst++ = ':'; 4280 src++; 4281 } else { 4282 src = path; 4283 if (isfilesep(src[0]) && isfilesep(src[1])) { 4284 /* UNC pathname: Retain first separator; leave src pointed at 4285 second separator so that further separators will be collapsed 4286 into the second separator. The result will be a pathname 4287 beginning with "\\\\" followed (most likely) by a host name. 
*/ 4288 src = dst = path + 1; 4289 path[0] = '\\'; /* Force first separator to '\\' */ 4290 } 4291 } 4292 4293 end = dst; 4294 4295 /* Remove redundant separators from remainder of path, forcing all 4296 separators to be '\\' rather than '/'. Also, single byte space 4297 characters are removed from the end of the path because those 4298 are not legal ending characters on this operating system. 4299 */ 4300 while (*src != '\0') { 4301 if (isfilesep(*src)) { 4302 *dst++ = '\\'; src++; 4303 while (isfilesep(*src)) src++; 4304 if (*src == '\0') { 4305 /* Check for trailing separator */ 4306 end = dst; 4307 if (colon == dst - 2) break; /* "z:\\" */ 4308 if (dst == path + 1) break; /* "\\" */ 4309 if (dst == path + 2 && isfilesep(path[0])) { 4310 /* "\\\\" is not collapsed to "\\" because "\\\\" marks the 4311 beginning of a UNC pathname. Even though it is not, by 4312 itself, a valid UNC pathname, we leave it as is in order 4313 to be consistent with the path canonicalizer as well 4314 as the win32 APIs, which treat this case as an invalid 4315 UNC pathname rather than as an alias for the root 4316 directory of the current drive. */ 4317 break; 4318 } 4319 end = --dst; /* Path does not denote a root directory, so 4320 remove trailing separator */ 4321 break; 4322 } 4323 end = dst; 4324 } else { 4325 if (::IsDBCSLeadByte(*src)) { /* Copy a double-byte character */ 4326 *dst++ = *src++; 4327 if (*src) *dst++ = *src++; 4328 end = dst; 4329 } else { /* Copy a single-byte character */ 4330 char c = *src++; 4331 *dst++ = c; 4332 /* Space is not a legal ending character */ 4333 if (c != ' ') end = dst; 4334 } 4335 } 4336 } 4337 4338 *end = '\0'; 4339 4340 /* For "z:", add "." 
to work around a bug in the C runtime library */ 4341 if (colon == dst - 1) { 4342 path[2] = '.'; 4343 path[3] = '\0'; 4344 } 4345 4346 return path; 4347 } 4348 4349 // This code is a copy of JDK's sysSetLength 4350 // from src/windows/hpi/src/sys_api_md.c 4351 4352 int os::ftruncate(int fd, jlong length) { 4353 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4354 long high = (long)(length >> 32); 4355 DWORD ret; 4356 4357 if (h == (HANDLE)(-1)) { 4358 return -1; 4359 } 4360 4361 ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN); 4362 if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) { 4363 return -1; 4364 } 4365 4366 if (::SetEndOfFile(h) == FALSE) { 4367 return -1; 4368 } 4369 4370 return 0; 4371 } 4372 4373 4374 // This code is a copy of JDK's sysSync 4375 // from src/windows/hpi/src/sys_api_md.c 4376 // except for the legacy workaround for a bug in Win 98 4377 4378 int os::fsync(int fd) { 4379 HANDLE handle = (HANDLE)::_get_osfhandle(fd); 4380 4381 if ((!::FlushFileBuffers(handle)) && 4382 (GetLastError() != ERROR_ACCESS_DENIED) ) { 4383 /* from winerror.h */ 4384 return -1; 4385 } 4386 return 0; 4387 } 4388 4389 static int nonSeekAvailable(int, long *); 4390 static int stdinAvailable(int, long *); 4391 4392 #define S_ISCHR(mode) (((mode) & _S_IFCHR) == _S_IFCHR) 4393 #define S_ISFIFO(mode) (((mode) & _S_IFIFO) == _S_IFIFO) 4394 4395 // This code is a copy of JDK's sysAvailable 4396 // from src/windows/hpi/src/sys_api_md.c 4397 4398 int os::available(int fd, jlong *bytes) { 4399 jlong cur, end; 4400 struct _stati64 stbuf64; 4401 4402 if (::_fstati64(fd, &stbuf64) >= 0) { 4403 int mode = stbuf64.st_mode; 4404 if (S_ISCHR(mode) || S_ISFIFO(mode)) { 4405 int ret; 4406 long lpbytes; 4407 if (fd == 0) { 4408 ret = stdinAvailable(fd, &lpbytes); 4409 } else { 4410 ret = nonSeekAvailable(fd, &lpbytes); 4411 } 4412 (*bytes) = (jlong)(lpbytes); 4413 return ret; 4414 } 4415 if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) { 4416 return FALSE; 4417 } else if 
((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) { 4418 return FALSE; 4419 } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) { 4420 return FALSE; 4421 } 4422 *bytes = end - cur; 4423 return TRUE; 4424 } else { 4425 return FALSE; 4426 } 4427 } 4428 4429 // This code is a copy of JDK's nonSeekAvailable 4430 // from src/windows/hpi/src/sys_api_md.c 4431 4432 static int nonSeekAvailable(int fd, long *pbytes) { 4433 /* This is used for available on non-seekable devices 4434 * (like both named and anonymous pipes, such as pipes 4435 * connected to an exec'd process). 4436 * Standard Input is a special case. 4437 * 4438 */ 4439 HANDLE han; 4440 4441 if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) { 4442 return FALSE; 4443 } 4444 4445 if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) { 4446 /* PeekNamedPipe fails when at EOF. In that case we 4447 * simply make *pbytes = 0 which is consistent with the 4448 * behavior we get on Solaris when an fd is at EOF. 4449 * The only alternative is to raise an Exception, 4450 * which isn't really warranted. 
4451 */ 4452 if (::GetLastError() != ERROR_BROKEN_PIPE) { 4453 return FALSE; 4454 } 4455 *pbytes = 0; 4456 } 4457 return TRUE; 4458 } 4459 4460 #define MAX_INPUT_EVENTS 2000 4461 4462 // This code is a copy of JDK's stdinAvailable 4463 // from src/windows/hpi/src/sys_api_md.c 4464 4465 static int stdinAvailable(int fd, long *pbytes) { 4466 HANDLE han; 4467 DWORD numEventsRead = 0; /* Number of events read from buffer */ 4468 DWORD numEvents = 0; /* Number of events in buffer */ 4469 DWORD i = 0; /* Loop index */ 4470 DWORD curLength = 0; /* Position marker */ 4471 DWORD actualLength = 0; /* Number of bytes readable */ 4472 BOOL error = FALSE; /* Error holder */ 4473 INPUT_RECORD *lpBuffer; /* Pointer to records of input events */ 4474 4475 if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) { 4476 return FALSE; 4477 } 4478 4479 /* Construct an array of input records in the console buffer */ 4480 error = ::GetNumberOfConsoleInputEvents(han, &numEvents); 4481 if (error == 0) { 4482 return nonSeekAvailable(fd, pbytes); 4483 } 4484 4485 /* lpBuffer must fit into 64K or else PeekConsoleInput fails */ 4486 if (numEvents > MAX_INPUT_EVENTS) { 4487 numEvents = MAX_INPUT_EVENTS; 4488 } 4489 4490 lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal); 4491 if (lpBuffer == NULL) { 4492 return FALSE; 4493 } 4494 4495 error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead); 4496 if (error == 0) { 4497 os::free(lpBuffer, mtInternal); 4498 return FALSE; 4499 } 4500 4501 /* Examine input records for the number of bytes available */ 4502 for (i=0; i<numEvents; i++) { 4503 if (lpBuffer[i].EventType == KEY_EVENT) { 4504 4505 KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *) 4506 &(lpBuffer[i].Event); 4507 if (keyRecord->bKeyDown == TRUE) { 4508 CHAR *keyPressed = (CHAR *) &(keyRecord->uChar); 4509 curLength++; 4510 if (*keyPressed == '\r') { 4511 actualLength = curLength; 4512 } 4513 } 4514 } 4515 } 4516 4517 if (lpBuffer 
!= NULL) { 4518 os::free(lpBuffer, mtInternal); 4519 } 4520 4521 *pbytes = (long) actualLength; 4522 return TRUE; 4523 } 4524 4525 // Map a block of memory. 4526 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 4527 char *addr, size_t bytes, bool read_only, 4528 bool allow_exec) { 4529 HANDLE hFile; 4530 char* base; 4531 4532 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL, 4533 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); 4534 if (hFile == NULL) { 4535 if (PrintMiscellaneous && Verbose) { 4536 DWORD err = GetLastError(); 4537 tty->print_cr("CreateFile() failed: GetLastError->%ld.", err); 4538 } 4539 return NULL; 4540 } 4541 4542 if (allow_exec) { 4543 // CreateFileMapping/MapViewOfFileEx can't map executable memory 4544 // unless it comes from a PE image (which the shared archive is not.) 4545 // Even VirtualProtect refuses to give execute access to mapped memory 4546 // that was not previously executable. 4547 // 4548 // Instead, stick the executable region in anonymous memory. Yuck. 4549 // Penalty is that ~4 pages will not be shareable - in the future 4550 // we might consider DLLizing the shared archive with a proper PE 4551 // header so that mapping executable + sharing is possible. 4552 4553 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE, 4554 PAGE_READWRITE); 4555 if (base == NULL) { 4556 if (PrintMiscellaneous && Verbose) { 4557 DWORD err = GetLastError(); 4558 tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err); 4559 } 4560 CloseHandle(hFile); 4561 return NULL; 4562 } 4563 4564 DWORD bytes_read; 4565 OVERLAPPED overlapped; 4566 overlapped.Offset = (DWORD)file_offset; 4567 overlapped.OffsetHigh = 0; 4568 overlapped.hEvent = NULL; 4569 // ReadFile guarantees that if the return value is true, the requested 4570 // number of bytes were read before returning. 
4571 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; 4572 if (!res) { 4573 if (PrintMiscellaneous && Verbose) { 4574 DWORD err = GetLastError(); 4575 tty->print_cr("ReadFile() failed: GetLastError->%ld.", err); 4576 } 4577 release_memory(base, bytes); 4578 CloseHandle(hFile); 4579 return NULL; 4580 } 4581 } else { 4582 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, 4583 NULL /*file_name*/); 4584 if (hMap == NULL) { 4585 if (PrintMiscellaneous && Verbose) { 4586 DWORD err = GetLastError(); 4587 tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err); 4588 } 4589 CloseHandle(hFile); 4590 return NULL; 4591 } 4592 4593 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; 4594 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, 4595 (DWORD)bytes, addr); 4596 if (base == NULL) { 4597 if (PrintMiscellaneous && Verbose) { 4598 DWORD err = GetLastError(); 4599 tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err); 4600 } 4601 CloseHandle(hMap); 4602 CloseHandle(hFile); 4603 return NULL; 4604 } 4605 4606 if (CloseHandle(hMap) == 0) { 4607 if (PrintMiscellaneous && Verbose) { 4608 DWORD err = GetLastError(); 4609 tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err); 4610 } 4611 CloseHandle(hFile); 4612 return base; 4613 } 4614 } 4615 4616 if (allow_exec) { 4617 DWORD old_protect; 4618 DWORD exec_access = read_only ? 
PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; 4619 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; 4620 4621 if (!res) { 4622 if (PrintMiscellaneous && Verbose) { 4623 DWORD err = GetLastError(); 4624 tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err); 4625 } 4626 // Don't consider this a hard error, on IA32 even if the 4627 // VirtualProtect fails, we should still be able to execute 4628 CloseHandle(hFile); 4629 return base; 4630 } 4631 } 4632 4633 if (CloseHandle(hFile) == 0) { 4634 if (PrintMiscellaneous && Verbose) { 4635 DWORD err = GetLastError(); 4636 tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err); 4637 } 4638 return base; 4639 } 4640 4641 return base; 4642 } 4643 4644 4645 // Remap a block of memory. 4646 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4647 char *addr, size_t bytes, bool read_only, 4648 bool allow_exec) { 4649 // This OS does not allow existing memory maps to be remapped so we 4650 // have to unmap the memory before we remap it. 4651 if (!os::unmap_memory(addr, bytes)) { 4652 return NULL; 4653 } 4654 4655 // There is a very small theoretical window between the unmap_memory() 4656 // call above and the map_memory() call below where a thread in native 4657 // code may be able to access an address that is no longer mapped. 4658 4659 return os::map_memory(fd, file_name, file_offset, addr, bytes, 4660 read_only, allow_exec); 4661 } 4662 4663 4664 // Unmap a block of memory. 4665 // Returns true=success, otherwise false. 
4666 4667 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4668 BOOL result = UnmapViewOfFile(addr); 4669 if (result == 0) { 4670 if (PrintMiscellaneous && Verbose) { 4671 DWORD err = GetLastError(); 4672 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err); 4673 } 4674 return false; 4675 } 4676 return true; 4677 } 4678 4679 void os::pause() { 4680 char filename[MAX_PATH]; 4681 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4682 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4683 } else { 4684 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4685 } 4686 4687 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4688 if (fd != -1) { 4689 struct stat buf; 4690 ::close(fd); 4691 while (::stat(filename, &buf) == 0) { 4692 Sleep(100); 4693 } 4694 } else { 4695 jio_fprintf(stderr, 4696 "Could not open pause file '%s', continuing immediately.\n", filename); 4697 } 4698 } 4699 4700 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() { 4701 assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread"); 4702 } 4703 4704 /* 4705 * See the caveats for this class in os_windows.hpp 4706 * Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4707 * into this method and returns false. If no OS EXCEPTION was raised, returns 4708 * true. 4709 * The callback is supposed to provide the method that should be protected. 
4710 */ 4711 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) { 4712 assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread"); 4713 assert(!WatcherThread::watcher_thread()->has_crash_protection(), 4714 "crash_protection already set?"); 4715 4716 bool success = true; 4717 __try { 4718 WatcherThread::watcher_thread()->set_crash_protection(this); 4719 cb.call(); 4720 } __except(EXCEPTION_EXECUTE_HANDLER) { 4721 // only for protection, nothing to do 4722 success = false; 4723 } 4724 WatcherThread::watcher_thread()->set_crash_protection(NULL); 4725 return success; 4726 } 4727 4728 // An Event wraps a win32 "CreateEvent" kernel handle. 4729 // 4730 // We have a number of choices regarding "CreateEvent" win32 handle leakage: 4731 // 4732 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle 4733 // field, and call CloseHandle() on the win32 event handle. Unpark() would 4734 // need to be modified to tolerate finding a NULL (invalid) win32 event handle. 4735 // In addition, an unpark() operation might fetch the handle field, but the 4736 // event could recycle between the fetch and the SetEvent() operation. 4737 // SetEvent() would either fail because the handle was invalid, or inadvertently work, 4738 // as the win32 handle value had been recycled. In an ideal world calling SetEvent() 4739 // on an stale but recycled handle would be harmless, but in practice this might 4740 // confuse other non-Sun code, so it's not a viable approach. 4741 // 4742 // 2: Once a win32 event handle is associated with an Event, it remains associated 4743 // with the Event. The event handle is never closed. This could be construed 4744 // as handle leakage, but only up to the maximum # of threads that have been extant 4745 // at any one time. This shouldn't be an issue, as windows platforms typically 4746 // permit a process to have hundreds of thousands of open handles. 
//
// 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
//    and release unused handles.
//
// 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
//    It's not clear, however, that we wouldn't be trading one type of leak for another.
//
// 5. Use an RCU-like mechanism (Read-Copy Update).
//    Or perhaps something similar to Maged Michael's "Hazard pointers".
//
// We use (2).
//
// TODO-FIXME:
// 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
// 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
//    to recover from (or at least detect) the dreaded Windows 841176 bug.
// 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
//    into a single win32 CreateEvent() handle.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block
//
// _Event serves as a restricted-range semaphore :
//    -1 : thread is blocked
//     0 : neutral  - thread is running or ready
//     1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be
// with explicit "PARKED" and "SIGNALED" bits.

// Timed park: block the associated thread for at most Millis milliseconds,
// or until unpark() is called.  Returns OS_OK if awoken by unpark() (or if
// the event was already signaled), OS_TIMEOUT if the full timeout elapsed.
int os::PlatformEvent::park(jlong Millis) {
  guarantee(_ParkHandle != NULL , "Invariant");
  guarantee(Millis > 0          , "Invariant");
  int v;

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.

  // Atomically decrement _Event: 1 -> 0 means a permit was available
  // (consume it and return), 0 -> -1 means we must block.
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return OS_OK;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows. See EventWait() for details. This may be superstition. Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos(). Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval. Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time. This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT) -- NB: original comment said MAX, but the code computes the minimum
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
  }
  v = _Event;
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultaneous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}

// Untimed park: block until unpark() is called (or a permit is already
// available).  Same _Event protocol as the timed variant above.
void os::PlatformEvent::park() {
  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}

// Make a permit available and wake the parked thread if there is one.
// Safe to call from any thread.
void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  //          That is, we can safely transition _Event from -1 to either
  //          0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  if (Atomic::xchg(1, &_Event) >= 0) return;

  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

/*
 * The Windows implementation of Park is very straightforward: Basic
 * operations on Win32 Events turn out to have the right semantics to
 * use them directly. We opportunistically reuse the event inherited
 * from Monitor.
 */


// JSR166 LockSupport.park: time is absolute milliseconds (isAbsolute) or
// relative nanoseconds; time == 0 && !isAbsolute means wait indefinitely.
void Parker::park(bool isAbsolute, jlong time) {
  guarantee(_ParkEvent != NULL, "invariant");
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  }
  else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  }
  else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) // already elapsed
      return;
  }
  else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0)   // Wait for the minimal time unit if zero
      time = 1;
  }
  // NOTE(review): 'time' (jlong) is narrowed to DWORD at the
  // WaitForSingleObject call below; waits longer than ~49 days would
  // truncate -- confirm this is acceptable for callers.

  JavaThread* thread = (JavaThread*)(Thread::current());
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    ResetEvent(_ParkEvent);
    return;
  }
  else {
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jt->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (jt->handle_special_suspend_equivalent_condition()) {
      jt->java_suspend_self();
    }
  }
}

// JSR166 LockSupport.unpark: signal the event; a concurrent or
// subsequent park() consumes the signal.
void Parker::unpark() {
  guarantee(_ParkEvent != NULL, "invariant");
  SetEvent(_ParkEvent);
}

// Run the
specified command in a separate process. Return its exit value, 4946 // or -1 on failure (e.g. can't create a new process). 4947 int os::fork_and_exec(char* cmd) { 4948 STARTUPINFO si; 4949 PROCESS_INFORMATION pi; 4950 4951 memset(&si, 0, sizeof(si)); 4952 si.cb = sizeof(si); 4953 memset(&pi, 0, sizeof(pi)); 4954 BOOL rslt = CreateProcess(NULL, // executable name - use command line 4955 cmd, // command line 4956 NULL, // process security attribute 4957 NULL, // thread security attribute 4958 TRUE, // inherits system handles 4959 0, // no creation flags 4960 NULL, // use parent's environment block 4961 NULL, // use parent's starting directory 4962 &si, // (in) startup information 4963 &pi); // (out) process information 4964 4965 if (rslt) { 4966 // Wait until child process exits. 4967 WaitForSingleObject(pi.hProcess, INFINITE); 4968 4969 DWORD exit_code; 4970 GetExitCodeProcess(pi.hProcess, &exit_code); 4971 4972 // Close process and thread handles. 4973 CloseHandle(pi.hProcess); 4974 CloseHandle(pi.hThread); 4975 4976 return (int)exit_code; 4977 } else { 4978 return -1; 4979 } 4980 } 4981 4982 //-------------------------------------------------------------------------------------------------- 4983 // Non-product code 4984 4985 static int mallocDebugIntervalCounter = 0; 4986 static int mallocDebugCounter = 0; 4987 bool os::check_heap(bool force) { 4988 if (++mallocDebugCounter < MallocVerifyStart && !force) return true; 4989 if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) { 4990 // Note: HeapValidate executes two hardware breakpoints when it finds something 4991 // wrong; at these points, eax contains the address of the offending block (I think). 4992 // To get to the exlicit error message(s) below, just continue twice. 4993 HANDLE heap = GetProcessHeap(); 4994 4995 // If we fail to lock the heap, then gflags.exe has been used 4996 // or some other special heap flag has been set that prevents 4997 // locking. 
We don't try to walk a heap we can't lock. 4998 if (HeapLock(heap) != 0) { 4999 PROCESS_HEAP_ENTRY phe; 5000 phe.lpData = NULL; 5001 while (HeapWalk(heap, &phe) != 0) { 5002 if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) && 5003 !HeapValidate(heap, 0, phe.lpData)) { 5004 tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter); 5005 tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData); 5006 fatal("corrupted C heap"); 5007 } 5008 } 5009 DWORD err = GetLastError(); 5010 if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) { 5011 fatal(err_msg("heap walk aborted with error %d", err)); 5012 } 5013 HeapUnlock(heap); 5014 } 5015 mallocDebugIntervalCounter = 0; 5016 } 5017 return true; 5018 } 5019 5020 5021 bool os::find(address addr, outputStream* st) { 5022 // Nothing yet 5023 return false; 5024 } 5025 5026 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) { 5027 DWORD exception_code = e->ExceptionRecord->ExceptionCode; 5028 5029 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 5030 JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow(); 5031 PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord; 5032 address addr = (address) exceptionRecord->ExceptionInformation[1]; 5033 5034 if (os::is_memory_serialize_page(thread, addr)) 5035 return EXCEPTION_CONTINUE_EXECUTION; 5036 } 5037 5038 return EXCEPTION_CONTINUE_SEARCH; 5039 } 5040 5041 // We don't build a headless jre for Windows 5042 bool os::is_headless_jre() { return false; } 5043 5044 static jint initSock() { 5045 WSADATA wsadata; 5046 5047 if (!os::WinSock2Dll::WinSock2Available()) { 5048 jio_fprintf(stderr, "Could not load Winsock (error: %d)\n", 5049 ::GetLastError()); 5050 return JNI_ERR; 5051 } 5052 5053 if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) { 5054 jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n", 5055 ::GetLastError()); 5056 return JNI_ERR; 5057 } 5058 
  return JNI_OK;
}

// Thin wrappers over Winsock; fd is a SOCKET in disguise.

struct hostent* os::get_host_by_name(char* name) {
  return (struct hostent*)os::WinSock2Dll::gethostbyname(name);
}

int os::socket_close(int fd) {
  return ::closesocket(fd);
}

// Returns 1 on success (bytes available stored in *pbytes), 0 on failure.
int os::socket_available(int fd, jint *pbytes) {
  int ret = ::ioctlsocket(fd, FIONREAD, (u_long*)pbytes);
  return (ret < 0) ? 0 : 1;
}

int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}

int os::listen(int fd, int count) {
  return ::listen(fd, count);
}

int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  return ::connect(fd, him, len);
}

int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
  return ::accept(fd, him, len);
}

int os::sendto(int fd, char* buf, size_t len, uint flags,
               struct sockaddr* to, socklen_t tolen) {

  return ::sendto(fd, buf, (int)len, flags, to, tolen);
}

int os::recvfrom(int fd, char *buf, size_t nBytes, uint flags,
                 sockaddr* from, socklen_t* fromlen) {

  return ::recvfrom(fd, buf, (int)nBytes, flags, from, fromlen);
}

int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  return ::recv(fd, buf, (int)nBytes, flags);
}

int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

// Wait up to 'timeout' milliseconds for fd to become readable.
// Returns select()'s result: >0 ready, 0 timed out, SOCKET_ERROR on error.
int os::timeout(int fd, long timeout) {
  fd_set tbl;
  struct timeval t;

  t.tv_sec  = timeout / 1000;
  t.tv_usec = (timeout % 1000) * 1000;

  // Build the single-entry fd_set directly; on Winsock the first select()
  // argument (nfds) is ignored.
  tbl.fd_count    = 1;
  tbl.fd_array[0] = fd;

  return ::select(1, &tbl, 0, 0, &t);
}

int os::get_host_name(char* name, int namelen) {
  return ::gethostname(name, namelen);
}

int os::socket_shutdown(int fd, int howto) {
  return ::shutdown(fd, howto);
}

int os::bind(int fd, struct sockaddr* him, socklen_t len) {
  return ::bind(fd, him, len);
}

int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
  return ::getsockname(fd, him, len);
}

int os::get_sock_opt(int fd, int level, int optname,
                     char* optval, socklen_t* optlen) {
  return ::getsockopt(fd, level, optname, optval, optlen);
}

int os::set_sock_opt(int fd, int level, int optname,
                     const char* optval, socklen_t optlen) {
  return ::setsockopt(fd, level, optname, optval, optlen);
}

// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(IA32)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
# define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif

// returns true if thread could be suspended,
// false otherwise
static bool do_suspend(HANDLE* h) {
  if (h != NULL) {
    // SuspendThread returns (DWORD)-1 on failure.
    if (SuspendThread(*h) != ~0) {
      return true;
    }
  }
  return false;
}

// resume the thread
// calling resume on an active thread is a no-op
static void do_resume(HANDLE* h) {
  if (h != NULL) {
    ResumeThread(*h);
  }
}

// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value.
// Opens a thread handle with the access rights needed for suspend/resume
// and context retrieval; stores NULL in *h on failure (per OpenThread).
void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

//
// Thread sampling implementation
//
// Suspends the target thread, captures its register context, runs the
// sampling callback (do_task), then resumes the thread.
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    // NOTE(review): GetThreadContext's return value is not checked; on
    // failure ctxt holds stale/uninitialized data -- confirm do_task tolerates this.
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}


// Kernel32 API
// Function-pointer types for Kernel32 entry points that may be absent on
// older Windows versions; resolved lazily via GetProcAddress.
typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG);
typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG);
typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG);

GetLargePageMinimum_Fn      os::Kernel32Dll::_GetLargePageMinimum = NULL;
VirtualAllocExNuma_Fn       os::Kernel32Dll::_VirtualAllocExNuma = NULL;
GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL;


BOOL os::Kernel32Dll::initialized = FALSE;

// Checked accessor: the matching *Available() probe must have run first.
SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
  assert(initialized && _GetLargePageMinimum != NULL,
         "GetLargePageMinimumAvailable() not yet called");
  return _GetLargePageMinimum();
}

BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetLargePageMinimum != NULL;
}

BOOL os::Kernel32Dll::NumaCallsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _VirtualAllocExNuma != NULL;
}

LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr, SIZE_T bytes, DWORD flags, DWORD prot, DWORD node) {
  assert(initialized && _VirtualAllocExNuma != NULL,
         "NUMACallsAvailable() not yet called");

  return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
}

BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
  assert(initialized && _GetNumaHighestNodeNumber != NULL,
         "NUMACallsAvailable() not yet called");

  return _GetNumaHighestNodeNumber(ptr_highest_node_number);
}

BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node, PULONGLONG proc_mask) {
  assert(initialized && _GetNumaNodeProcessorMask != NULL,
         "NUMACallsAvailable() not yet called");

  return _GetNumaNodeProcessorMask(node, proc_mask);
}

// Returns 0 when RtlCaptureStackBackTrace is unavailable (self-initializing).
USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
                                                 ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) {
  if (!initialized) {
    initialize();
  }

  if (_RtlCaptureStackBackTrace != NULL) {
    return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
                                     BackTrace, BackTraceHash);
  } else {
    return 0;
  }
}

// Resolves the entry points that must be looked up on every supported
// Windows version (shared between the JDK6 and post-JDK6 paths).
void os::Kernel32Dll::initializeCommon() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");
    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
    _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
    _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
    _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
    _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace");
    initialized = TRUE;
  }
}



#ifndef JDK6_OR_EARLIER
// Post-JDK6 build: the APIs below are guaranteed present, so the wrappers
// call the Win32 functions directly instead of going through GetProcAddress.

void os::Kernel32Dll::initialize() {
  initializeCommon();
}


// Kernel32 API
inline BOOL os::Kernel32Dll::SwitchToThread() {
  return ::SwitchToThread();
}

inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  return true;
}

// Help tools
inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
  return true;
}

inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
  return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32First(hSnapshot, lpme);
}

inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32Next(hSnapshot, lpme);
}


inline BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
  return true;
}

inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  ::GetNativeSystemInfo(lpSystemInfo);
}

// PSAPI API
inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
  return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
  return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
  return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

inline BOOL os::PSApiDll::PSApiAvailable() {
  return true;
}


// WinSock2 API
inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
  return ::WSAStartup(wVersionRequested, lpWSAData);
}

inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  return ::gethostbyname(name);
}

inline BOOL os::WinSock2Dll::WinSock2Available() {
  return true;
}

// Advapi API
inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
                                                   BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
                                                   PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
  return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
                                 BufferLength, PreviousState, ReturnLength);
}

inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
                                              PHANDLE TokenHandle) {
  return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}

inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
  return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}

inline BOOL os::Advapi32Dll::AdvapiAvailable() {
  return true;
}

void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//  sym_name: Symbol in library we are looking for
//  lib_name: Name of library to look in, NULL for shared libs.
//  is_absolute_path == true if lib_name is absolute path to agent
//                      such as "C:/a/b/L.dll"
//                   == false if only the base name of the library is passed in
//                      such as "L"
// Returns a NEW_C_HEAP_ARRAY-allocated string ("sym" or "sym_lib" or
// "_sym_lib@XX" for __stdcall decorations), or NULL on allocation failure
// or when the stripped library name is too short. Caller frees.
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      // NOTE(review): this sanity check uses 'len', the length of the FULL
      // path, not the stripped basename -- confirm this is intentional.
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // +2: one for the '_' separator, one for the terminating NUL.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    // __stdcall names carry an "@XX" argument-size suffix; splice the
    // library name in before it.
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}

#else
// JDK6-or-earlier build: the APIs below may be missing at runtime, so
// every entry point is resolved dynamically via GetProcAddress.

// Kernel32 API
typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD,DWORD);
typedef BOOL (WINAPI* Module32First_Fn)(HANDLE,LPMODULEENTRY32);
typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE,LPMODULEENTRY32);
typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);

SwitchToThread_Fn           os::Kernel32Dll::_SwitchToThread = NULL;
CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
Module32First_Fn            os::Kernel32Dll::_Module32First = NULL;
Module32Next_Fn             os::Kernel32Dll::_Module32Next = NULL;
GetNativeSystemInfo_Fn      os::Kernel32Dll::_GetNativeSystemInfo = NULL;

void os::Kernel32Dll::initialize() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");

    _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
    _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
      ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
5489 _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First"); 5490 _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next"); 5491 _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo"); 5492 initializeCommon(); // resolve the functions that always need resolving 5493 5494 initialized = TRUE; 5495 } 5496 } 5497 5498 BOOL os::Kernel32Dll::SwitchToThread() { 5499 assert(initialized && _SwitchToThread != NULL, 5500 "SwitchToThreadAvailable() not yet called"); 5501 return _SwitchToThread(); 5502 } 5503 5504 5505 BOOL os::Kernel32Dll::SwitchToThreadAvailable() { 5506 if (!initialized) { 5507 initialize(); 5508 } 5509 return _SwitchToThread != NULL; 5510 } 5511 5512 // Help tools 5513 BOOL os::Kernel32Dll::HelpToolsAvailable() { 5514 if (!initialized) { 5515 initialize(); 5516 } 5517 return _CreateToolhelp32Snapshot != NULL && 5518 _Module32First != NULL && 5519 _Module32Next != NULL; 5520 } 5521 5522 HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) { 5523 assert(initialized && _CreateToolhelp32Snapshot != NULL, 5524 "HelpToolsAvailable() not yet called"); 5525 5526 return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId); 5527 } 5528 5529 BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) { 5530 assert(initialized && _Module32First != NULL, 5531 "HelpToolsAvailable() not yet called"); 5532 5533 return _Module32First(hSnapshot, lpme); 5534 } 5535 5536 inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) { 5537 assert(initialized && _Module32Next != NULL, 5538 "HelpToolsAvailable() not yet called"); 5539 5540 return _Module32Next(hSnapshot, lpme); 5541 } 5542 5543 5544 BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() { 5545 if (!initialized) { 5546 initialize(); 5547 } 5548 return _GetNativeSystemInfo != NULL; 5549 } 5550 5551 void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) { 5552 
assert(initialized && _GetNativeSystemInfo != NULL, 5553 "GetNativeSystemInfoAvailable() not yet called"); 5554 5555 _GetNativeSystemInfo(lpSystemInfo); 5556 } 5557 5558 // PSAPI API 5559 5560 5561 typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD); 5562 typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);; 5563 typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD); 5564 5565 EnumProcessModules_Fn os::PSApiDll::_EnumProcessModules = NULL; 5566 GetModuleFileNameEx_Fn os::PSApiDll::_GetModuleFileNameEx = NULL; 5567 GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL; 5568 BOOL os::PSApiDll::initialized = FALSE; 5569 5570 void os::PSApiDll::initialize() { 5571 if (!initialized) { 5572 HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0); 5573 if (handle != NULL) { 5574 _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle, 5575 "EnumProcessModules"); 5576 _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle, 5577 "GetModuleFileNameExA"); 5578 _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle, 5579 "GetModuleInformation"); 5580 } 5581 initialized = TRUE; 5582 } 5583 } 5584 5585 5586 5587 BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) { 5588 assert(initialized && _EnumProcessModules != NULL, 5589 "PSApiAvailable() not yet called"); 5590 return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded); 5591 } 5592 5593 DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) { 5594 assert(initialized && _GetModuleFileNameEx != NULL, 5595 "PSApiAvailable() not yet called"); 5596 return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize); 5597 } 5598 5599 BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) { 5600 assert(initialized && 
_GetModuleInformation != NULL, 5601 "PSApiAvailable() not yet called"); 5602 return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb); 5603 } 5604 5605 BOOL os::PSApiDll::PSApiAvailable() { 5606 if (!initialized) { 5607 initialize(); 5608 } 5609 return _EnumProcessModules != NULL && 5610 _GetModuleFileNameEx != NULL && 5611 _GetModuleInformation != NULL; 5612 } 5613 5614 5615 // WinSock2 API 5616 typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA); 5617 typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...); 5618 5619 WSAStartup_Fn os::WinSock2Dll::_WSAStartup = NULL; 5620 gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL; 5621 BOOL os::WinSock2Dll::initialized = FALSE; 5622 5623 void os::WinSock2Dll::initialize() { 5624 if (!initialized) { 5625 HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0); 5626 if (handle != NULL) { 5627 _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup"); 5628 _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname"); 5629 } 5630 initialized = TRUE; 5631 } 5632 } 5633 5634 5635 BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) { 5636 assert(initialized && _WSAStartup != NULL, 5637 "WinSock2Available() not yet called"); 5638 return _WSAStartup(wVersionRequested, lpWSAData); 5639 } 5640 5641 struct hostent* os::WinSock2Dll::gethostbyname(const char *name) { 5642 assert(initialized && _gethostbyname != NULL, 5643 "WinSock2Available() not yet called"); 5644 return _gethostbyname(name); 5645 } 5646 5647 BOOL os::WinSock2Dll::WinSock2Available() { 5648 if (!initialized) { 5649 initialize(); 5650 } 5651 return _WSAStartup != NULL && 5652 _gethostbyname != NULL; 5653 } 5654 5655 typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD); 5656 typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE); 5657 typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, LPCTSTR, PLUID); 5658 
5659 AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL; 5660 OpenProcessToken_Fn os::Advapi32Dll::_OpenProcessToken = NULL; 5661 LookupPrivilegeValue_Fn os::Advapi32Dll::_LookupPrivilegeValue = NULL; 5662 BOOL os::Advapi32Dll::initialized = FALSE; 5663 5664 void os::Advapi32Dll::initialize() { 5665 if (!initialized) { 5666 HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0); 5667 if (handle != NULL) { 5668 _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle, 5669 "AdjustTokenPrivileges"); 5670 _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle, 5671 "OpenProcessToken"); 5672 _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle, 5673 "LookupPrivilegeValueA"); 5674 } 5675 initialized = TRUE; 5676 } 5677 } 5678 5679 BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle, 5680 BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength, 5681 PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) { 5682 assert(initialized && _AdjustTokenPrivileges != NULL, 5683 "AdvapiAvailable() not yet called"); 5684 return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState, 5685 BufferLength, PreviousState, ReturnLength); 5686 } 5687 5688 BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess, 5689 PHANDLE TokenHandle) { 5690 assert(initialized && _OpenProcessToken != NULL, 5691 "AdvapiAvailable() not yet called"); 5692 return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle); 5693 } 5694 5695 BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) { 5696 assert(initialized && _LookupPrivilegeValue != NULL, 5697 "AdvapiAvailable() not yet called"); 5698 return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid); 5699 } 5700 5701 BOOL os::Advapi32Dll::AdvapiAvailable() { 5702 if (!initialized) { 5703 initialize(); 5704 } 5705 return _AdjustTokenPrivileges != NULL && 5706 
_OpenProcessToken != NULL && 5707 _LookupPrivilegeValue != NULL; 5708 } 5709 5710 #endif 5711 5712 #ifndef PRODUCT 5713 5714 // test the code path in reserve_memory_special() that tries to allocate memory in a single 5715 // contiguous memory block at a particular address. 5716 // The test first tries to find a good approximate address to allocate at by using the same 5717 // method to allocate some memory at any address. The test then tries to allocate memory in 5718 // the vicinity (not directly after it to avoid possible by-chance use of that location) 5719 // This is of course only some dodgy assumption, there is no guarantee that the vicinity of 5720 // the previously allocated memory is available for allocation. The only actual failure 5721 // that is reported is when the test tries to allocate at a particular location but gets a 5722 // different valid one. A NULL return value at this point is not considered an error but may 5723 // be legitimate. 5724 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages. 5725 void TestReserveMemorySpecial_test() { 5726 if (!UseLargePages) { 5727 if (VerboseInternalVMTests) { 5728 gclog_or_tty->print("Skipping test because large pages are disabled"); 5729 } 5730 return; 5731 } 5732 // save current value of globals 5733 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; 5734 bool old_use_numa_interleaving = UseNUMAInterleaving; 5735 5736 // set globals to make sure we hit the correct code path 5737 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; 5738 5739 // do an allocation at an address selected by the OS to get a good one. 5740 const size_t large_allocation_size = os::large_page_size() * 4; 5741 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); 5742 if (result == NULL) { 5743 if (VerboseInternalVMTests) { 5744 gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". 
Skipping remainder of test.", 5745 large_allocation_size); 5746 } 5747 } else { 5748 os::release_memory_special(result, large_allocation_size); 5749 5750 // allocate another page within the recently allocated memory area which seems to be a good location. At least 5751 // we managed to get it once. 5752 const size_t expected_allocation_size = os::large_page_size(); 5753 char* expected_location = result + os::large_page_size(); 5754 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); 5755 if (actual_location == NULL) { 5756 if (VerboseInternalVMTests) { 5757 gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.", 5758 expected_location, large_allocation_size); 5759 } 5760 } else { 5761 // release memory 5762 os::release_memory_special(actual_location, expected_allocation_size); 5763 // only now check, after releasing any memory to avoid any leaks. 5764 assert(actual_location == expected_location, 5765 err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead", 5766 expected_location, expected_allocation_size, actual_location)); 5767 } 5768 } 5769 5770 // restore globals 5771 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; 5772 UseNUMAInterleaving = old_use_numa_interleaving; 5773 } 5774 #endif // PRODUCT 5775