1 /* 2 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce 26 #define _WIN32_WINNT 0x0600 27 28 // no precompiled headers 29 #include "classfile/classLoader.hpp" 30 #include "classfile/systemDictionary.hpp" 31 #include "classfile/vmSymbols.hpp" 32 #include "code/icBuffer.hpp" 33 #include "code/vtableStubs.hpp" 34 #include "compiler/compileBroker.hpp" 35 #include "compiler/disassembler.hpp" 36 #include "interpreter/interpreter.hpp" 37 #include "jvm_windows.h" 38 #include "logging/log.hpp" 39 #include "memory/allocation.inline.hpp" 40 #include "memory/filemap.hpp" 41 #include "mutex_windows.inline.hpp" 42 #include "oops/oop.inline.hpp" 43 #include "os_share_windows.hpp" 44 #include "os_windows.inline.hpp" 45 #include "prims/jniFastGetField.hpp" 46 #include "prims/jvm.h" 47 #include "prims/jvm_misc.hpp" 48 #include "runtime/arguments.hpp" 49 #include "runtime/atomic.inline.hpp" 50 #include "runtime/extendedPC.hpp" 51 #include "runtime/globals.hpp" 52 #include "runtime/interfaceSupport.hpp" 53 #include "runtime/java.hpp" 54 #include "runtime/javaCalls.hpp" 55 #include "runtime/mutexLocker.hpp" 56 #include "runtime/objectMonitor.hpp" 57 #include "runtime/orderAccess.inline.hpp" 58 #include "runtime/osThread.hpp" 59 #include "runtime/perfMemory.hpp" 60 #include "runtime/sharedRuntime.hpp" 61 #include "runtime/statSampler.hpp" 62 #include "runtime/stubRoutines.hpp" 63 #include "runtime/thread.inline.hpp" 64 #include "runtime/threadCritical.hpp" 65 #include "runtime/timer.hpp" 66 #include "runtime/vm_version.hpp" 67 #include "semaphore_windows.hpp" 68 #include "services/attachListener.hpp" 69 #include "services/memTracker.hpp" 70 #include "services/runtimeService.hpp" 71 #include "utilities/decoder.hpp" 72 #include "utilities/defaultStream.hpp" 73 #include "utilities/events.hpp" 74 #include "utilities/growableArray.hpp" 75 #include "utilities/vmError.hpp" 76 77 #ifdef _DEBUG 78 #include <crtdbg.h> 79 #endif 80 81 82 #include <windows.h> 83 
#include <sys/types.h> 84 #include <sys/stat.h> 85 #include <sys/timeb.h> 86 #include <objidl.h> 87 #include <shlobj.h> 88 89 #include <malloc.h> 90 #include <signal.h> 91 #include <direct.h> 92 #include <errno.h> 93 #include <fcntl.h> 94 #include <io.h> 95 #include <process.h> // For _beginthreadex(), _endthreadex() 96 #include <imagehlp.h> // For os::dll_address_to_function_name 97 // for enumerating dll libraries 98 #include <vdmdbg.h> 99 100 // for timer info max values which include all bits 101 #define ALL_64_BITS CONST64(-1) 102 103 // For DLL loading/load error detection 104 // Values of PE COFF 105 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c 106 #define IMAGE_FILE_SIGNATURE_LENGTH 4 107 108 static HANDLE main_process; 109 static HANDLE main_thread; 110 static int main_thread_id; 111 112 static FILETIME process_creation_time; 113 static FILETIME process_exit_time; 114 static FILETIME process_user_time; 115 static FILETIME process_kernel_time; 116 117 #ifdef _M_IA64 118 #define __CPU__ ia64 119 #else 120 #ifdef _M_AMD64 121 #define __CPU__ amd64 122 #else 123 #define __CPU__ i486 124 #endif 125 #endif 126 127 // save DLL module handle, used by GetModuleFileName 128 129 HINSTANCE vm_lib_handle; 130 131 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) { 132 switch (reason) { 133 case DLL_PROCESS_ATTACH: 134 vm_lib_handle = hinst; 135 if (ForceTimeHighResolution) { 136 timeBeginPeriod(1L); 137 } 138 break; 139 case DLL_PROCESS_DETACH: 140 if (ForceTimeHighResolution) { 141 timeEndPeriod(1L); 142 } 143 break; 144 default: 145 break; 146 } 147 return true; 148 } 149 150 static inline double fileTimeAsDouble(FILETIME* time) { 151 const double high = (double) ((unsigned int) ~0); 152 const double split = 10000000.0; 153 double result = (time->dwLowDateTime / split) + 154 time->dwHighDateTime * (high/split); 155 return result; 156 } 157 158 // Implementation of os 159 160 bool os::unsetenv(const char* name) { 161 assert(name != NULL, "Null pointer"); 
162 return (SetEnvironmentVariable(name, NULL) == TRUE); 163 } 164 165 // No setuid programs under Windows. 166 bool os::have_special_privileges() { 167 return false; 168 } 169 170 171 // This method is a periodic task to check for misbehaving JNI applications 172 // under CheckJNI, we can add any periodic checks here. 173 // For Windows at the moment does nothing 174 void os::run_periodic_checks() { 175 return; 176 } 177 178 // previous UnhandledExceptionFilter, if there is one 179 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL; 180 181 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo); 182 183 void os::init_system_properties_values() { 184 // sysclasspath, java_home, dll_dir 185 { 186 char *home_path; 187 char *dll_path; 188 char *pslash; 189 char *bin = "\\bin"; 190 char home_dir[MAX_PATH + 1]; 191 char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR"); 192 193 if (alt_home_dir != NULL) { 194 strncpy(home_dir, alt_home_dir, MAX_PATH + 1); 195 home_dir[MAX_PATH] = '\0'; 196 } else { 197 os::jvm_path(home_dir, sizeof(home_dir)); 198 // Found the full path to jvm.dll. 199 // Now cut the path to <java_home>/jre if we can. 
200 *(strrchr(home_dir, '\\')) = '\0'; // get rid of \jvm.dll 201 pslash = strrchr(home_dir, '\\'); 202 if (pslash != NULL) { 203 *pslash = '\0'; // get rid of \{client|server} 204 pslash = strrchr(home_dir, '\\'); 205 if (pslash != NULL) { 206 *pslash = '\0'; // get rid of \bin 207 } 208 } 209 } 210 211 home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal); 212 if (home_path == NULL) { 213 return; 214 } 215 strcpy(home_path, home_dir); 216 Arguments::set_java_home(home_path); 217 FREE_C_HEAP_ARRAY(char, home_path); 218 219 dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, 220 mtInternal); 221 if (dll_path == NULL) { 222 return; 223 } 224 strcpy(dll_path, home_dir); 225 strcat(dll_path, bin); 226 Arguments::set_dll_dir(dll_path); 227 FREE_C_HEAP_ARRAY(char, dll_path); 228 229 if (!set_boot_path('\\', ';')) { 230 return; 231 } 232 } 233 234 // library_path 235 #define EXT_DIR "\\lib\\ext" 236 #define BIN_DIR "\\bin" 237 #define PACKAGE_DIR "\\Sun\\Java" 238 { 239 // Win32 library search order (See the documentation for LoadLibrary): 240 // 241 // 1. The directory from which application is loaded. 242 // 2. The system wide Java Extensions directory (Java only) 243 // 3. System directory (GetSystemDirectory) 244 // 4. Windows directory (GetWindowsDirectory) 245 // 5. The PATH environment variable 246 // 6. The current directory 247 248 char *library_path; 249 char tmp[MAX_PATH]; 250 char *path_str = ::getenv("PATH"); 251 252 library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) + 253 sizeof(BIN_DIR) + (path_str ? 
strlen(path_str) : 0) + 10, mtInternal); 254 255 library_path[0] = '\0'; 256 257 GetModuleFileName(NULL, tmp, sizeof(tmp)); 258 *(strrchr(tmp, '\\')) = '\0'; 259 strcat(library_path, tmp); 260 261 GetWindowsDirectory(tmp, sizeof(tmp)); 262 strcat(library_path, ";"); 263 strcat(library_path, tmp); 264 strcat(library_path, PACKAGE_DIR BIN_DIR); 265 266 GetSystemDirectory(tmp, sizeof(tmp)); 267 strcat(library_path, ";"); 268 strcat(library_path, tmp); 269 270 GetWindowsDirectory(tmp, sizeof(tmp)); 271 strcat(library_path, ";"); 272 strcat(library_path, tmp); 273 274 if (path_str) { 275 strcat(library_path, ";"); 276 strcat(library_path, path_str); 277 } 278 279 strcat(library_path, ";."); 280 281 Arguments::set_library_path(library_path); 282 FREE_C_HEAP_ARRAY(char, library_path); 283 } 284 285 // Default extensions directory 286 { 287 char path[MAX_PATH]; 288 char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1]; 289 GetWindowsDirectory(path, MAX_PATH); 290 sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR, 291 path, PACKAGE_DIR, EXT_DIR); 292 Arguments::set_ext_dirs(buf); 293 } 294 #undef EXT_DIR 295 #undef BIN_DIR 296 #undef PACKAGE_DIR 297 298 #ifndef _WIN64 299 // set our UnhandledExceptionFilter and save any previous one 300 prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception); 301 #endif 302 303 // Done 304 return; 305 } 306 307 void os::breakpoint() { 308 DebugBreak(); 309 } 310 311 // Invoked from the BREAKPOINT Macro 312 extern "C" void breakpoint() { 313 os::breakpoint(); 314 } 315 316 // RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP. 317 // So far, this method is only used by Native Memory Tracking, which is 318 // only supported on Windows XP or later. 
319 // 320 int os::get_native_stack(address* stack, int frames, int toSkip) { 321 #ifdef _NMT_NOINLINE_ 322 toSkip++; 323 #endif 324 int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL); 325 for (int index = captured; index < frames; index ++) { 326 stack[index] = NULL; 327 } 328 return captured; 329 } 330 331 332 // os::current_stack_base() 333 // 334 // Returns the base of the stack, which is the stack's 335 // starting address. This function must be called 336 // while running on the stack of the thread being queried. 337 338 address os::current_stack_base() { 339 MEMORY_BASIC_INFORMATION minfo; 340 address stack_bottom; 341 size_t stack_size; 342 343 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 344 stack_bottom = (address)minfo.AllocationBase; 345 stack_size = minfo.RegionSize; 346 347 // Add up the sizes of all the regions with the same 348 // AllocationBase. 349 while (1) { 350 VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo)); 351 if (stack_bottom == (address)minfo.AllocationBase) { 352 stack_size += minfo.RegionSize; 353 } else { 354 break; 355 } 356 } 357 358 #ifdef _M_IA64 359 // IA64 has memory and register stacks 360 // 361 // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit 362 // at thread creation (1MB backing store growing upwards, 1MB memory stack 363 // growing downwards, 2MB summed up) 364 // 365 // ... 366 // ------- top of stack (high address) ----- 367 // | 368 // | 1MB 369 // | Backing Store (Register Stack) 370 // | 371 // | / \ 372 // | | 373 // | | 374 // | | 375 // ------------------------ stack base ----- 376 // | 1MB 377 // | Memory Stack 378 // | 379 // | | 380 // | | 381 // | | 382 // | \ / 383 // | 384 // ----- bottom of stack (low address) ----- 385 // ... 
386 387 stack_size = stack_size / 2; 388 #endif 389 return stack_bottom + stack_size; 390 } 391 392 size_t os::current_stack_size() { 393 size_t sz; 394 MEMORY_BASIC_INFORMATION minfo; 395 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 396 sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase; 397 return sz; 398 } 399 400 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 401 const struct tm* time_struct_ptr = localtime(clock); 402 if (time_struct_ptr != NULL) { 403 *res = *time_struct_ptr; 404 return res; 405 } 406 return NULL; 407 } 408 409 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo); 410 411 // Thread start routine for all new Java threads 412 static unsigned __stdcall java_start(Thread* thread) { 413 // Try to randomize the cache line index of hot stack frames. 414 // This helps when threads of the same stack traces evict each other's 415 // cache lines. The threads can be either from the same JVM instance, or 416 // from different JVM instances. The benefit is especially true for 417 // processors with hyperthreading technology. 418 static int counter = 0; 419 int pid = os::current_process_id(); 420 _alloca(((pid ^ counter++) & 7) * 128); 421 422 thread->initialize_thread_current(); 423 424 OSThread* osthr = thread->osthread(); 425 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 426 427 if (UseNUMA) { 428 int lgrp_id = os::numa_get_group_id(); 429 if (lgrp_id != -1) { 430 thread->set_lgrp_id(lgrp_id); 431 } 432 } 433 434 // Diagnostic code to investigate JDK-6573254 435 int res = 30115; // non-java thread 436 if (thread->is_Java_thread()) { 437 res = 20115; // java thread 438 } 439 440 log_debug(os)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id()); 441 442 // Install a win32 structured exception handler around every thread created 443 // by VM, so VM can generate error dump when an exception occurred in non- 444 // Java thread (e.g. VM thread). 
445 __try { 446 thread->run(); 447 } __except(topLevelExceptionFilter( 448 (_EXCEPTION_POINTERS*)_exception_info())) { 449 // Nothing to do. 450 } 451 452 log_debug(os)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id()); 453 454 // One less thread is executing 455 // When the VMThread gets here, the main thread may have already exited 456 // which frees the CodeHeap containing the Atomic::add code 457 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) { 458 Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count); 459 } 460 461 // Thread must not return from exit_process_or_thread(), but if it does, 462 // let it proceed to exit normally 463 return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res); 464 } 465 466 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, 467 int thread_id) { 468 // Allocate the OSThread object 469 OSThread* osthread = new OSThread(NULL, NULL); 470 if (osthread == NULL) return NULL; 471 472 // Initialize support for Java interrupts 473 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 474 if (interrupt_event == NULL) { 475 delete osthread; 476 return NULL; 477 } 478 osthread->set_interrupt_event(interrupt_event); 479 480 // Store info on the Win32 thread into the OSThread 481 osthread->set_thread_handle(thread_handle); 482 osthread->set_thread_id(thread_id); 483 484 if (UseNUMA) { 485 int lgrp_id = os::numa_get_group_id(); 486 if (lgrp_id != -1) { 487 thread->set_lgrp_id(lgrp_id); 488 } 489 } 490 491 // Initial thread state is INITIALIZED, not SUSPENDED 492 osthread->set_state(INITIALIZED); 493 494 return osthread; 495 } 496 497 498 bool os::create_attached_thread(JavaThread* thread) { 499 #ifdef ASSERT 500 thread->verify_not_published(); 501 #endif 502 HANDLE thread_h; 503 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(), 504 &thread_h, THREAD_ALL_ACCESS, false, 0)) { 505 fatal("DuplicateHandle failed\n"); 506 } 507 
OSThread* osthread = create_os_thread(thread, thread_h, 508 (int)current_thread_id()); 509 if (osthread == NULL) { 510 return false; 511 } 512 513 // Initial thread state is RUNNABLE 514 osthread->set_state(RUNNABLE); 515 516 thread->set_osthread(osthread); 517 518 log_debug(os)("Thread attached (tid: " UINTX_FORMAT ").", 519 os::current_thread_id()); 520 521 return true; 522 } 523 524 bool os::create_main_thread(JavaThread* thread) { 525 #ifdef ASSERT 526 thread->verify_not_published(); 527 #endif 528 if (_starting_thread == NULL) { 529 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 530 if (_starting_thread == NULL) { 531 return false; 532 } 533 } 534 535 // The primordial thread is runnable from the start) 536 _starting_thread->set_state(RUNNABLE); 537 538 thread->set_osthread(_starting_thread); 539 return true; 540 } 541 542 // Allocate and initialize a new OSThread 543 bool os::create_thread(Thread* thread, ThreadType thr_type, 544 size_t stack_size) { 545 unsigned thread_id; 546 547 // Allocate the OSThread object 548 OSThread* osthread = new OSThread(NULL, NULL); 549 if (osthread == NULL) { 550 return false; 551 } 552 553 // Initialize support for Java interrupts 554 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 555 if (interrupt_event == NULL) { 556 delete osthread; 557 return NULL; 558 } 559 osthread->set_interrupt_event(interrupt_event); 560 osthread->set_interrupted(false); 561 562 thread->set_osthread(osthread); 563 564 if (stack_size == 0) { 565 switch (thr_type) { 566 case os::java_thread: 567 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 568 if (JavaThread::stack_size_at_create() > 0) { 569 stack_size = JavaThread::stack_size_at_create(); 570 } 571 break; 572 case os::compiler_thread: 573 if (CompilerThreadStackSize > 0) { 574 stack_size = (size_t)(CompilerThreadStackSize * K); 575 break; 576 } // else fall through: 577 // use VMThreadStackSize if 
CompilerThreadStackSize is not defined 578 case os::vm_thread: 579 case os::pgc_thread: 580 case os::cgc_thread: 581 case os::watcher_thread: 582 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 583 break; 584 } 585 } 586 587 // Create the Win32 thread 588 // 589 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() 590 // does not specify stack size. Instead, it specifies the size of 591 // initially committed space. The stack size is determined by 592 // PE header in the executable. If the committed "stack_size" is larger 593 // than default value in the PE header, the stack is rounded up to the 594 // nearest multiple of 1MB. For example if the launcher has default 595 // stack size of 320k, specifying any size less than 320k does not 596 // affect the actual stack size at all, it only affects the initial 597 // commitment. On the other hand, specifying 'stack_size' larger than 598 // default value may cause significant increase in memory usage, because 599 // not only the stack space will be rounded up to MB, but also the 600 // entire space is committed upfront. 601 // 602 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' 603 // for CreateThread() that can treat 'stack_size' as stack size. However we 604 // are not supposed to call CreateThread() directly according to MSDN 605 // document because JVM uses C runtime library. The good news is that the 606 // flag appears to work with _beginthredex() as well. 
607 608 HANDLE thread_handle = 609 (HANDLE)_beginthreadex(NULL, 610 (unsigned)stack_size, 611 (unsigned (__stdcall *)(void*)) java_start, 612 thread, 613 CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 614 &thread_id); 615 616 if (thread_handle == NULL) { 617 log_warning(os)("Failed to start thread - _beginthreadex failed (%s).", 618 os::errno_name(errno)); 619 620 // Need to clean up stuff we've allocated so far 621 CloseHandle(osthread->interrupt_event()); 622 thread->set_osthread(NULL); 623 delete osthread; 624 return NULL; 625 } else { 626 log_debug(os)("Thread started (tid: %u)", thread_id); 627 } 628 629 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count); 630 631 // Store info on the Win32 thread into the OSThread 632 osthread->set_thread_handle(thread_handle); 633 osthread->set_thread_id(thread_id); 634 635 // Initial thread state is INITIALIZED, not SUSPENDED 636 osthread->set_state(INITIALIZED); 637 638 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 639 return true; 640 } 641 642 643 // Free Win32 resources related to the OSThread 644 void os::free_thread(OSThread* osthread) { 645 assert(osthread != NULL, "osthread not set"); 646 CloseHandle(osthread->thread_handle()); 647 CloseHandle(osthread->interrupt_event()); 648 delete osthread; 649 } 650 651 static jlong first_filetime; 652 static jlong initial_performance_count; 653 static jlong performance_frequency; 654 655 656 jlong as_long(LARGE_INTEGER x) { 657 jlong result = 0; // initialization to avoid warning 658 set_high(&result, x.HighPart); 659 set_low(&result, x.LowPart); 660 return result; 661 } 662 663 664 jlong os::elapsed_counter() { 665 LARGE_INTEGER count; 666 QueryPerformanceCounter(&count); 667 return as_long(count) - initial_performance_count; 668 } 669 670 671 jlong os::elapsed_frequency() { 672 return performance_frequency; 673 } 674 675 676 julong os::available_memory() { 677 return win32::available_memory(); 678 } 679 680 
julong os::win32::available_memory() { 681 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 682 // value if total memory is larger than 4GB 683 MEMORYSTATUSEX ms; 684 ms.dwLength = sizeof(ms); 685 GlobalMemoryStatusEx(&ms); 686 687 return (julong)ms.ullAvailPhys; 688 } 689 690 julong os::physical_memory() { 691 return win32::physical_memory(); 692 } 693 694 bool os::has_allocatable_memory_limit(julong* limit) { 695 MEMORYSTATUSEX ms; 696 ms.dwLength = sizeof(ms); 697 GlobalMemoryStatusEx(&ms); 698 #ifdef _LP64 699 *limit = (julong)ms.ullAvailVirtual; 700 return true; 701 #else 702 // Limit to 1400m because of the 2gb address space wall 703 *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual); 704 return true; 705 #endif 706 } 707 708 int os::active_processor_count() { 709 DWORD_PTR lpProcessAffinityMask = 0; 710 DWORD_PTR lpSystemAffinityMask = 0; 711 int proc_count = processor_count(); 712 if (proc_count <= sizeof(UINT_PTR) * BitsPerByte && 713 GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) { 714 // Nof active processors is number of bits in process affinity mask 715 int bitcount = 0; 716 while (lpProcessAffinityMask != 0) { 717 lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1); 718 bitcount++; 719 } 720 return bitcount; 721 } else { 722 return proc_count; 723 } 724 } 725 726 void os::set_native_thread_name(const char *name) { 727 728 // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx 729 // 730 // Note that unfortunately this only works if the process 731 // is already attached to a debugger; debugger must observe 732 // the exception below to show the correct name. 
733 734 const DWORD MS_VC_EXCEPTION = 0x406D1388; 735 struct { 736 DWORD dwType; // must be 0x1000 737 LPCSTR szName; // pointer to name (in user addr space) 738 DWORD dwThreadID; // thread ID (-1=caller thread) 739 DWORD dwFlags; // reserved for future use, must be zero 740 } info; 741 742 info.dwType = 0x1000; 743 info.szName = name; 744 info.dwThreadID = -1; 745 info.dwFlags = 0; 746 747 __try { 748 RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info ); 749 } __except(EXCEPTION_CONTINUE_EXECUTION) {} 750 } 751 752 bool os::distribute_processes(uint length, uint* distribution) { 753 // Not yet implemented. 754 return false; 755 } 756 757 bool os::bind_to_processor(uint processor_id) { 758 // Not yet implemented. 759 return false; 760 } 761 762 void os::win32::initialize_performance_counter() { 763 LARGE_INTEGER count; 764 QueryPerformanceFrequency(&count); 765 performance_frequency = as_long(count); 766 QueryPerformanceCounter(&count); 767 initial_performance_count = as_long(count); 768 } 769 770 771 double os::elapsedTime() { 772 return (double) elapsed_counter() / (double) elapsed_frequency(); 773 } 774 775 776 // Windows format: 777 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601. 
// Java format:
//   Java standards require the number of milliseconds since 1/1/1970

// Constant offset - calculated using offset()
// (number of 100ns ticks between 1601-01-01 and 1970-01-01)
static jlong _offset = 116444736000000000;
// Fake time counter for reproducible results when debugging
static jlong fake_time = 0;

#ifdef ASSERT
// Just to be safe, recalculate the offset in debug mode
static jlong _calculated_offset = 0;
static int _has_calculated_offset = 0;

// Debug-build offset(): derive the 1601->1970 epoch offset from the OS
// itself (SystemTimeToFileTime) once, cache it, and assert that it matches
// the hard-coded _offset constant above.
jlong offset() {
  if (_has_calculated_offset) return _calculated_offset;
  SYSTEMTIME java_origin;
  java_origin.wYear = 1970;
  java_origin.wMonth = 1;
  java_origin.wDayOfWeek = 0;  // ignored
  java_origin.wDay = 1;
  java_origin.wHour = 0;
  java_origin.wMinute = 0;
  java_origin.wSecond = 0;
  java_origin.wMilliseconds = 0;
  FILETIME jot;
  if (!SystemTimeToFileTime(&java_origin, &jot)) {
    fatal("Error = %d\nWindows error", GetLastError());
  }
  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
  _has_calculated_offset = 1;
  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
  return _calculated_offset;
}
#else
// Product-build offset(): just the precomputed constant.
jlong offset() {
  return _offset;
}
#endif

// FILETIME (100ns ticks since 1601) -> Java milliseconds since 1970.
jlong windows_to_java_time(FILETIME wt) {
  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  // 10000 ticks of 100ns each per millisecond.
  return (a - offset()) / 10000;
}

// Returns time ticks in (10th of micro seconds)
jlong windows_to_time_ticks(FILETIME wt) {
  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  return (a - offset());
}

// Inverse of windows_to_java_time(): Java milliseconds -> FILETIME.
FILETIME java_to_windows_time(jlong l) {
  jlong a = (l * 10000) + offset();
  FILETIME result;
  result.dwHighDateTime = high(a);
  result.dwLowDateTime = low(a);
  return result;
}

bool os::supports_vtime() { return true; }
bool os::enable_vtime() { return false; }
bool os::vtime_enabled() { return false; }

// Per-thread CPU time (kernel + user) in seconds; falls back to wall-clock
// elapsed time if GetThreadTimes() fails.
double os::elapsedVTime() {
  FILETIME created;
  FILETIME exited;
  FILETIME kernel;
  FILETIME user;
  if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
    // the resolution of windows_to_java_time() should be sufficient (ms)
    return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
  } else {
    return elapsedTime();
  }
}

// Wall-clock time in Java format (ms since the 1970 epoch).
jlong os::javaTimeMillis() {
  if (UseFakeTimers) {
    // Deterministic, monotonically increasing counter for debugging.
    return fake_time++;
  } else {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    return windows_to_java_time(wt);
  }
}

// Wall-clock time split into whole seconds and nanoseconds-of-second.
void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  FILETIME wt;
  GetSystemTimeAsFileTime(&wt);
  jlong ticks = windows_to_time_ticks(wt);  // 10th of micros
  jlong secs = jlong(ticks / 10000000);  // 10000 * 1000
  seconds = secs;
  // Remaining 100ns ticks scaled up to nanoseconds.
  nanos = jlong(ticks - (secs*10000000)) * 100;
}

// High-resolution monotonic time from the performance counter, scaled to
// nanoseconds.
jlong os::javaTimeNanos() {
  LARGE_INTEGER current_count;
  QueryPerformanceCounter(&current_count);
  double current = as_long(current_count);
  double freq = performance_frequency;
  jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
  return time;
}

// JVMTI timer metadata for javaTimeNanos(): report the maximum value the
// scaled counter can hold and that it never skips in either direction.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  jlong freq = performance_frequency;
  if (freq < NANOSECS_PER_SEC) {
    // the performance counter is 64 bits and we will
    // be multiplying it -- so no wrap in 64 bits
    info_ptr->max_value = ALL_64_BITS;
  } else if (freq > NANOSECS_PER_SEC) {
    // use the max value the counter can reach to
    // determine the max value which could be returned
    julong max_counter = (julong)ALL_64_BITS;
    info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
  } else {
    // the performance counter is 64 bits and we will
    // be using it directly -- so no wrap in 64 bits
    info_ptr->max_value = ALL_64_BITS;
  }

  // using a counter, so no skipping
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;

  info_ptr->kind = JVMTI_TIMER_ELAPSED;  // elapsed not CPU time
}

// Format the current local time as "YYYY-MM-DD HH:MM:SS" into buf.
char* os::local_time_string(char *buf, size_t buflen) {
  SYSTEMTIME st;
  GetLocalTime(&st);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
  return buf;
}

// Fill in real, user and system time (in seconds) for the current process.
// Returns false if GetProcessTimes() fails.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  HANDLE h_process = GetCurrentProcess();
  FILETIME create_time, exit_time, kernel_time, user_time;
  BOOL result = GetProcessTimes(h_process,
                                &create_time,
                                &exit_time,
                                &kernel_time,
                                &user_time);
  if (result != 0) {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    jlong rtc_millis = windows_to_java_time(wt);
    jlong user_millis = windows_to_java_time(user_time);
    jlong system_millis = windows_to_java_time(kernel_time);
    *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
    *process_user_time = ((double) user_millis) / ((double) MILLIUNITS);
    *process_system_time = ((double) system_millis) / ((double) MILLIUNITS);
    return true;
  } else {
    return false;
  }
}

// Orderly VM shutdown: release perf memory, flush streams, run abort hook.
void os::shutdown() {
  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}


// Pointer to dbghelp.dll's MiniDumpWriteDump(), resolved lazily in
// os::abort() below.
static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
                                         PMINIDUMP_EXCEPTION_INFORMATION,
                                         PMINIDUMP_USER_STREAM_INFORMATION,
                                         PMINIDUMP_CALLBACK_INFORMATION);

// Handle of the minidump file, opened in check_dump_limit().
static HANDLE dumpFile = NULL;

// Check if dump file can be created.
// Decide whether a minidump will be written on crash and, if so, open the
// dump file now. The outcome (path or reason for refusal) is written into
// 'buffer' and recorded via VMError::record_coredump_status().
void os::check_dump_limit(char* buffer, size_t buffsz) {
  bool status = true;
  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
    jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
    status = false;
  }

#ifndef ASSERT
  // In product builds, only enable dumps by default on server editions.
  if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
    jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
    status = false;
  }
#endif

  if (status) {
    // NOTE(review): get_current_directory(NULL, 0) appears to return
    // _getcwd()-allocated storage that is never freed here — confirm
    // whether this one-shot leak on the crash path is intentional.
    const char* cwd = get_current_directory(NULL, 0);
    int pid = current_process_id();
    if (cwd != NULL) {
      jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
    } else {
      jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
    }

    // Open the dump file eagerly so a failure is reported up front.
    if (dumpFile == NULL &&
        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
        == INVALID_HANDLE_VALUE) {
      jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
      status = false;
    }
  }
  VMError::record_coredump_status(buffer, status);
}

// Abort the VM: run shutdown(), optionally write a minidump of the crash
// context via dbghelp.dll, then terminate the process with exit code 1.
// Never returns.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  HINSTANCE dbghelp;
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  MINIDUMP_TYPE dumpType;

  shutdown();
  if (!dump_core || dumpFile == NULL) {
    // No dump requested or no file prepared; just exit.
    if (dumpFile != NULL) {
      CloseHandle(dumpFile);
    }
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);

  if (dbghelp == NULL) {
    jio_fprintf(stderr, "Failed to load dbghelp.dll\n");
    CloseHandle(dumpFile);
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  // Resolve MiniDumpWriteDump() dynamically; it may be absent in old DLLs.
  _MiniDumpWriteDump =
      CAST_TO_FN_PTR(BOOL(WINAPI *)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
                                    PMINIDUMP_EXCEPTION_INFORMATION,
                                    PMINIDUMP_USER_STREAM_INFORMATION,
                                    PMINIDUMP_CALLBACK_INFORMATION),
                                    GetProcAddress(dbghelp,
                                    "MiniDumpWriteDump"));

  if (_MiniDumpWriteDump == NULL) {
    jio_fprintf(stderr, "Failed to find MiniDumpWriteDump() in module dbghelp.dll.\n");
    CloseHandle(dumpFile);
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
                             MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);

  // If we have exception context, embed it so the dump opens at the fault.
  if (siginfo != NULL && context != NULL) {
    ep.ContextRecord = (PCONTEXT) context;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }

  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
    jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
  }
  CloseHandle(dumpFile);
  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
//               * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.
1067 1068 // Caller must have already run dirname through JVM_NativePath, which removes 1069 // duplicate slashes and converts all instances of '/' into '\\'. 1070 1071 DIR * os::opendir(const char *dirname) { 1072 assert(dirname != NULL, "just checking"); // hotspot change 1073 DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal); 1074 DWORD fattr; // hotspot change 1075 char alt_dirname[4] = { 0, 0, 0, 0 }; 1076 1077 if (dirp == 0) { 1078 errno = ENOMEM; 1079 return 0; 1080 } 1081 1082 // Win32 accepts "\" in its POSIX stat(), but refuses to treat it 1083 // as a directory in FindFirstFile(). We detect this case here and 1084 // prepend the current drive name. 1085 // 1086 if (dirname[1] == '\0' && dirname[0] == '\\') { 1087 alt_dirname[0] = _getdrive() + 'A' - 1; 1088 alt_dirname[1] = ':'; 1089 alt_dirname[2] = '\\'; 1090 alt_dirname[3] = '\0'; 1091 dirname = alt_dirname; 1092 } 1093 1094 dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal); 1095 if (dirp->path == 0) { 1096 free(dirp); 1097 errno = ENOMEM; 1098 return 0; 1099 } 1100 strcpy(dirp->path, dirname); 1101 1102 fattr = GetFileAttributes(dirp->path); 1103 if (fattr == 0xffffffff) { 1104 free(dirp->path); 1105 free(dirp); 1106 errno = ENOENT; 1107 return 0; 1108 } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) { 1109 free(dirp->path); 1110 free(dirp); 1111 errno = ENOTDIR; 1112 return 0; 1113 } 1114 1115 // Append "*.*", or possibly "\\*.*", to path 1116 if (dirp->path[1] == ':' && 1117 (dirp->path[2] == '\0' || 1118 (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) { 1119 // No '\\' needed for cases like "Z:" or "Z:\" 1120 strcat(dirp->path, "*.*"); 1121 } else { 1122 strcat(dirp->path, "\\*.*"); 1123 } 1124 1125 dirp->handle = FindFirstFile(dirp->path, &dirp->find_data); 1126 if (dirp->handle == INVALID_HANDLE_VALUE) { 1127 if (GetLastError() != ERROR_FILE_NOT_FOUND) { 1128 free(dirp->path); 1129 free(dirp); 1130 errno = EACCES; 1131 return 0; 1132 } 1133 } 1134 return dirp; 1135 } 1136 1137 
// Read one entry from a directory stream opened by os::opendir.
// Returns a pointer to storage inside *dirp (valid until the next call),
// or NULL at end-of-stream / on a broken handle (errno = EBADF).
// parameter dbuf unused on Windows
struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
  assert(dirp != NULL, "just checking");  // hotspot change
  // handle == INVALID_HANDLE_VALUE means the stream is exhausted, or the
  // directory was empty when opened (see os::opendir).
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    return 0;
  }

  // The entry being returned was pre-fetched -- by FindFirstFile in
  // opendir for the first call, or by FindNextFile below on the
  // previous call.
  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  // Pre-fetch the next entry; when the stream runs out, close the find
  // handle now so the next call reports end-of-stream.
  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return 0;
    }
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}

// Close a stream opened by os::opendir and release its memory.
// Returns 0 on success, -1 (errno = EBADF) if FindClose fails.
int os::closedir(DIR *dirp) {
  assert(dirp != NULL, "just checking");  // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path);
  free(dirp);
  return 0;
}

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
// Returns a pointer to a static buffer; "" (never NULL) on failure.
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf) > 0) {
    return path_buf;
  } else {
    path_buf[0] = '\0';
    return path_buf;
  }
}

// True if 'filename' names an existing file or directory of any kind.
static bool file_exists(const char* filename) {
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
}

// Build a full DLL path "pname\fname.dll" into 'buffer'. When 'pname' is
// a path-separator-delimited search list, each element is tried in turn
// and the first existing file wins. Returns false on overflow or when no
// candidate from a search list exists.
bool os::dll_build_name(char *buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  const size_t pnamelen = pname ? strlen(pname) : 0;
  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;

  // Return error on buffer overflow.
  // (+10 covers the "\\" joiner, the ".dll" suffix and the NUL, with slack.)
  if (pnamelen + strlen(fname) + 10 > buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    jio_snprintf(buffer, buflen, "%s.dll", fname);
    retval = true;
  } else if (c == ':' || c == '\\') {
    // pname already ends at a path boundary -- no joiner needed.
    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // pname is a search path: probe each element for an existing file.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      char* path = pelements[i];
      // Really shouldn't be NULL, but check can't hurt
      size_t plen = (path == NULL) ? 0 : strlen(path);
      if (plen == 0) {
        continue; // skip the empty path values
      }
      const char lastchar = path[plen - 1];
      if (lastchar == ':' || lastchar == '\\') {
        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
      } else {
        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
      }
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
    retval = true;
  }
  return retval;
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
const char* os::get_current_directory(char *buf, size_t buflen) {
  // _getcwd takes an int length; clamp instead of truncating silently.
  int n = static_cast<int>(buflen);
  if (buflen > INT_MAX) n = INT_MAX;
  return _getcwd(buf, n);
}

//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if address is
// within the NTDLL address space.
//
static bool _addr_in_ntdll(address addr) {
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if (hmod == NULL) return false;
  if (!GetModuleInformation(GetCurrentProcess(), hmod,
                            &minfo, sizeof(MODULEINFO))) {
    return false;
  }

  if ((addr >= minfo.lpBaseOfDll) &&
      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
    return true;
  } else {
    return false;
  }
}
#endif

// Argument/result carrier for _locate_module_by_addr.
struct _modinfo {
  address addr;       // in:  address to look up
  char* full_path;    // point to a char buffer
  int buflen;         // size of the buffer
  address base_addr;  // out: base of the module containing 'addr'
};

// get_loaded_modules_info callback: returns 1 (stop iteration) when the
// module whose range covers pmod->addr is found, recording its path/base.
static int _locate_module_by_addr(const char * mod_fname, address base_addr,
                                  address top_address, void * param) {
  struct _modinfo *pmod = (struct _modinfo *)param;
  if (!pmod) return -1;

  if (base_addr <= pmod->addr &&
      top_address > pmod->addr) {
    // if a buffer is provided, copy path name to the buffer
    if (pmod->full_path) {
      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
    }
    pmod->base_addr = base_addr;
    return 1;
  }
  return 0;
}

// Find the library containing 'addr'; writes its path into 'buf' and, if
// 'offset' is non-NULL, the offset of addr from the library's base.
// On failure buf is set to "" and *offset to -1.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
  //       return the full path to the DLL file, sometimes it returns path
  //       to the corresponding PDB file (debug info); sometimes it only
  //       returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr = addr;
  mi.full_path = buf;
  mi.buflen = buflen;
  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

// Symbolize 'addr' via the Decoder; on failure buf is "" and *offset -1.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
    return true;
  }
  if (offset != NULL) *offset = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
// (this function's own address serves as a probe that is guaranteed to
// lie inside jvm.dll)
static int _locate_jvm_dll(const char* mod_fname, address base_addr,
                           address top_address, void * param) {
  if (!param) return -1;

  if (base_addr <= (address)_locate_jvm_dll &&
      top_address > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = top_address;
    return 1;
  }
  return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
bool os::address_is_in_vm(address addr) {
  // Lazily resolve and cache the jvm.dll range on first use.
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(const char* fname, address base_address,
                         address top_address, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n",
            base_address, top_address, fname);
  return 0;
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
  void * result = LoadLibrary(name);
  if (result != NULL) {
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
    ebuf[ebuflen - 1] = '\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen - 1] = '\0';
  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
  if (fd < 0) {
    return NULL;
  }

  // Parse just enough of the PE image to learn the target machine type:
  // offset 0x3c holds the file offset of the "PE\0\0" signature, and the
  // COFF header (whose first field is the machine code) follows it.
  uint32_t signature_offset;
  uint16_t lib_arch = 0;
  bool failed_to_get_lib_arch =
    ( // Go to position 3c in the dll
     (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
     ||
     // Read location of signature
     (sizeof(signature_offset) !=
     (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
     ||
     // Go to COFF File Header in dll
     // that is located after "signature" (4 bytes long)
     (os::seek_to_file_offset(fd,
     signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
     ||
     // Read field that contains code of architecture
     // that dll was built for
     (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
    );

  ::close(fd);
  if (failed_to_get_lib_arch) {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  typedef struct {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[] = {
    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
  };
#if (defined _M_IA64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_IA64;
#elif (defined _M_AMD64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
#else
#error Method os::dll_load requires that one of following \
       is defined :_M_IA64,_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str = NULL, *lib_arch_str = NULL;
  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (lib_arch == arch_array[i].arch_code) {
      lib_arch_str = arch_array[i].arch_name;
    }
    if (running_arch == arch_array[i].arch_code) {
      running_arch_str = arch_array[i].arch_name;
    }
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch) {
    return NULL;
  }

  if (lib_arch_str != NULL) {
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str, running_arch_str);
  } else {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch, running_arch_str);
  }

  return NULL;
}

// Print base/top/path of every loaded module to 'st'.
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  get_loaded_modules_info(_print_module, (void *)st);
}

// Enumerate loaded modules via PSAPI, invoking 'callback' for each until
// it returns non-zero; returns that value (0 if all modules were visited
// or the process/module information could not be obtained).
// NOTE(review): 'filename' is a static buffer, so this is not reentrant
// for concurrent callers -- presumably only used from error reporting;
// confirm before adding new call sites.
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  HANDLE   hProcess;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[MAX_PATH];
  int         result = 0;

  int pid = os::current_process_id();
  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  // Cap at MAX_NUM_MODULES: modules beyond the fixed array are silently
  // not visited.
  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = callback(filename, (address)modinfo.lpBaseOfDll,
                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
}

// DNS host name of this machine into 'buf'; true on success.
bool os::get_host_name(char* buf, size_t buflen) {
  DWORD size = (DWORD)buflen;
  return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
}

// One-line OS description (the print_windows_version output, first line only).
void os::get_summary_os_info(char* buf, size_t buflen) {
  stringStream sst(buf, buflen);
  os::win32::print_windows_version(&sst);
  // chop off newline character
  char* nl = strchr(buf, '\n');
  if (nl != NULL) *nl = '\0';
}

// vsnprintf wrapper for UL: on truncation (negative return from the MSVC
// CRT) report the size the full output would need.
// NOTE(review): 'args' is consumed by vsnprintf and then passed again to
// _vscprintf -- this relies on the MSVC va_list being trivially reusable;
// confirm callers va_copy if this is ever made portable.
int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
  int ret = vsnprintf(buf, len, fmt, args);
  // Get the correct buffer size if buf is too small
  if (ret < 0) {
    return _vscprintf(fmt, args);
  }
  return ret;
}

void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}

void os::print_os_info(outputStream* st) {
#ifdef ASSERT
  char buffer[1024];
  st->print("HostName: ");
  if (get_host_name(buffer, sizeof(buffer))) {
    st->print("%s ", buffer);
  } else {
    st->print("N/A ");
  }
#endif
  st->print("OS:");
  os::win32::print_windows_version(st);
}

// Print a human-readable Windows version line, e.g.
// " Windows Server 2012 R2 , 64 bit Build 9600 (6.3.9600.17415)".
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  VS_FIXEDFILEINFO *file_info;
  TCHAR kernel32_path[MAX_PATH];
  UINT len, ret;

  // Use the GetVersionEx information to see if we're on a server or
  // workstation edition of Windows. Starting with Windows 8.1 we can't
  // trust the OS version information returned by this API.
  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("Call to GetVersionEx failed");
    return;
  }
  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);

  // Get the full path to \Windows\System32\kernel32.dll and use that for
  // determining what version of Windows we're running on.
  // (The file version of kernel32.dll is not subject to the manifest-based
  // version lie that affects GetVersionEx on 8.1+.)
  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
  ret = GetSystemDirectory(kernel32_path, len);
  if (ret == 0 || ret > len) {
    st->print_cr("Call to GetSystemDirectory failed");
    return;
  }
  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);

  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
  if (version_size == 0) {
    st->print_cr("Call to GetFileVersionInfoSize failed");
    return;
  }

  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
  if (version_info == NULL) {
    st->print_cr("Failed to allocate version_info");
    return;
  }

  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
    os::free(version_info);
    st->print_cr("Call to GetFileVersionInfo failed");
    return;
  }

  // "\\" queries the root block: the VS_FIXEDFILEINFO structure.
  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
    os::free(version_info);
    st->print_cr("Call to VerQueryValue failed");
    return;
  }

  int major_version = HIWORD(file_info->dwProductVersionMS);
  int minor_version = LOWORD(file_info->dwProductVersionMS);
  int build_number = HIWORD(file_info->dwProductVersionLS);
  int build_minor = LOWORD(file_info->dwProductVersionLS);
  int os_vers = major_version * 1000 + minor_version;
  os::free(version_info);

  st->print(" Windows ");
  switch (os_vers) {

  case 6000:
    if (is_workstation) {
      st->print("Vista");
    } else {
      st->print("Server 2008");
    }
    break;

  case 6001:
    if (is_workstation) {
      st->print("7");
    } else {
      st->print("Server 2008 R2");
    }
    break;

  case 6002:
    if (is_workstation) {
      st->print("8");
    } else {
      st->print("Server 2012");
    }
    break;

  case 6003:
    if (is_workstation) {
      st->print("8.1");
    } else {
      st->print("Server 2012 R2");
    }
    break;

  case 10000:
    if (is_workstation) {
      st->print("10");
    } else {
      // The server version name of Windows 10 is not known at this time
      st->print("%d.%d", major_version, minor_version);
    }
    break;

  default:
    // Unrecognized windows, print out its major and minor versions
    st->print("%d.%d", major_version, minor_version);
    break;
  }

  // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
  // find out whether we are running on 64 bit processor or not
  SYSTEM_INFO si;
  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  GetNativeSystemInfo(&si);
  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
    st->print(" , 64 bit");
  }

  st->print(" Build %d", build_number);
  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
  st->cr();
}

void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  // Nothing to do for now.
}

// CPU brand string from the registry into 'buf'; "## __CPU__" placeholder
// when the registry query fails.
// NOTE(review): REG_SZ data read via RegQueryValueEx is not guaranteed to
// be NUL-terminated when the value fills the buffer -- confirm buflen is
// always generous at the call sites.
void os::get_summary_cpu_info(char* buf, size_t buflen) {
  HKEY key;
  DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
  if (status == ERROR_SUCCESS) {
    DWORD size = (DWORD)buflen;
    status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
    if (status != ERROR_SUCCESS) {
      strncpy(buf, "## __CPU__", buflen);
    }
    RegCloseKey(key);
  } else {
    // Put generic cpu info to return
    strncpy(buf, "## __CPU__", buflen);
  }
}

void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);

  // NOTE(review): the quantities below are 64-bit (julong / DWORDLONG)
  // but are formatted with "%u", so values above 4G kilobytes print
  // truncated/garbled on varargs promotion -- should use a 64-bit format
  // specifier (e.g. JULONG_FORMAT); fix tracked separately.
  st->print(", physical %uk", os::physical_memory() >> 10);
  st->print("(%uk free)", os::available_memory() >> 10);

  st->print(", swap %uk", ms.ullTotalPageFile >> 10);
  st->print("(%uk free)", ms.ullAvailPageFile >> 10);
  st->cr();
}

// Decode a Windows EXCEPTION_RECORD (passed as 'siginfo') for hs_err output.
void os::print_siginfo(outputStream *st, const void* siginfo) {
  const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
  st->print("siginfo:");

  char tmp[64];
  if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
    strcpy(tmp, "EXCEPTION_??");
  }
  st->print(" %s (0x%x)", tmp, er->ExceptionCode);

  // For access violations / in-page errors the first parameter encodes
  // the access kind and the second the faulting address.
  if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
       er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
       er->NumberParameters >= 2) {
    switch (er->ExceptionInformation[0]) {
    case 0: st->print(", reading address"); break;
    case 1: st->print(", writing address"); break;
    case 8: st->print(", data execution prevention violation at address"); break;
    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                       er->ExceptionInformation[0]);
    }
    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else {
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}

// Cache for os::jvm_path; filled on first successful lookup.
static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like jvm.dll is installed there (append a fake suffix
    // hotspot/jvm.dll).
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {
      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      // Probe for the "jre\bin" layout; fall back to plain "bin".
      if (0 != _access(buf, 0)) {
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  // Not an altjvm launch (or JAVA_HOME unusable): ask the OS for the
  // path of the already-loaded jvm.dll.
  if (buf[0] == '\0') {
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  strncpy(saved_jvm_path, buf, MAX_PATH);
  saved_jvm_path[MAX_PATH - 1] = '\0';
}


void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  // 32-bit __stdcall decoration: leading underscore.
  st->print("_");
#endif
}


void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  // 32-bit __stdcall decoration: "@<bytes of arguments>".
  st->print("@%d", args_size * sizeof(int));
#endif
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c
// Format the last Win32 (or, failing that, CRT) error into 'buf';
// returns the message length, 0 if there was no pending error.
size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
                                     FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
                                     NULL,
                                     errval,
                                     0,
                                     buf,
                                     (DWORD)len,
                                     NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}

// Last Win32 error code, falling back to the CRT errno when none is set.
int os::get_last_error() {
  DWORD error = GetLastError();
  if (error == 0) {
    error = errno;
  }
  return (int)error;
}

WindowsSemaphore::WindowsSemaphore(uint value) {
  _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL);

  guarantee(_semaphore != NULL, "CreateSemaphore failed with error code: %lu", GetLastError());
}

WindowsSemaphore::~WindowsSemaphore() {
  ::CloseHandle(_semaphore);
}

void WindowsSemaphore::signal(uint count) {
  if (count > 0) {
    BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL);

    assert(ret != 0, "ReleaseSemaphore failed with error code: %lu", GetLastError());
  }
}

void WindowsSemaphore::wait() {
  DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE);
  assert(ret != WAIT_FAILED, "WaitForSingleObject failed with error code: %lu", GetLastError());
  assert(ret == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", ret);
}

// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;

static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}

void* os::user_handler() {
  return (void*) UserHandler;
}

// Install 'handler' for 'signal_number'. SIGBREAK (unless -Xrs) is routed
// through our console control handler rather than the CRT, so its handler
// is just recorded in sigbreakHandler; every other signal goes to the CRT.
// Returns the previous handler.
void* os::signal(int signal_number, void* handler) {
  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
    void (*oldHandler)(int) = sigbreakHandler;
    sigbreakHandler = (void (*)(int)) handler;
    return (void*) oldHandler;
  } else {
    return (void*)::signal(signal_number, (void (*)(int))handler);
  }
}

void os::signal_raise(int signal_number) {
  raise(signal_number);
}

// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events.  We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
  switch (event) {
  case CTRL_C_EVENT:
    if (is_error_reported()) {
      // Ctrl-C is pressed during error reporting, likely because the error
      // handler fails to abort. Let VM die immediately.
      os::die();
    }

    os::signal_raise(SIGINT);
    return TRUE;
    break;
  case CTRL_BREAK_EVENT:
    if (sigbreakHandler != NULL) {
      (*sigbreakHandler)(SIGBREAK);
    }
    return TRUE;
    break;
  case CTRL_LOGOFF_EVENT: {
    // Don't terminate JVM if it is running in a non-interactive session,
    // such as a service process.
    USEROBJECTFLAGS flags;
    HANDLE handle = GetProcessWindowStation();
    if (handle != NULL &&
        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
        sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let next handler to deal
      // with it.
      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
        return FALSE;
      }
    }
  }
  // Deliberate fall-through: an interactive logoff is handled exactly
  // like close/shutdown below.
  case CTRL_CLOSE_EVENT:
  case CTRL_SHUTDOWN_EVENT:
    os::signal_raise(SIGTERM);
    return TRUE;
    break;
  default:
    break;
  }
  return FALSE;
}

// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Counting semaphore released once per pending signal; the signal thread
// blocks on it in check_pending_signals().
static HANDLE sig_sem = NULL;

void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.  For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified.  This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case.  See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}

// Record that 'signal_number' occurred and wake the signal thread.
void os::signal_notify(int signal_number) {
  BOOL ret;
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[signal_number]);
    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
    assert(ret != 0, "ReleaseSemaphore() failed");
  }
}

// Scan pending_signals for a raised signal and claim one occurrence of it
// (CAS-decrement). If none is pending: return -1 when !wait_for_signal,
// otherwise block on sig_sem (as a blocked-in-VM thread, cooperating with
// the external-suspension protocol) and rescan.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // CAS guards against another thread claiming the same occurrence.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        // Re-release the count we consumed so no notification is lost.
        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
        assert(ret != 0, "ReleaseSemaphore() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

int os::signal_lookup() {
  return check_pending_signals(false);
}

int os::signal_wait() {
  return check_pending_signals(true);
}

// Implicit OS exception handling

// Redirect the faulting thread to 'handler': save the faulting pc in the
// thread (for later stack reconstruction) and rewrite the context's pc so
// that resuming execution enters the handler stub.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
                      address handler) {
  JavaThread* thread = (JavaThread*) Thread::current_or_null();
  // Save pc in thread
#ifdef _M_IA64
  // Do not blow up if no thread info available.
  if (thread) {
    // Saving PRECISE pc (with slot information) in thread.
    uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
    // Convert precise PC into "Unix" format
    precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
    thread->set_saved_exception_pc((address)precise_pc);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
  // Clear out psr.ri (= Restart Instruction) in order to continue
  // at the beginning of the target bundle.
  exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
  assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
#else
#ifdef _M_AMD64
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
#else
  // Do not blow up if no thread info available.
2125 if (thread) { 2126 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip); 2127 } 2128 // Set pc to handler 2129 exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler; 2130 #endif 2131 #endif 2132 2133 // Continue the execution 2134 return EXCEPTION_CONTINUE_EXECUTION; 2135 } 2136 2137 2138 // Used for PostMortemDump 2139 extern "C" void safepoints(); 2140 extern "C" void find(int x); 2141 extern "C" void events(); 2142 2143 // According to Windows API documentation, an illegal instruction sequence should generate 2144 // the 0xC000001C exception code. However, real world experience shows that occasionnaly 2145 // the execution of an illegal instruction can generate the exception code 0xC000001E. This 2146 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems). 2147 2148 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E 2149 2150 // From "Execution Protection in the Windows Operating System" draft 0.35 2151 // Once a system header becomes available, the "real" define should be 2152 // included or copied here. 2153 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08 2154 2155 // Handle NAT Bit consumption on IA64. 2156 #ifdef _M_IA64 2157 #define EXCEPTION_REG_NAT_CONSUMPTION STATUS_REG_NAT_CONSUMPTION 2158 #endif 2159 2160 // Windows Vista/2008 heap corruption check 2161 #define EXCEPTION_HEAP_CORRUPTION 0xC0000374 2162 2163 #define def_excpt(val) #val, val 2164 2165 struct siglabel { 2166 char *name; 2167 int number; 2168 }; 2169 2170 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual 2171 // C++ compiler contain this error code. Because this is a compiler-generated 2172 // error, the code is not listed in the Win32 API header files. 2173 // The code is actually a cryptic mnemonic device, with the initial "E" 2174 // standing for "exception" and the final 3 bytes (0x6D7363) representing the 2175 // ASCII values of "msc". 

#define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363


// Table of all exception codes the VM knows how to name, terminated by a
// {NULL, 0} entry.
struct siglabel exceptlabels[] = {
    def_excpt(EXCEPTION_ACCESS_VIOLATION),
    def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
    def_excpt(EXCEPTION_BREAKPOINT),
    def_excpt(EXCEPTION_SINGLE_STEP),
    def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
    def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
    def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
    def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
    def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
    def_excpt(EXCEPTION_FLT_OVERFLOW),
    def_excpt(EXCEPTION_FLT_STACK_CHECK),
    def_excpt(EXCEPTION_FLT_UNDERFLOW),
    def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
    def_excpt(EXCEPTION_INT_OVERFLOW),
    def_excpt(EXCEPTION_PRIV_INSTRUCTION),
    def_excpt(EXCEPTION_IN_PAGE_ERROR),
    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
    def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
    def_excpt(EXCEPTION_STACK_OVERFLOW),
    def_excpt(EXCEPTION_INVALID_DISPOSITION),
    def_excpt(EXCEPTION_GUARD_PAGE),
    def_excpt(EXCEPTION_INVALID_HANDLE),
    def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
    def_excpt(EXCEPTION_HEAP_CORRUPTION),
#ifdef _M_IA64
    def_excpt(EXCEPTION_REG_NAT_CONSUMPTION),
#endif
    NULL, 0
};

// Writes the symbolic name of 'exception_code' into 'buf' and returns 'buf';
// returns NULL when the code is not in the table above.
const char* os::exception_name(int exception_code, char *buf, size_t size) {
  for (int i = 0; exceptlabels[i].name != NULL; i++) {
    if (exceptlabels[i].number == exception_code) {
      jio_snprintf(buf, size, "%s", exceptlabels[i].name);
      return buf;
    }
  }

  return NULL;
}

//-----------------------------------------------------------------------------
// Fixes up the context after an integer-overflow trap raised by 'idiv'
// (MinInt/-1 on x86, MinLong/-1 on x64): sets the expected result registers
// and advances the pc past the faulting instruction.
LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by idiv; should only happen for -MinInt/-1
  // (division by zero is handled explicitly)
#ifdef _M_IA64
  assert(0, "Fix Handle_IDiv_Exception");
#else
#ifdef _M_AMD64
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Rip;
  // Instruction may carry a REX prefix; accept either encoding.
  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  if (pc[0] == 0xF7) {
    // set correct result values and continue after idiv instruction
    ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
  } else {
    ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
  }
  // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
  // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
  // idiv opcode (0xF7).
  ctx->Rdx = (DWORD)0;                 // remainder
  // Continue the execution
#else
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Eip;
  assert(pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  assert(ctx->Eax == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
  ctx->Eax = (DWORD)min_jint;      // result
  ctx->Edx = (DWORD)0;             // remainder
  // Continue the execution
#endif
#endif
  return EXCEPTION_CONTINUE_EXECUTION;
}

//-----------------------------------------------------------------------------
// Handles FP exceptions caused by native code leaving the FPU control word /
// MXCSR in a non-VM state: restores the VM's expected control state and
// retries, otherwise lets the exception propagate.
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  PCONTEXT ctx = exceptionInfo->ContextRecord;
#ifndef _WIN64
  // handle exception caused by native method modifying control word
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;

  switch (exception_code) {
  case EXCEPTION_FLT_DENORMAL_OPERAND:
  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
  case EXCEPTION_FLT_INEXACT_RESULT:
  case EXCEPTION_FLT_INVALID_OPERATION:
  case EXCEPTION_FLT_OVERFLOW:
  case EXCEPTION_FLT_STACK_CHECK:
  case EXCEPTION_FLT_UNDERFLOW:
    jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
    if (fp_control_word != ctx->FloatSave.ControlWord) {
      // Restore FPCW and mask out FLT exceptions
      ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
      // Mask out pending FLT exceptions
      ctx->FloatSave.StatusWord &= 0xffffff00;
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if (prev_uef_handler != NULL) {
    // We didn't handle this exception so pass it to the previous
    // UnhandledExceptionFilter.
    return (prev_uef_handler)(exceptionInfo);
  }
#else // !_WIN64
  // On Windows, the mxcsr control bits are non-volatile across calls
  // See also CR 6192333
  //
  jint MxCsr = INITIAL_MXCSR;
  // we can't use StubRoutines::addr_mxcsr_std()
  // because in Win64 mxcsr is not saved there
  if (MxCsr != ctx->MxCsr) {
    ctx->MxCsr = MxCsr;
    return EXCEPTION_CONTINUE_EXECUTION;
  }
#endif // !_WIN64

  return EXCEPTION_CONTINUE_SEARCH;
}

// Hands the raw exception information to VMError, which produces hs_err
// diagnostics and normally terminates the process.
static inline void report_error(Thread* t, DWORD exception_code,
                                address addr, void* siginfo, void* context) {
  VMError::report_and_die(t, exception_code, addr, siginfo, context);

  // If UseOsErrorReporting, this will return here and save the error file
  // somewhere where we can find it in the minidump.
2314 } 2315 2316 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread, 2317 struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) { 2318 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2319 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2320 if (Interpreter::contains(pc)) { 2321 *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord); 2322 if (!fr->is_first_java_frame()) { 2323 assert(fr->safe_for_sender(thread), "Safety check"); 2324 *fr = fr->java_sender(); 2325 } 2326 } else { 2327 // more complex code with compiled code 2328 assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above"); 2329 CodeBlob* cb = CodeCache::find_blob(pc); 2330 if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) { 2331 // Not sure where the pc points to, fallback to default 2332 // stack overflow handling 2333 return false; 2334 } else { 2335 *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord); 2336 // in compiled code, the stack banging is performed just after the return pc 2337 // has been pushed on the stack 2338 *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp())); 2339 if (!fr->is_java_frame()) { 2340 assert(fr->safe_for_sender(thread), "Safety check"); 2341 *fr = fr->java_sender(); 2342 } 2343 } 2344 } 2345 assert(fr->is_java_frame(), "Safety check"); 2346 return true; 2347 } 2348 2349 //----------------------------------------------------------------------------- 2350 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { 2351 if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH; 2352 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2353 #ifdef _M_IA64 2354 // On Itanium, we need the "precise pc", which has the slot number coded 2355 // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format). 
  address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
  // Convert the pc to "Unix format", which has the slot number coded
  // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
  // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
  // information is saved in the Unix format.
  address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
#else
#ifdef _M_AMD64
  address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
#endif
  // May be NULL for non-VM threads; checked before use below.
  Thread* t = Thread::current_or_null_safe();

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    // [0] = access kind (read/write/execute), [1] = faulting address.
    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      // pc and addr are on different pages iff their page-aligned XOR is
      // non-zero.
      bool instr_spans_page_boundary =
        (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                         (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start =
            (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          if (PrintMiscellaneous && Verbose) {
            char buf[256];
            jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                         "at " INTPTR_FORMAT
                         ", unguarding " INTPTR_FORMAT ": %s", addr,
                         page_start, (res ? "success" : strerror(errno)));
            tty->print_raw_cr(buf);
          }

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here. Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM. Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop. This condition is probably even more unlikely
          // than the first.
          //
          // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are unnecessary complication:
          // this handler is a best-effort safety net, not a complete solution.
          // It is disabled by default and should only be used as a workaround
          // in case we missed any no-execute-unsafe VM code.

          last_addr = addr;

          return EXCEPTION_CONTINUE_EXECUTION;
        }
      }

      // Last unguard failed or not unguarding
      tty->print_raw_cr("Execution protection violation");
      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    }
  }
#endif // _WIN64

  // Check to see if we caught the safepoint code in the
  // process of write protecting the memory serialization page.
  // It write enables the page immediately after protecting it
  // so just return.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    JavaThread* thread = (JavaThread*) t;
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];
    if (os::is_memory_serialize_page(thread, addr)) {
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  // Deliberate fault used by VM_Version to probe AVX register save/restore.
  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
      VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
  }

  if (t != NULL && t->is_Java_thread()) {
    JavaThread* thread = (JavaThread*) t;
    bool in_java = thread->thread_state() == _thread_in_Java;

    // Handle potential stack overflows up front.
2486 if (exception_code == EXCEPTION_STACK_OVERFLOW) { 2487 #ifdef _M_IA64 2488 // Use guard page for register stack. 2489 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2490 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2491 // Check for a register stack overflow on Itanium 2492 if (thread->addr_inside_register_stack_red_zone(addr)) { 2493 // Fatal red zone violation happens if the Java program 2494 // catches a StackOverflow error and does so much processing 2495 // that it runs beyond the unprotected yellow guard zone. As 2496 // a result, we are out of here. 2497 fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit."); 2498 } else if(thread->addr_inside_register_stack(addr)) { 2499 // Disable the yellow zone which sets the state that 2500 // we've got a stack overflow problem. 2501 if (thread->stack_yellow_reserved_zone_enabled()) { 2502 thread->disable_stack_yellow_reserved_zone(); 2503 } 2504 // Give us some room to process the exception. 2505 thread->disable_register_stack_guard(); 2506 // Tracing with +Verbose. 2507 if (Verbose) { 2508 tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc); 2509 tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr); 2510 tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base()); 2511 tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]", 2512 thread->register_stack_base(), 2513 thread->register_stack_base() + thread->stack_size()); 2514 } 2515 2516 // Reguard the permanent register stack red zone just to be sure. 2517 // We saw Windows silently disabling this without telling us. 
2518 thread->enable_register_stack_red_zone(); 2519 2520 return Handle_Exception(exceptionInfo, 2521 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2522 } 2523 #endif 2524 if (thread->stack_guards_enabled()) { 2525 if (_thread_in_Java) { 2526 frame fr; 2527 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2528 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2529 if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) { 2530 assert(fr.is_java_frame(), "Must be a Java frame"); 2531 SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr); 2532 } 2533 } 2534 // Yellow zone violation. The o/s has unprotected the first yellow 2535 // zone page for us. Note: must call disable_stack_yellow_zone to 2536 // update the enabled status, even if the zone contains only one page. 2537 thread->disable_stack_yellow_reserved_zone(); 2538 // If not in java code, return and hope for the best. 2539 return in_java 2540 ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)) 2541 : EXCEPTION_CONTINUE_EXECUTION; 2542 } else { 2543 // Fatal red zone violation. 2544 thread->disable_stack_red_zone(); 2545 tty->print_raw_cr("An unrecoverable stack overflow has occurred."); 2546 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2547 exceptionInfo->ContextRecord); 2548 return EXCEPTION_CONTINUE_SEARCH; 2549 } 2550 } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2551 // Either stack overflow or null pointer exception. 2552 if (in_java) { 2553 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2554 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2555 address stack_end = thread->stack_end(); 2556 if (addr < stack_end && addr >= stack_end - os::vm_page_size()) { 2557 // Stack overflow. 
          assert(!os::uses_stack_guard_pages(),
                 "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          if (os::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
        {
#ifdef _WIN64
          // If it's a legal stack address map the entire region in
          //
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
            // Round the faulting address down to a page boundary and commit
            // everything from there up to the stack base.
            addr = (address)((uintptr_t)addr &
                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
            os::commit_memory((char *)addr, thread->stack_base() - addr,
                              !ExecMem);
            return EXCEPTION_CONTINUE_EXECUTION;
          } else
#endif
          {
            // Null pointer exception.
#ifdef _M_IA64
            // Process implicit null checks in compiled code. Note: Implicit null checks
            // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
            if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
              CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
              // Handle implicit null check in UEP method entry
              if (cb && (cb->is_frame_complete_at(pc) ||
                         (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
                if (Verbose) {
                  intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
                  tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
                  tty->print_cr(" to addr " INTPTR_FORMAT, addr);
                  tty->print_cr(" bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
                                *(bundle_start + 1), *bundle_start);
                }
                return Handle_Exception(exceptionInfo,
                                        SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
              }
            }

            // Implicit null checks were processed above. Hence, we should not reach
            // here in the usual case => die!
            if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;

#else // !IA64

            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) {
              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
            }
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;
#endif
          }
        }
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
        address addr = JNI_FastGetField::find_slowcase_pc(pc);
        if (addr != (address)-1) {
          return Handle_Exception(exceptionInfo, addr);
        }
      }
#endif

      // Stack overflow or null pointer exception in native code.
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#if defined _M_IA64
    else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
              exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
      M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);

      // Compiled method patched to be non entrant? Following conditions must apply:
      // 1. must be first instruction in bundle
      // 2. must be a break instruction with appropriate code
      if ((((uint64_t) pc & 0x0F) == 0) &&
          (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
        return Handle_Exception(exceptionInfo,
                                (address)SharedRuntime::get_handle_wrong_method_stub());
      }
    } // /EXCEPTION_ILLEGAL_INSTRUCTION
#endif


    if (in_java) {
      switch (exception_code) {
      case EXCEPTION_INT_DIVIDE_BY_ZERO:
        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

      case EXCEPTION_INT_OVERFLOW:
        return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }
    // Give the FP-state handler a chance for Java/native threads, except for
    // C++ exceptions, which use the 0xE06D7363 code.
    if (((thread->thread_state() == _thread_in_Java) ||
         (thread->thread_state() == _thread_in_native)) &&
        exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
  }

  if (exception_code != EXCEPTION_BREAKPOINT) {
    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                 exceptionInfo->ContextRecord);
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

#ifndef _WIN64
// Special care for fast JNI accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
// the heap gets shrunk before the field access.
// Need to install our own structured exception handler since native code may
// install its own.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Generates a wrapper around a fast JNI getter that runs the getter under
// __try/__except so a fault in it is redirected to the slow case.
#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
  Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
                                                     jobject obj,           \
                                                     jfieldID fieldID) {    \
    __try {                                                                 \
      return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
                                                                 obj,       \
                                                                 fieldID);  \
    } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
                                              _exception_info())) {         \
    }                                                                       \
    return 0;                                                               \
  }

DEFINE_FAST_GETFIELD(jboolean, bool, Boolean)
DEFINE_FAST_GETFIELD(jbyte, byte, Byte)
DEFINE_FAST_GETFIELD(jchar, char, Char)
DEFINE_FAST_GETFIELD(jshort, short, Short)
DEFINE_FAST_GETFIELD(jint, int, Int)
DEFINE_FAST_GETFIELD(jlong, long, Long)
DEFINE_FAST_GETFIELD(jfloat, float, Float)
DEFINE_FAST_GETFIELD(jdouble, double, Double)

// Returns the SEH-protected wrapper (defined above) for the fast JNI getter
// of the given primitive type.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
  case T_BYTE: return (address)jni_fast_GetByteField_wrapper;
  case T_CHAR: return (address)jni_fast_GetCharField_wrapper;
  case T_SHORT: return (address)jni_fast_GetShortField_wrapper;
  case T_INT: return (address)jni_fast_GetIntField_wrapper;
  case T_LONG: return (address)jni_fast_GetLongField_wrapper;
  case T_FLOAT: return (address)jni_fast_GetFloatField_wrapper;
  case T_DOUBLE: return (address)jni_fast_GetDoubleField_wrapper;
  default: ShouldNotReachHere();
  }
  return (address)-1;
}
#endif

// Virtual Memory

int os::vm_page_size() { return os::win32::vm_page_size(); }
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}

// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
//   + select Control Panel -> Administrative Tools -> Local Security Policy
//   + select Local Policies -> User Rights Assignment
//   + double click "Lock pages in memory", add users and/or groups
//   + reboot
// Note the above steps are needed for administrator as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment it only uses large page if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future, if so the code below needs to be revisited.

#ifndef MEM_LARGE_PAGES
#define MEM_LARGE_PAGES 0x20000000
#endif

// Process/token handles held only for the duration of large-page
// initialization (see cleanup_after_large_page_init()).
static HANDLE _hProcess;
static HANDLE _hToken;

// Container for NUMA node list info
class NUMANodeListHolder {
 private:
  int *_numa_used_node_list;  // allocated below
  int _numa_used_node_count;

  // Releases the node list array, if one was built.
  void free_node_list() {
    if (_numa_used_node_list != NULL) {
      FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
    }
  }

 public:
  NUMANodeListHolder() {
    _numa_used_node_count = 0;
    _numa_used_node_list = NULL;
    // do rest of initialization in build routine (after function pointers are set up)
  }

  ~NUMANodeListHolder() {
    free_node_list();
  }

  // Enumerates the NUMA nodes whose processors intersect this process's
  // affinity mask. Returns true only when more than one node is usable.
  bool build() {
    DWORD_PTR proc_aff_mask;
    DWORD_PTR sys_aff_mask;
    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
    ULONG highest_node_number;
    if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
    free_node_list();
    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
    for (unsigned int i = 0; i <= highest_node_number; i++) {
      ULONGLONG proc_mask_numa_node;
      if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
      if ((proc_aff_mask & proc_mask_numa_node)!=0) {
        _numa_used_node_list[_numa_used_node_count++] = i;
      }
    }
    return (_numa_used_node_count > 1);
  }

  int get_count() { return _numa_used_node_count; }
  int get_node_list_entry(int n) {
    // for indexes out of range, returns -1
    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
  }

} numa_node_list_holder;



// Large page size selected by os::large_page_init(); 0 until then.
static size_t _large_page_size = 0;

// Enables the SeLockMemoryPrivilege on this process's token, which Windows
// requires before large-page allocations are permitted. Leaves _hProcess /
// _hToken open for cleanup_after_large_page_init().
static bool request_lock_memory_privilege() {
  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
                          os::current_process_id());

  LUID luid;
  if (_hProcess != NULL &&
      OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
      LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

    TOKEN_PRIVILEGES tp;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Luid = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See MSDN document.
    if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
        (GetLastError() == ERROR_SUCCESS)) {
      return true;
    }
  }

  return false;
}

// Closes the handles opened by request_lock_memory_privilege().
static void cleanup_after_large_page_init() {
  if (_hProcess) CloseHandle(_hProcess);
  _hProcess = NULL;
  if (_hToken) CloseHandle(_hToken);
  _hToken = NULL;
}

// Initializes NUMA interleaving support: aligns NUMAInterleaveGranularity and
// builds the usable-node list. Returns true when interleaving can be used
// (more than one NUMA node available to this process).
static bool numa_interleaving_init() {
  bool success = false;
  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

  // print a warning if UseNUMAInterleaving flag is specified on command line
  bool warn_on_failure = use_numa_interleaving_specified;
#define WARN(msg) if (warn_on_failure) { warning(msg); }

  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);

  if (numa_node_list_holder.build()) {
    if (PrintMiscellaneous && Verbose) {
      tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
      for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
        tty->print("%d ", numa_node_list_holder.get_node_list_entry(i));
      }
      tty->print("\n");
    }
    success = true;
  } else {
    WARN("Process does not cover multiple NUMA nodes.");
  }
  if (!success) {
    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
  }
  return success;
#undef WARN
}

// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
//  * UseNUMAInterleaving requires a separate node for each piece
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
                                         DWORD prot,
                                         bool should_inject_error = false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) VirtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = (char *) align_size_up((size_t)p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t bytes_remaining = bytes;
  // An overflow of align_size_up() would have been caught above
  // in the calculation of size_of_reserve.
2938 char * next_alloc_addr = p_buf; 2939 HANDLE hProc = GetCurrentProcess(); 2940 2941 #ifdef ASSERT 2942 // Variable for the failure injection 2943 long ran_num = os::random(); 2944 size_t fail_after = ran_num % bytes; 2945 #endif 2946 2947 int count=0; 2948 while (bytes_remaining) { 2949 // select bytes_to_rq to get to the next chunk_size boundary 2950 2951 size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size)); 2952 // Note allocate and commit 2953 char * p_new; 2954 2955 #ifdef ASSERT 2956 bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after); 2957 #else 2958 const bool inject_error_now = false; 2959 #endif 2960 2961 if (inject_error_now) { 2962 p_new = NULL; 2963 } else { 2964 if (!UseNUMAInterleaving) { 2965 p_new = (char *) VirtualAlloc(next_alloc_addr, 2966 bytes_to_rq, 2967 flags, 2968 prot); 2969 } else { 2970 // get the next node to use from the used_node_list 2971 assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected"); 2972 DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count()); 2973 p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node); 2974 } 2975 } 2976 2977 if (p_new == NULL) { 2978 // Free any allocated pages 2979 if (next_alloc_addr > p_buf) { 2980 // Some memory was committed so release it. 2981 size_t bytes_to_release = bytes - bytes_remaining; 2982 // NMT has yet to record any individual blocks, so it 2983 // need to create a dummy 'reserve' record to match 2984 // the release. 
2985 MemTracker::record_virtual_memory_reserve((address)p_buf, 2986 bytes_to_release, CALLER_PC); 2987 os::release_memory(p_buf, bytes_to_release); 2988 } 2989 #ifdef ASSERT 2990 if (should_inject_error) { 2991 if (TracePageSizes && Verbose) { 2992 tty->print_cr("Reserving pages individually failed."); 2993 } 2994 } 2995 #endif 2996 return NULL; 2997 } 2998 2999 bytes_remaining -= bytes_to_rq; 3000 next_alloc_addr += bytes_to_rq; 3001 count++; 3002 } 3003 // Although the memory is allocated individually, it is returned as one. 3004 // NMT records it as one block. 3005 if ((flags & MEM_COMMIT) != 0) { 3006 MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC); 3007 } else { 3008 MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC); 3009 } 3010 3011 // made it this far, success 3012 return p_buf; 3013 } 3014 3015 3016 3017 void os::large_page_init() { 3018 if (!UseLargePages) return; 3019 3020 // print a warning if any large page related flag is specified on command line 3021 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) || 3022 !FLAG_IS_DEFAULT(LargePageSizeInBytes); 3023 bool success = false; 3024 3025 #define WARN(msg) if (warn_on_failure) { warning(msg); } 3026 if (request_lock_memory_privilege()) { 3027 size_t s = GetLargePageMinimum(); 3028 if (s) { 3029 #if defined(IA32) || defined(AMD64) 3030 if (s > 4*M || LargePageSizeInBytes > 4*M) { 3031 WARN("JVM cannot use large pages bigger than 4mb."); 3032 } else { 3033 #endif 3034 if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) { 3035 _large_page_size = LargePageSizeInBytes; 3036 } else { 3037 _large_page_size = s; 3038 } 3039 success = true; 3040 #if defined(IA32) || defined(AMD64) 3041 } 3042 #endif 3043 } else { 3044 WARN("Large page is not supported by the processor."); 3045 } 3046 } else { 3047 WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory."); 3048 } 3049 #undef WARN 3050 3051 
const size_t default_page_size = (size_t) vm_page_size(); 3052 if (success && _large_page_size > default_page_size) { 3053 _page_sizes[0] = _large_page_size; 3054 _page_sizes[1] = default_page_size; 3055 _page_sizes[2] = 0; 3056 } 3057 3058 cleanup_after_large_page_init(); 3059 UseLargePages = success; 3060 } 3061 3062 // On win32, one cannot release just a part of reserved memory, it's an 3063 // all or nothing deal. When we split a reservation, we must break the 3064 // reservation into two reservations. 3065 void os::pd_split_reserved_memory(char *base, size_t size, size_t split, 3066 bool realloc) { 3067 if (size > 0) { 3068 release_memory(base, size); 3069 if (realloc) { 3070 reserve_memory(split, base); 3071 } 3072 if (size != split) { 3073 reserve_memory(size - split, base + split); 3074 } 3075 } 3076 } 3077 3078 // Multiple threads can race in this code but it's not possible to unmap small sections of 3079 // virtual space to get requested alignment, like posix-like os's. 3080 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe. 
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  // Over-reserve by 'alignment' so an aligned sub-range always exists,
  // then release and re-reserve exactly at the aligned address.
  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);

    os::release_memory(extra_base, extra_size);

    // Another thread may grab the range between our release and re-reserve;
    // in that case this returns NULL and we retry with a fresh reservation.
    aligned_base = os::reserve_memory(size, aligned_base);

  } while (aligned_base == NULL);

  return aligned_base;
}

// Reserve (but do not commit) 'bytes' of address space, optionally at 'addr'.
// With NUMA interleaving (and small pages) the range is built chunk-by-chunk
// via allocate_pages_individually().
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_page_size() == 0, "reserve page size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if (Verbose && PrintMiscellaneous) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if (Verbose && PrintMiscellaneous) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available.
  return reserve_memory(bytes, requested_addr);
}

size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

bool os::can_execute_large_page_memory() {
  return true;
}

// Reserve AND commit 'bytes' backed by large pages; returns NULL to signal
// the caller to fall back to small pages.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
                                 bool exec) {
  assert(UseLargePages, "only for large pages");

  if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
    return NULL; // Fallback to small pages.
  }

  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;

  // with large pages, there are two cases where we need to use Individual Allocation
  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages individually.");
    }
    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
    if (p_buf == NULL) {
      // give an appropriate warning message
      if (UseNUMAInterleaving) {
        warning("NUMA large page allocation failed, UseLargePages flag ignored");
      }
      if (UseLargePagesIndividualAllocation) {
        warning("Individually allocated large pages failed, "
                "use -XX:-UseLargePagesIndividualAllocation to turn off");
      }
      return NULL;
    }

    return p_buf;

  } else {
    if (TracePageSizes && Verbose) {
      tty->print_cr("Reserving large pages in a single large chunk.");
    }
    // normal policy just allocate it all at once
    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
    if (res != NULL) {
      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
    }

    return res;
  }
}

bool os::release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  return release_memory(base, bytes);
}

void os::print_statistics() {
}

// Emit a diagnostic for a failed commit, including the OS error text.
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}

// Commit previously reserved pages; with 'exec' the pages are additionally
// made executable via VirtualProtect.
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

// Commit or abort the VM with an out-of-memory error carrying 'mesg'.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
3314 return true; 3315 } 3316 assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries"); 3317 assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks"); 3318 return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0); 3319 } 3320 3321 bool os::pd_release_memory(char* addr, size_t bytes) { 3322 return VirtualFree(addr, 0, MEM_RELEASE) != 0; 3323 } 3324 3325 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 3326 return os::commit_memory(addr, size, !ExecMem); 3327 } 3328 3329 bool os::remove_stack_guard_pages(char* addr, size_t size) { 3330 return os::uncommit_memory(addr, size); 3331 } 3332 3333 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) { 3334 uint count = 0; 3335 bool ret = false; 3336 size_t bytes_remaining = bytes; 3337 char * next_protect_addr = addr; 3338 3339 // Use VirtualQuery() to get the chunk size. 3340 while (bytes_remaining) { 3341 MEMORY_BASIC_INFORMATION alloc_info; 3342 if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) { 3343 return false; 3344 } 3345 3346 size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3347 // We used different API at allocate_pages_individually() based on UseNUMAInterleaving, 3348 // but we don't distinguish here as both cases are protected by same API. 
3349 ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0; 3350 warning("Failed protecting pages individually for chunk #%u", count); 3351 if (!ret) { 3352 return false; 3353 } 3354 3355 bytes_remaining -= bytes_to_protect; 3356 next_protect_addr += bytes_to_protect; 3357 count++; 3358 } 3359 return ret; 3360 } 3361 3362 // Set protections specified 3363 bool os::protect_memory(char* addr, size_t bytes, ProtType prot, 3364 bool is_committed) { 3365 unsigned int p = 0; 3366 switch (prot) { 3367 case MEM_PROT_NONE: p = PAGE_NOACCESS; break; 3368 case MEM_PROT_READ: p = PAGE_READONLY; break; 3369 case MEM_PROT_RW: p = PAGE_READWRITE; break; 3370 case MEM_PROT_RWX: p = PAGE_EXECUTE_READWRITE; break; 3371 default: 3372 ShouldNotReachHere(); 3373 } 3374 3375 DWORD old_status; 3376 3377 // Strange enough, but on Win32 one can change protection only for committed 3378 // memory, not a big deal anyway, as bytes less or equal than 64K 3379 if (!is_committed) { 3380 commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX, 3381 "cannot commit protection page"); 3382 } 3383 // One cannot use os::guard_memory() here, as on Win32 guard page 3384 // have different (one-shot) semantics, from MSDN on PAGE_GUARD: 3385 // 3386 // Pages in the region become guard pages. Any attempt to access a guard page 3387 // causes the system to raise a STATUS_GUARD_PAGE exception and turn off 3388 // the guard page status. Guard pages thus act as a one-time access alarm. 3389 bool ret; 3390 if (UseNUMAInterleaving) { 3391 // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time, 3392 // so we must protect the chunks individually. 
    ret = protect_pages_individually(addr, bytes, p, &old_status);
  } else {
    ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
  }
#ifdef ASSERT
  if (!ret) {
    int err = os::get_last_error();
    char buf[256];
    size_t buf_len = os::lasterror(buf, sizeof(buf));
    warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
            ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
            buf_len != 0 ? buf : "<no_error_string>", err);
  }
#endif
  return ret;
}

// Mark pages as guard pages; the first touch raises STATUS_GUARD_PAGE and
// clears the guard status again (one-shot semantics, see protect_memory above).
bool os::guard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}

bool os::unguard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}

// NUMA hooks that are no-ops on Windows; interleaving is handled at
// reservation time in allocate_pages_individually().
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
bool os::numa_topology_changed() { return false; }
size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id() { return 0; }

// Fill 'ids' with up to 'size' NUMA node ids; returns the number written.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // Provide an answer for UMA systems
    ids[0] = 0;
    return 1;
  } else {
    // check for size bigger than actual groups_num
    size = MIN2(size, numa_get_groups_num());
    for (int i = 0; i < (int)size; i++) {
      ids[i] = numa_node_list_holder.get_node_list_entry(i);
    }
    return size;
  }
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected,
                     page_info* page_found) {
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}

#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL

void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}

// RAII helper that temporarily raises the multimedia timer resolution to 1 ms
// for sleeps that are not a multiple of the default ~10 ms tick.
class HighResolutionInterval : public CHeapObj<mtThread> {
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
 private:
  jlong resolution;
 public:
  HighResolutionInterval(jlong ms) {
    resolution = ms % 10L;
    if (resolution != 0) {
      MMRESULT result = timeBeginPeriod(1L);
    }
  }
  ~HighResolutionInterval() {
    if (resolution != 0) {
      MMRESULT result = timeEndPeriod(1L);
    }
    resolution = 0L;
  }
};

// Sleep for 'ms' milliseconds.  Interruptable sleeps (Java threads only) wait
// on the osthread's interrupt event and return OS_INTRPT when it fires;
// otherwise returns OS_TIMEOUT.
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  // WaitForMultipleObjects/Sleep take a DWORD timeout, so loop for longer sleeps.
  while (ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) {
      return res;
    }
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    HighResolutionInterval *phri=NULL;
    if (!ForceTimeHighResolution) {
      phri = new HighResolutionInterval(ms);
    }
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}

// Short sleep, direct OS call.
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    Sleep(100000);  //  ... 100 seconds at a time
  }
}

typedef BOOL (WINAPI * STTSignature)(void);

void os::naked_yield() {
  // Consider passing back the return value from SwitchToThread().
  SwitchToThread();
}

// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.

int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};

// Alternative mapping used when ThreadPriorityPolicy == 1: spreads Java
// priorities over the full Windows range, up to TIME_CRITICAL.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};

static int prio_init() {
  // If ThreadPriorityPolicy is 1, switch tables
  if (ThreadPriorityPolicy == 1) {
    int i;
    for (i = 0; i < CriticalPriority + 1; i++) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  if (UseCriticalJavaThreadPriority) {
    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
  }
  return 0;
}

OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) return OS_OK;
  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
  return ret ? OS_OK : OS_ERR;
}

OSReturn os::get_native_priority(const Thread* const thread,
                                 int* priority_ptr) {
  if (!UseThreadPriorities) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

// Post an interrupt to 'thread': set the interrupted flag, signal the
// interrupt event, and unpark both the JSR-166 parker and the ParkEvent.
void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread ||
         Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread()) {
    ((JavaThread*)thread)->parker()->unpark();
  }

  ParkEvent * ev = thread->_ParkEvent;
  if (ev != NULL) ev->unpark();
}


// Query (and optionally clear) the interrupted state of 'thread'.
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. By checking thread interrupt event to see
  // if the thread gets real interrupt thus prevent spurious wakeup.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}

// Gets a pc (hint) for a running thread. Currently used only for profiling.
3694 ExtendedPC os::get_thread_pc(Thread* thread) { 3695 CONTEXT context; 3696 context.ContextFlags = CONTEXT_CONTROL; 3697 HANDLE handle = thread->osthread()->thread_handle(); 3698 #ifdef _M_IA64 3699 assert(0, "Fix get_thread_pc"); 3700 return ExtendedPC(NULL); 3701 #else 3702 if (GetThreadContext(handle, &context)) { 3703 #ifdef _M_AMD64 3704 return ExtendedPC((address) context.Rip); 3705 #else 3706 return ExtendedPC((address) context.Eip); 3707 #endif 3708 } else { 3709 return ExtendedPC(NULL); 3710 } 3711 #endif 3712 } 3713 3714 // GetCurrentThreadId() returns DWORD 3715 intx os::current_thread_id() { return GetCurrentThreadId(); } 3716 3717 static int _initial_pid = 0; 3718 3719 int os::current_process_id() { 3720 return (_initial_pid ? _initial_pid : _getpid()); 3721 } 3722 3723 int os::win32::_vm_page_size = 0; 3724 int os::win32::_vm_allocation_granularity = 0; 3725 int os::win32::_processor_type = 0; 3726 // Processor level is not available on non-NT systems, use vm_version instead 3727 int os::win32::_processor_level = 0; 3728 julong os::win32::_physical_memory = 0; 3729 size_t os::win32::_default_stack_size = 0; 3730 3731 intx os::win32::_os_thread_limit = 0; 3732 volatile intx os::win32::_os_thread_count = 0; 3733 3734 bool os::win32::_is_windows_server = false; 3735 3736 // 6573254 3737 // Currently, the bug is observed across all the supported Windows releases, 3738 // including the latest one (as of this writing - Windows Server 2012 R2) 3739 bool os::win32::_has_exit_bug = true; 3740 3741 void os::win32::initialize_system_info() { 3742 SYSTEM_INFO si; 3743 GetSystemInfo(&si); 3744 _vm_page_size = si.dwPageSize; 3745 _vm_allocation_granularity = si.dwAllocationGranularity; 3746 _processor_type = si.dwProcessorType; 3747 _processor_level = si.wProcessorLevel; 3748 set_processor_count(si.dwNumberOfProcessors); 3749 3750 MEMORYSTATUSEX ms; 3751 ms.dwLength = sizeof(ms); 3752 3753 // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, 
dwAvailVirtual, 3754 // dwMemoryLoad (% of memory in use) 3755 GlobalMemoryStatusEx(&ms); 3756 _physical_memory = ms.ullTotalPhys; 3757 3758 OSVERSIONINFOEX oi; 3759 oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); 3760 GetVersionEx((OSVERSIONINFO*)&oi); 3761 switch (oi.dwPlatformId) { 3762 case VER_PLATFORM_WIN32_NT: 3763 { 3764 int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion; 3765 if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER || 3766 oi.wProductType == VER_NT_SERVER) { 3767 _is_windows_server = true; 3768 } 3769 } 3770 break; 3771 default: fatal("Unknown platform"); 3772 } 3773 3774 _default_stack_size = os::current_stack_size(); 3775 assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size"); 3776 assert((_default_stack_size & (_vm_page_size - 1)) == 0, 3777 "stack size not a multiple of page size"); 3778 3779 initialize_performance_counter(); 3780 } 3781 3782 3783 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, 3784 int ebuflen) { 3785 char path[MAX_PATH]; 3786 DWORD size; 3787 DWORD pathLen = (DWORD)sizeof(path); 3788 HINSTANCE result = NULL; 3789 3790 // only allow library name without path component 3791 assert(strchr(name, '\\') == NULL, "path not allowed"); 3792 assert(strchr(name, ':') == NULL, "path not allowed"); 3793 if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) { 3794 jio_snprintf(ebuf, ebuflen, 3795 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name); 3796 return NULL; 3797 } 3798 3799 // search system directory 3800 if ((size = GetSystemDirectory(path, pathLen)) > 0) { 3801 if (size >= pathLen) { 3802 return NULL; // truncated 3803 } 3804 if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) { 3805 return NULL; // truncated 3806 } 3807 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3808 return result; 3809 } 3810 } 3811 3812 // try Windows directory 3813 if ((size = GetWindowsDirectory(path, pathLen)) > 0) { 
3814 if (size >= pathLen) { 3815 return NULL; // truncated 3816 } 3817 if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) { 3818 return NULL; // truncated 3819 } 3820 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3821 return result; 3822 } 3823 } 3824 3825 jio_snprintf(ebuf, ebuflen, 3826 "os::win32::load_windows_dll() cannot load %s from system directories.", name); 3827 return NULL; 3828 } 3829 3830 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS) 3831 #define EXIT_TIMEOUT 300000 /* 5 minutes */ 3832 3833 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) { 3834 InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect); 3835 return TRUE; 3836 } 3837 3838 int os::win32::exit_process_or_thread(Ept what, int exit_code) { 3839 // Basic approach: 3840 // - Each exiting thread registers its intent to exit and then does so. 3841 // - A thread trying to terminate the process must wait for all 3842 // threads currently exiting to complete their exit. 3843 3844 if (os::win32::has_exit_bug()) { 3845 // The array holds handles of the threads that have started exiting by calling 3846 // _endthreadex(). 3847 // Should be large enough to avoid blocking the exiting thread due to lack of 3848 // a free slot. 3849 static HANDLE handles[MAXIMUM_THREADS_TO_KEEP]; 3850 static int handle_count = 0; 3851 3852 static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT; 3853 static CRITICAL_SECTION crit_sect; 3854 static volatile jint process_exiting = 0; 3855 int i, j; 3856 DWORD res; 3857 HANDLE hproc, hthr; 3858 3859 // The first thread that reached this point, initializes the critical section. 
    if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
      warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
    } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
      if (what != EPT_THREAD) {
        // Atomically set process_exiting before the critical section
        // to increase the visibility between racing threads.
        Atomic::cmpxchg((jint)GetCurrentThreadId(), &process_exiting, 0);
      }
      EnterCriticalSection(&crit_sect);

      if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
        // Remove from the array those handles of the threads that have completed exiting.
        for (i = 0, j = 0; i < handle_count; ++i) {
          res = WaitForSingleObject(handles[i], 0 /* don't wait */);
          if (res == WAIT_TIMEOUT) {
            // Thread still exiting — keep its handle (compact in place).
            handles[j++] = handles[i];
          } else {
            if (res == WAIT_FAILED) {
              warning("WaitForSingleObject failed (%u) in %s: %d\n",
                      GetLastError(), __FILE__, __LINE__);
            }
            // Don't keep the handle, if we failed waiting for it.
            CloseHandle(handles[i]);
          }
        }

        // If there's no free slot in the array of the kept handles, we'll have to
        // wait until at least one thread completes exiting.
        if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
          // Raise the priority of the oldest exiting thread to increase its chances
          // to complete sooner.
          SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
          // Only the first MAXIMUM_WAIT_OBJECTS handles can be waited on at
          // once (WaitForMultipleObjects hard limit); these are the oldest.
          res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
          if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
            i = (res - WAIT_OBJECT_0);
            handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
            // Shift remaining handles down over the signaled slot.
            for (; i < handle_count; ++i) {
              handles[i] = handles[i + 1];
            }
          } else {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Don't keep handles, if we failed waiting for them.
            for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
              CloseHandle(handles[i]);
            }
            handle_count = 0;
          }
        }

        // Store a duplicate of the current thread handle in the array of handles.
        hproc = GetCurrentProcess();
        hthr = GetCurrentThread();
        if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
                             0, FALSE, DUPLICATE_SAME_ACCESS)) {
          warning("DuplicateHandle failed (%u) in %s: %d\n",
                  GetLastError(), __FILE__, __LINE__);
        } else {
          ++handle_count;
        }

        // The current exiting thread has stored its handle in the array, and now
        // should leave the critical section before calling _endthreadex().

      } else if (what != EPT_THREAD && handle_count > 0) {
        jlong start_time, finish_time, timeout_left;
        // Before ending the process, make sure all the threads that had called
        // _endthreadex() completed.

        // Set the priority level of the current thread to the same value as
        // the priority level of exiting threads.
        // This is to ensure it will be given a fair chance to execute if
        // the timeout expires.
        hthr = GetCurrentThread();
        SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
        start_time = os::javaTimeNanos();
        finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
        // Wait in portions of at most MAXIMUM_WAIT_OBJECTS handles each.
        for (i = 0; ; ) {
          int portion_count = handle_count - i;
          if (portion_count > MAXIMUM_WAIT_OBJECTS) {
            portion_count = MAXIMUM_WAIT_OBJECTS;
          }
          for (j = 0; j < portion_count; ++j) {
            SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
          }
          timeout_left = (finish_time - start_time) / 1000000L;
          if (timeout_left < 0) {
            timeout_left = 0;
          }
          res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
          if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Reset portion_count so we close the remaining
            // handles due to this error.
            portion_count = handle_count - i;
          }
          for (j = 0; j < portion_count; ++j) {
            CloseHandle(handles[i + j]);
          }
          if ((i += portion_count) >= handle_count) {
            break;
          }
          start_time = os::javaTimeNanos();
        }
        handle_count = 0;
      }

      LeaveCriticalSection(&crit_sect);
    }

    if (OrderAccess::load_acquire(&process_exiting) != 0 &&
        process_exiting != (jint)GetCurrentThreadId()) {
      // Some other thread is about to call exit(), so we
      // don't let the current thread proceed to exit() or _endthreadex()
      while (true) {
        SuspendThread(GetCurrentThread());
        // Avoid busy-wait loop, if SuspendThread() failed.
        Sleep(EXIT_TIMEOUT);
      }
    }
  }

  // We are here if either
  // - there's no 'race at exit' bug on this OS release;
  // - initialization of the critical section failed (unlikely);
  // - the current thread has stored its handle and left the critical section;
  // - the process-exiting thread has raised the flag and left the critical section.
  if (what == EPT_THREAD) {
    _endthreadex((unsigned)exit_code);
  } else if (what == EPT_PROCESS) {
    ::exit(exit_code);
  } else {
    // EPT_PROCESS_DIE: immediate termination, no atexit handlers.
    _exit(exit_code);
  }

  // Should not reach here
  return exit_code;
}

#undef EXIT_TIMEOUT

// Put the standard streams into binary mode so no CR/LF translation occurs.
void os::win32::setmode_streams() {
  _setmode(_fileno(stdin), _O_BINARY);
  _setmode(_fileno(stdout), _O_BINARY);
  _setmode(_fileno(stderr), _O_BINARY);
}


bool os::is_debugger_attached() {
  return IsDebuggerPresent() ? true : false;
}


void os::wait_for_keypress_at_exit(void) {
  if (PauseAtExit) {
    fprintf(stderr, "Press any key to continue...\n");
    fgetc(stdin);
  }
}


// Show a modal Yes/No message box; returns true iff the user chose Yes.
bool os::message_box(const char* title, const char* message) {
  int result = MessageBox(NULL, message, title,
                          MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
  return result == IDYES;
}

#ifndef PRODUCT
#ifndef _WIN64
// Helpers to check whether NX protection is enabled
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      pex->ExceptionRecord->NumberParameters > 0 &&
      pex->ExceptionRecord->ExceptionInformation[0] ==
      EXCEPTION_INFO_EXEC_VIOLATION) {
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 }; // ret
*code_ptr = (void *)code; 4047 __try { 4048 __asm call code_ptr 4049 } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) { 4050 tty->print_raw_cr("NX protection detected."); 4051 } 4052 } 4053 #endif // _WIN64 4054 #endif // PRODUCT 4055 4056 // This is called _before_ the global arguments have been parsed 4057 void os::init(void) { 4058 _initial_pid = _getpid(); 4059 4060 init_random(1234567); 4061 4062 win32::initialize_system_info(); 4063 win32::setmode_streams(); 4064 init_page_sizes((size_t) win32::vm_page_size()); 4065 4066 // This may be overridden later when argument processing is done. 4067 FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false); 4068 4069 // Initialize main_process and main_thread 4070 main_process = GetCurrentProcess(); // Remember main_process is a pseudo handle 4071 if (!DuplicateHandle(main_process, GetCurrentThread(), main_process, 4072 &main_thread, THREAD_ALL_ACCESS, false, 0)) { 4073 fatal("DuplicateHandle failed\n"); 4074 } 4075 main_thread_id = (int) GetCurrentThreadId(); 4076 4077 // initialize fast thread access - only used for 32-bit 4078 win32::initialize_thread_ptr_offset(); 4079 } 4080 4081 // To install functions for atexit processing 4082 extern "C" { 4083 static void perfMemory_exit_helper() { 4084 perfMemory_exit(); 4085 } 4086 } 4087 4088 static jint initSock(); 4089 4090 // this is called _after_ the global arguments have been parsed 4091 jint os::init_2(void) { 4092 // Allocate a single page and mark it as readable for safepoint polling 4093 address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY); 4094 guarantee(polling_page != NULL, "Reserve Failed for polling page"); 4095 4096 address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY); 4097 guarantee(return_page != NULL, "Commit Failed for polling page"); 4098 4099 os::set_polling_page(polling_page); 4100 4101 #ifndef PRODUCT 4102 if (Verbose && 
PrintMiscellaneous) { 4103 tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", 4104 (intptr_t)polling_page); 4105 } 4106 #endif 4107 4108 if (!UseMembar) { 4109 address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE); 4110 guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page"); 4111 4112 return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE); 4113 guarantee(return_page != NULL, "Commit Failed for memory serialize page"); 4114 4115 os::set_memory_serialize_page(mem_serialize_page); 4116 4117 #ifndef PRODUCT 4118 if (Verbose && PrintMiscellaneous) { 4119 tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", 4120 (intptr_t)mem_serialize_page); 4121 } 4122 #endif 4123 } 4124 4125 // Setup Windows Exceptions 4126 4127 // for debugging float code generation bugs 4128 if (ForceFloatExceptions) { 4129 #ifndef _WIN64 4130 static long fp_control_word = 0; 4131 __asm { fstcw fp_control_word } 4132 // see Intel PPro Manual, Vol. 2, p 7-16 4133 const long precision = 0x20; 4134 const long underflow = 0x10; 4135 const long overflow = 0x08; 4136 const long zero_div = 0x04; 4137 const long denorm = 0x02; 4138 const long invalid = 0x01; 4139 fp_control_word |= invalid; 4140 __asm { fldcw fp_control_word } 4141 #endif 4142 } 4143 4144 // If stack_commit_size is 0, windows will reserve the default size, 4145 // but only commit a small portion of it. 
4146 size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size()); 4147 size_t default_reserve_size = os::win32::default_stack_size(); 4148 size_t actual_reserve_size = stack_commit_size; 4149 if (stack_commit_size < default_reserve_size) { 4150 // If stack_commit_size == 0, we want this too 4151 actual_reserve_size = default_reserve_size; 4152 } 4153 4154 // Check minimum allowable stack size for thread creation and to initialize 4155 // the java system classes, including StackOverflowError - depends on page 4156 // size. Add a page for compiler2 recursion in main thread. 4157 // Add in 2*BytesPerWord times page size to account for VM stack during 4158 // class initialization depending on 32 or 64 bit VM. 4159 size_t min_stack_allowed = 4160 (size_t)(JavaThread::stack_yellow_zone_size() + JavaThread::stack_red_zone_size() + 4161 JavaThread::stack_shadow_zone_size() + 4162 (2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size()); 4163 if (actual_reserve_size < min_stack_allowed) { 4164 tty->print_cr("\nThe stack size specified is too small, " 4165 "Specify at least %dk", 4166 min_stack_allowed / K); 4167 return JNI_ERR; 4168 } 4169 4170 JavaThread::set_stack_size_at_create(stack_commit_size); 4171 4172 // Calculate theoretical max. size of Threads to guard gainst artifical 4173 // out-of-memory situations, where all available address-space has been 4174 // reserved by thread stacks. 4175 assert(actual_reserve_size != 0, "Must have a stack"); 4176 4177 // Calculate the thread limit when we should start doing Virtual Memory 4178 // banging. Currently when the threads will have used all but 200Mb of space. 4179 // 4180 // TODO: consider performing a similar calculation for commit size instead 4181 // as reserve size, since on a 64-bit platform we'll run into that more 4182 // often than running out of virtual memory space. We can use the 4183 // lower value of the two calculations as the os_thread_limit. 
4184 size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K); 4185 win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size); 4186 4187 // at exit methods are called in the reverse order of their registration. 4188 // there is no limit to the number of functions registered. atexit does 4189 // not set errno. 4190 4191 if (PerfAllowAtExitRegistration) { 4192 // only register atexit functions if PerfAllowAtExitRegistration is set. 4193 // atexit functions can be delayed until process exit time, which 4194 // can be problematic for embedded VM situations. Embedded VMs should 4195 // call DestroyJavaVM() to assure that VM resources are released. 4196 4197 // note: perfMemory_exit_helper atexit function may be removed in 4198 // the future if the appropriate cleanup code can be added to the 4199 // VM_Exit VMOperation's doit method. 4200 if (atexit(perfMemory_exit_helper) != 0) { 4201 warning("os::init_2 atexit(perfMemory_exit_helper) failed"); 4202 } 4203 } 4204 4205 #ifndef _WIN64 4206 // Print something if NX is enabled (win32 on AMD64) 4207 NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection()); 4208 #endif 4209 4210 // initialize thread priority policy 4211 prio_init(); 4212 4213 if (UseNUMA && !ForceNUMA) { 4214 UseNUMA = false; // We don't fully support this yet 4215 } 4216 4217 if (UseNUMAInterleaving) { 4218 // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag 4219 bool success = numa_interleaving_init(); 4220 if (!success) UseNUMAInterleaving = false; 4221 } 4222 4223 if (initSock() != JNI_OK) { 4224 return JNI_ERR; 4225 } 4226 4227 return JNI_OK; 4228 } 4229 4230 // Mark the polling page as unreadable 4231 void os::make_polling_page_unreadable(void) { 4232 DWORD old_status; 4233 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), 4234 PAGE_NOACCESS, &old_status)) { 4235 fatal("Could not disable polling page"); 4236 } 4237 } 4238 4239 // Mark the polling page 
void os::make_polling_page_readable(void) {
  DWORD old_status;
  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
                      PAGE_READONLY, &old_status)) {
    fatal("Could not enable polling page");
  }
}


// stat() wrapper: converts 'path' to native form first and optionally
// normalizes st_mtime to be timezone-independent (UseUTCFileTimestamp).
// Returns the underlying ::stat() result; -1/ENAMETOOLONG on overlong path.
int os::stat(const char *path, struct stat *sbuf) {
  char pathbuf[MAX_PATH];
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  int ret = ::stat(pathbuf, sbuf);
  if (sbuf != NULL && UseUTCFileTimestamp) {
    // Fix for 6539723.  st_mtime returned from stat() is dependent on
    // the system timezone and so can return different values for the
    // same file if/when daylight savings time changes.  This adjustment
    // makes sure the same timestamp is returned regardless of the TZ.
    //
    // See:
    // http://msdn.microsoft.com/library/
    //   default.asp?url=/library/en-us/sysinfo/base/
    //   time_zone_information_str.asp
    // and
    // http://msdn.microsoft.com/library/default.asp?url=
    //  /library/en-us/sysinfo/base/settimezoneinformation.asp
    //
    // NOTE: there is a insidious bug here:  If the timezone is changed
    // after the call to stat() but before 'GetTimeZoneInformation()', then
    // the adjustment we do here will be wrong and we'll return the wrong
    // value (which will likely end up creating an invalid class data
    // archive).  Absent a better API for this, or some time zone locking
    // mechanism, we'll have to live with this risk.
    TIME_ZONE_INFORMATION tz;
    DWORD tzid = GetTimeZoneInformation(&tz);
    int daylightBias =
        (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias;
    // Bias values are in minutes; st_mtime is in seconds.
    sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
  }
  return ret;
}


// Combine a FILETIME's two 32-bit halves into a signed 64-bit value
// (100-nanosecond units).
#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))


// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.

// current_thread_cpu_time() is not optimized for Windows yet
jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
}

jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns.
  return os::thread_cpu_time(thread, true /* user+sys */);
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
}

// Returns the given thread's CPU time in nanoseconds, or -1 if
// GetThreadTimes fails.  user_sys_cpu_time selects user+kernel vs user only.
jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
  // This code is copy from clasic VM -> hpi::sysThreadCPUTime
  // If this function changes, os::is_thread_cpu_time_supported() should too
  FILETIME CreationTime;
  FILETIME ExitTime;
  FILETIME KernelTime;
  FILETIME UserTime;

  if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
                     &ExitTime, &KernelTime, &UserTime) == 0) {
    return -1;
  } else if (user_sys_cpu_time) {
    // FILETIME counts 100ns units; * 100 converts to nanoseconds.
    return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
  } else {
    return FT2INT64(UserTime) * 100;
  }
}

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}

// Probe whether GetThreadTimes works for the current thread; mirrors the
// call made in os::thread_cpu_time above.
bool os::is_thread_cpu_time_supported() {
  // see os::thread_cpu_time
  FILETIME CreationTime;
  FILETIME ExitTime;
  FILETIME KernelTime;
  FILETIME UserTime;

  if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
                     &KernelTime, &UserTime) == 0) {
    return false;
  } else {
    return true;
  }
}

// Windows does't provide a loadavg primitive so this is stubbed out for now.
// It does have primitives (PDH API) to get CPU usage and run queue length.
// "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
// If we wanted to implement loadavg on Windows, we have a few options:
//
// a) Query CPU usage and run queue length and "fake" an answer by
//    returning the CPU usage if it's under 100%, and the run queue
//    length otherwise.  It turns out that querying is pretty slow
//    on Windows, on the order of 200 microseconds on a fast machine.
//    Note that on the Windows the CPU usage value is the % usage
//    since the last time the API was called (and the first call
//    returns 100%), so we'd have to deal with that as well.
//
// b) Sample the "fake" answer using a sampling thread and store
//    the answer in a global variable.  The call to loadavg would
//    just return the value of the global, avoiding the slow query.
//
// c) Sample a better answer using exponential decay to smooth the
//    value.  This is basically the algorithm used by UNIX kernels.
//
// Note that sampling thread starvation could affect both (b) and (c).
int os::loadavg(double loadavg[], int nelem) {
  return -1;
}


// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
bool os::dont_yield() {
  return DontYieldALot;
}

// This method is a slightly reworked copy of JDK's sysOpen
// from src/windows/hpi/src/sys_api_md.c

// open() wrapper: native-izes the path and forces binary, non-inherited mode.
// Returns -1/ENAMETOOLONG if the path would not fit in MAX_PATH.
int os::open(const char *path, int oflag, int mode) {
  char pathbuf[MAX_PATH];

  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
}

// Associate a FILE stream with an already-open file descriptor.
FILE* os::open(int fd, const char* mode) {
  return ::_fdopen(fd, mode);
}

// Is a (classpath) directory empty?
bool os::dir_is_empty(const char* path) {
  // NOTE(review): 'path' is handed to FindFirstFile verbatim; FindFirstFile
  // matches a pattern, so the caller is expected to supply one (or accept
  // that a plain directory name matches the directory itself) — verify
  // against callers before changing this.
  WIN32_FIND_DATA fd;
  HANDLE f = FindFirstFile(path, &fd);
  if (f == INVALID_HANDLE_VALUE) {
    return true;
  }
  FindClose(f);
  return false;
}

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
  if (!rewrite_existing) {
    // _O_EXCL makes the open fail if the file already exists.
    oflags |= _O_EXCL;
  }
  return ::open(path, oflags, _S_IREAD | _S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}


jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::_lseeki64(fd, offset, whence);
}

// Positioned read via overlapped ReadFile; does not move the fd's own
// file pointer.  Returns bytes read, or 0 on failure.
size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  OVERLAPPED ov;
  DWORD nread;
  BOOL result;

  ZeroMemory(&ov, sizeof(ov));
  ov.Offset = (DWORD)offset;
  ov.OffsetHigh = (DWORD)(offset >> 32);

  HANDLE h = (HANDLE)::_get_osfhandle(fd);

  result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);

  return result ? nread : 0;
}


// This method is a slightly reworked copy of JDK's sysNativePath
// from src/windows/hpi/src/path_md.c

// Convert a pathname to native format.  On win32, this involves forcing all
// separators to be '\\' rather than '/' (both are legal inputs, but Win95
// sometimes rejects '/') and removing redundant separators.  The input path is
// assumed to have been converted into the character encoding used by the local
// system.  Because this might be a double-byte encoding, care is taken to
// treat double-byte lead characters correctly.
//
// This procedure modifies the given path in place, as the result is never
// longer than the original.  There is no error return; this operation always
// succeeds.
char * os::native_path(char *path) {
  char *src = path, *dst = path, *end = path;
  char *colon = NULL;  // If a drive specifier is found, this will
                       // point to the colon following the drive letter

  // Assumption: '/', '\\', ':', and drive letters are never lead bytes
  assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
          && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");

  // Check for leading separators
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    // Remove leading separators if followed by drive specifier.  This
    // hack is necessary to support file URLs containing drive
    // specifiers (e.g., "file://c:/path").  As a side effect,
    // "/c:/path" can be used as an alternative to "c:/path".
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      // UNC pathname: Retain first separator; leave src pointed at
      // second separator so that further separators will be collapsed
      // into the second separator.  The result will be a pathname
      // beginning with "\\\\" followed (most likely) by a host name.
      src = dst = path + 1;
      path[0] = '\\';  // Force first separator to '\\'
    }
  }

  end = dst;

  // Remove redundant separators from remainder of path, forcing all
  // separators to be '\\' rather than '/'. Also, single byte space
  // characters are removed from the end of the path because those
  // are not legal ending characters on this operating system.
  //
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      while (isfilesep(*src)) src++;
      if (*src == '\0') {
        // Check for trailing separator
        end = dst;
        if (colon == dst - 2) break;  // "z:\\"
        if (dst == path + 1) break;   // "\\"
        if (dst == path + 2 && isfilesep(path[0])) {
          // "\\\\" is not collapsed to "\\" because "\\\\" marks the
          // beginning of a UNC pathname.  Even though it is not, by
          // itself, a valid UNC pathname, we leave it as is in order
          // to be consistent with the path canonicalizer as well
          // as the win32 APIs, which treat this case as an invalid
          // UNC pathname rather than as an alias for the root
          // directory of the current drive.
          break;
        }
        end = --dst;  // Path does not denote a root directory, so
                      // remove trailing separator
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else {  // Copy a single-byte character
        char c = *src++;
        *dst++ = c;
        // Space is not a legal ending character
        if (c != ' ') end = dst;
      }
    }
  }

  *end = '\0';

  // For "z:", add "." to work around a bug in the C runtime library
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}

// This code is a copy of JDK's sysSetLength
// from src/windows/hpi/src/sys_api_md.c

// Truncate or extend the file behind 'fd' to exactly 'length' bytes.
// Returns 0 on success, -1 on any failure.
int os::ftruncate(int fd, jlong length) {
  HANDLE h = (HANDLE)::_get_osfhandle(fd);
  long high = (long)(length >> 32);
  DWORD ret;

  if (h == (HANDLE)(-1)) {
    return -1;
  }

  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
  // 0xFFFFFFFF may be a valid low-word position; only treat it as an
  // error when GetLastError reports one (documented SetFilePointer idiom).
  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
    return -1;
  }

  if (::SetEndOfFile(h) == FALSE) {
    return -1;
  }

  return 0;
}


// This code is a copy of JDK's sysSync
// from src/windows/hpi/src/sys_api_md.c
// except for the legacy workaround for a bug in Win 98

// Flush OS buffers for 'fd'.  ERROR_ACCESS_DENIED is tolerated because
// FlushFileBuffers fails that way on read-only handles.
int os::fsync(int fd) {
  HANDLE handle = (HANDLE)::_get_osfhandle(fd);

  if ((!::FlushFileBuffers(handle)) &&
      (GetLastError() != ERROR_ACCESS_DENIED)) {
    // from winerror.h
    return -1;
  }
  return 0;
}

static int nonSeekAvailable(int, long *);
static int stdinAvailable(int, long *);

#define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
#define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)

// This code is a copy of JDK's sysAvailable
// from src/windows/hpi/src/sys_api_md.c

// Store in *bytes the number of bytes readable from 'fd' without blocking.
// Returns TRUE on success, FALSE on failure.  Character devices and pipes
// are delegated to stdinAvailable/nonSeekAvailable; seekable files use the
// distance between the current position and EOF.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      int ret;
      long lpbytes;
      if (fd == 0) {
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
      return FALSE;
    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
      return FALSE;
    }
    *bytes = end - cur;
    return TRUE;
  } else {
    return FALSE;
  }
}

// This code is a copy of JDK's nonSeekAvailable
// from src/windows/hpi/src/sys_api_md.c

static int nonSeekAvailable(int fd, long *pbytes) {
  // This is used for available on non-seekable devices
  // (like both named and anonymous pipes, such as pipes
  //  connected to an exec'd process).
  // Standard Input is a special case.
  HANDLE han;

  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
    return FALSE;
  }

  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
    // PeekNamedPipe fails when at EOF.  In that case we
    // simply make *pbytes = 0 which is consistent with the
    // behavior we get on Solaris when an fd is at EOF.
    // The only alternative is to raise an Exception,
    // which isn't really warranted.
    //
    if (::GetLastError() != ERROR_BROKEN_PIPE) {
      return FALSE;
    }
    *pbytes = 0;
  }
  return TRUE;
}

#define MAX_INPUT_EVENTS 2000

// This code is a copy of JDK's stdinAvailable
// from src/windows/hpi/src/sys_api_md.c

// Count bytes available on the console stdin by peeking queued key-down
// events up to the last carriage return.  Falls back to nonSeekAvailable
// when stdin is not a console.
static int stdinAvailable(int fd, long *pbytes) {
  HANDLE han;
  DWORD numEventsRead = 0;  // Number of events read from buffer
  DWORD numEvents = 0;      // Number of events in buffer
  DWORD i = 0;              // Loop index
  DWORD curLength = 0;      // Position marker
  DWORD actualLength = 0;   // Number of bytes readable
  BOOL error = FALSE;       // Error holder
  INPUT_RECORD *lpBuffer;   // Pointer to records of input events

  if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
    return FALSE;
  }

  // Construct an array of input records in the console buffer
  error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
  if (error == 0) {
    return nonSeekAvailable(fd, pbytes);
  }

  // lpBuffer must fit into 64K or else PeekConsoleInput fails
  if (numEvents > MAX_INPUT_EVENTS) {
    numEvents = MAX_INPUT_EVENTS;
  }

  lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
  if (lpBuffer == NULL) {
    return FALSE;
  }

  error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
  if (error == 0) {
    os::free(lpBuffer);
    return FALSE;
  }

  // Examine input records for the number of bytes available
  for (i=0; i<numEvents; i++) {
    if (lpBuffer[i].EventType == KEY_EVENT) {

      KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
                                      &(lpBuffer[i].Event);
      if (keyRecord->bKeyDown == TRUE) {
        CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
        curLength++;
        // A line is only readable once terminated by '\r'.
        if (*keyPressed == '\r') {
          actualLength = curLength;
        }
      }
    }
  }

  if (lpBuffer != NULL) {
    os::free(lpBuffer);
  }
4735 4736 *pbytes = (long) actualLength; 4737 return TRUE; 4738 } 4739 4740 // Map a block of memory. 4741 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 4742 char *addr, size_t bytes, bool read_only, 4743 bool allow_exec) { 4744 HANDLE hFile; 4745 char* base; 4746 4747 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL, 4748 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); 4749 if (hFile == NULL) { 4750 if (PrintMiscellaneous && Verbose) { 4751 DWORD err = GetLastError(); 4752 tty->print_cr("CreateFile() failed: GetLastError->%ld.", err); 4753 } 4754 return NULL; 4755 } 4756 4757 if (allow_exec) { 4758 // CreateFileMapping/MapViewOfFileEx can't map executable memory 4759 // unless it comes from a PE image (which the shared archive is not.) 4760 // Even VirtualProtect refuses to give execute access to mapped memory 4761 // that was not previously executable. 4762 // 4763 // Instead, stick the executable region in anonymous memory. Yuck. 4764 // Penalty is that ~4 pages will not be shareable - in the future 4765 // we might consider DLLizing the shared archive with a proper PE 4766 // header so that mapping executable + sharing is possible. 4767 4768 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE, 4769 PAGE_READWRITE); 4770 if (base == NULL) { 4771 if (PrintMiscellaneous && Verbose) { 4772 DWORD err = GetLastError(); 4773 tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err); 4774 } 4775 CloseHandle(hFile); 4776 return NULL; 4777 } 4778 4779 DWORD bytes_read; 4780 OVERLAPPED overlapped; 4781 overlapped.Offset = (DWORD)file_offset; 4782 overlapped.OffsetHigh = 0; 4783 overlapped.hEvent = NULL; 4784 // ReadFile guarantees that if the return value is true, the requested 4785 // number of bytes were read before returning. 
4786 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; 4787 if (!res) { 4788 if (PrintMiscellaneous && Verbose) { 4789 DWORD err = GetLastError(); 4790 tty->print_cr("ReadFile() failed: GetLastError->%ld.", err); 4791 } 4792 release_memory(base, bytes); 4793 CloseHandle(hFile); 4794 return NULL; 4795 } 4796 } else { 4797 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, 4798 NULL /* file_name */); 4799 if (hMap == NULL) { 4800 if (PrintMiscellaneous && Verbose) { 4801 DWORD err = GetLastError(); 4802 tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err); 4803 } 4804 CloseHandle(hFile); 4805 return NULL; 4806 } 4807 4808 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; 4809 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, 4810 (DWORD)bytes, addr); 4811 if (base == NULL) { 4812 if (PrintMiscellaneous && Verbose) { 4813 DWORD err = GetLastError(); 4814 tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err); 4815 } 4816 CloseHandle(hMap); 4817 CloseHandle(hFile); 4818 return NULL; 4819 } 4820 4821 if (CloseHandle(hMap) == 0) { 4822 if (PrintMiscellaneous && Verbose) { 4823 DWORD err = GetLastError(); 4824 tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err); 4825 } 4826 CloseHandle(hFile); 4827 return base; 4828 } 4829 } 4830 4831 if (allow_exec) { 4832 DWORD old_protect; 4833 DWORD exec_access = read_only ? 
PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; 4834 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; 4835 4836 if (!res) { 4837 if (PrintMiscellaneous && Verbose) { 4838 DWORD err = GetLastError(); 4839 tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err); 4840 } 4841 // Don't consider this a hard error, on IA32 even if the 4842 // VirtualProtect fails, we should still be able to execute 4843 CloseHandle(hFile); 4844 return base; 4845 } 4846 } 4847 4848 if (CloseHandle(hFile) == 0) { 4849 if (PrintMiscellaneous && Verbose) { 4850 DWORD err = GetLastError(); 4851 tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err); 4852 } 4853 return base; 4854 } 4855 4856 return base; 4857 } 4858 4859 4860 // Remap a block of memory. 4861 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4862 char *addr, size_t bytes, bool read_only, 4863 bool allow_exec) { 4864 // This OS does not allow existing memory maps to be remapped so we 4865 // have to unmap the memory before we remap it. 4866 if (!os::unmap_memory(addr, bytes)) { 4867 return NULL; 4868 } 4869 4870 // There is a very small theoretical window between the unmap_memory() 4871 // call above and the map_memory() call below where a thread in native 4872 // code may be able to access an address that is no longer mapped. 4873 4874 return os::map_memory(fd, file_name, file_offset, addr, bytes, 4875 read_only, allow_exec); 4876 } 4877 4878 4879 // Unmap a block of memory. 4880 // Returns true=success, otherwise false. 4881 4882 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4883 MEMORY_BASIC_INFORMATION mem_info; 4884 if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) { 4885 if (PrintMiscellaneous && Verbose) { 4886 DWORD err = GetLastError(); 4887 tty->print_cr("VirtualQuery() failed: GetLastError->%ld.", err); 4888 } 4889 return false; 4890 } 4891 4892 // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx. 
4893 // Instead, executable region was allocated using VirtualAlloc(). See 4894 // pd_map_memory() above. 4895 // 4896 // The following flags should match the 'exec_access' flages used for 4897 // VirtualProtect() in pd_map_memory(). 4898 if (mem_info.Protect == PAGE_EXECUTE_READ || 4899 mem_info.Protect == PAGE_EXECUTE_READWRITE) { 4900 return pd_release_memory(addr, bytes); 4901 } 4902 4903 BOOL result = UnmapViewOfFile(addr); 4904 if (result == 0) { 4905 if (PrintMiscellaneous && Verbose) { 4906 DWORD err = GetLastError(); 4907 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err); 4908 } 4909 return false; 4910 } 4911 return true; 4912 } 4913 4914 void os::pause() { 4915 char filename[MAX_PATH]; 4916 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4917 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4918 } else { 4919 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4920 } 4921 4922 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4923 if (fd != -1) { 4924 struct stat buf; 4925 ::close(fd); 4926 while (::stat(filename, &buf) == 0) { 4927 Sleep(100); 4928 } 4929 } else { 4930 jio_fprintf(stderr, 4931 "Could not open pause file '%s', continuing immediately.\n", filename); 4932 } 4933 } 4934 4935 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() { 4936 assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread"); 4937 } 4938 4939 // See the caveats for this class in os_windows.hpp 4940 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4941 // into this method and returns false. If no OS EXCEPTION was raised, returns 4942 // true. 4943 // The callback is supposed to provide the method that should be protected. 
//
bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
  assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
  assert(!WatcherThread::watcher_thread()->has_crash_protection(),
         "crash_protection already set?");

  bool success = true;
  __try {
    // Publish this protection object so fault handlers know the watcher
    // thread may safely unwind out of an OS exception raised in cb.call().
    WatcherThread::watcher_thread()->set_crash_protection(this);
    cb.call();
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // only for protection, nothing to do
    success = false;
  }
  // Always clear the protection again, whether or not an exception fired.
  WatcherThread::watcher_thread()->set_crash_protection(NULL);
  return success;
}

// An Event wraps a win32 "CreateEvent" kernel handle.
//
// We have a number of choices regarding "CreateEvent" win32 handle leakage:
//
// 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
//     field, and call CloseHandle() on the win32 event handle.  Unpark() would
//     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
//     In addition, an unpark() operation might fetch the handle field, but the
//     event could recycle between the fetch and the SetEvent() operation.
//     SetEvent() would either fail because the handle was invalid, or inadvertently work,
//     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
//     on a stale but recycled handle would be harmless, but in practice this might
//     confuse other non-Sun code, so it's not a viable approach.
//
// 2:  Once a win32 event handle is associated with an Event, it remains associated
//     with the Event.  The event handle is never closed.  This could be construed
//     as handle leakage, but only up to the maximum # of threads that have been extant
//     at any one time.  This shouldn't be an issue, as windows platforms typically
//     permit a process to have hundreds of thousands of open handles.
//
// 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
//     and release unused handles.
//
// 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
//     It's not clear, however, that we wouldn't be trading one type of leak for another.
//
// 5.  Use an RCU-like mechanism (Read-Copy Update).
//     Or perhaps something similar to Maged Michael's "Hazard pointers".
//
// We use (2).
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
// 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
//     to recover from (or at least detect) the dreaded Windows 841176 bug.
// 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
//     into a single win32 CreateEvent() handle.
//
// Assumption:
//    Only one parker can exist on an event, which is why we allocate
//    them per-thread. Multiple unparkers can coexist.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block; then set _Event to 0 before returning
//
// _Event transitions in unpark()
//    0 => 1 : just return
//    1 => 1 : just return
//   -1 => either 0 or 1; must signal target thread
//         That is, we can safely transition _Event from -1 to either
//         0 or 1.
//
// _Event serves as a restricted-range semaphore.
//   -1 : thread is blocked, i.e. there is a waiter
//    0 : neutral: thread is running or ready,
//        could have been signaled after a wait started
//    1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
//

int os::PlatformEvent::park(jlong Millis) {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL , "Invariant");
  guarantee(Millis > 0          , "Invariant");

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.
  // Consider: use atomic decrement instead of CAS-loop

  // Atomically decrement _Event (1 -> 0 consumes a permit, 0 -> -1 records
  // that this thread is about to block).
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return OS_OK;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
  }
  v = _Event;
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultaneous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}

void os::PlatformEvent::park() {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Consider: use atomic decrement instead of CAS-loop
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  // Loop guards against spurious wakeups: only an unpark() (which drives
  // _Event to >= 0) lets us fall through.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}

void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // xchg returns the previous value; only a waiter (-1) needs a signal.
  if (Atomic::xchg(1, &_Event) >= 0) return;

  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

// The Windows implementation of Park is very straightforward: Basic
// operations on Win32 Events turn out to have the right semantics to
// use them directly. We opportunistically reuse the event inherited
// from Monitor.

// Block the current thread until unpark() is called, the thread is
// interrupted, or the (relative nanos / absolute millis) timeout elapses.
void Parker::park(bool isAbsolute, jlong time) {
  guarantee(_ParkEvent != NULL, "invariant");
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  } else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  } else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) { // already elapsed
      return;
    }
  } else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0) { // Wait for the minimal time unit if zero
      time = 1;
    }
  }

  JavaThread* thread = JavaThread::current();

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    // Consume the pending permit so the next park() blocks again.
    ResetEvent(_ParkEvent);
    return;
  } else {
    // Transition to the blocked-in-VM state for the duration of the wait.
    ThreadBlockInVM tbivm(thread);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    thread->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (thread->handle_special_suspend_equivalent_condition()) {
      thread->java_suspend_self();
    }
  }
}

void Parker::unpark() {
  guarantee(_ParkEvent != NULL, "invariant");
  SetEvent(_ParkEvent);
}

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't create a new process).
5199 int os::fork_and_exec(char* cmd) { 5200 STARTUPINFO si; 5201 PROCESS_INFORMATION pi; 5202 5203 memset(&si, 0, sizeof(si)); 5204 si.cb = sizeof(si); 5205 memset(&pi, 0, sizeof(pi)); 5206 BOOL rslt = CreateProcess(NULL, // executable name - use command line 5207 cmd, // command line 5208 NULL, // process security attribute 5209 NULL, // thread security attribute 5210 TRUE, // inherits system handles 5211 0, // no creation flags 5212 NULL, // use parent's environment block 5213 NULL, // use parent's starting directory 5214 &si, // (in) startup information 5215 &pi); // (out) process information 5216 5217 if (rslt) { 5218 // Wait until child process exits. 5219 WaitForSingleObject(pi.hProcess, INFINITE); 5220 5221 DWORD exit_code; 5222 GetExitCodeProcess(pi.hProcess, &exit_code); 5223 5224 // Close process and thread handles. 5225 CloseHandle(pi.hProcess); 5226 CloseHandle(pi.hThread); 5227 5228 return (int)exit_code; 5229 } else { 5230 return -1; 5231 } 5232 } 5233 5234 //-------------------------------------------------------------------------------------------------- 5235 // Non-product code 5236 5237 static int mallocDebugIntervalCounter = 0; 5238 static int mallocDebugCounter = 0; 5239 bool os::check_heap(bool force) { 5240 if (++mallocDebugCounter < MallocVerifyStart && !force) return true; 5241 if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) { 5242 // Note: HeapValidate executes two hardware breakpoints when it finds something 5243 // wrong; at these points, eax contains the address of the offending block (I think). 5244 // To get to the exlicit error message(s) below, just continue twice. 5245 // 5246 // Note: we want to check the CRT heap, which is not necessarily located in the 5247 // process default heap. 
5248 HANDLE heap = (HANDLE) _get_heap_handle(); 5249 if (!heap) { 5250 return true; 5251 } 5252 5253 // If we fail to lock the heap, then gflags.exe has been used 5254 // or some other special heap flag has been set that prevents 5255 // locking. We don't try to walk a heap we can't lock. 5256 if (HeapLock(heap) != 0) { 5257 PROCESS_HEAP_ENTRY phe; 5258 phe.lpData = NULL; 5259 while (HeapWalk(heap, &phe) != 0) { 5260 if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) && 5261 !HeapValidate(heap, 0, phe.lpData)) { 5262 tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter); 5263 tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData); 5264 HeapUnlock(heap); 5265 fatal("corrupted C heap"); 5266 } 5267 } 5268 DWORD err = GetLastError(); 5269 if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) { 5270 HeapUnlock(heap); 5271 fatal("heap walk aborted with error %d", err); 5272 } 5273 HeapUnlock(heap); 5274 } 5275 mallocDebugIntervalCounter = 0; 5276 } 5277 return true; 5278 } 5279 5280 5281 bool os::find(address addr, outputStream* st) { 5282 int offset = -1; 5283 bool result = false; 5284 char buf[256]; 5285 if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) { 5286 st->print(PTR_FORMAT " ", addr); 5287 if (strlen(buf) < sizeof(buf) - 1) { 5288 char* p = strrchr(buf, '\\'); 5289 if (p) { 5290 st->print("%s", p + 1); 5291 } else { 5292 st->print("%s", buf); 5293 } 5294 } else { 5295 // The library name is probably truncated. Let's omit the library name. 5296 // See also JDK-8147512. 
5297 } 5298 if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) { 5299 st->print("::%s + 0x%x", buf, offset); 5300 } 5301 st->cr(); 5302 result = true; 5303 } 5304 return result; 5305 } 5306 5307 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) { 5308 DWORD exception_code = e->ExceptionRecord->ExceptionCode; 5309 5310 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 5311 JavaThread* thread = JavaThread::current(); 5312 PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord; 5313 address addr = (address) exceptionRecord->ExceptionInformation[1]; 5314 5315 if (os::is_memory_serialize_page(thread, addr)) { 5316 return EXCEPTION_CONTINUE_EXECUTION; 5317 } 5318 } 5319 5320 return EXCEPTION_CONTINUE_SEARCH; 5321 } 5322 5323 // We don't build a headless jre for Windows 5324 bool os::is_headless_jre() { return false; } 5325 5326 static jint initSock() { 5327 WSADATA wsadata; 5328 5329 if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) { 5330 jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n", 5331 ::GetLastError()); 5332 return JNI_ERR; 5333 } 5334 return JNI_OK; 5335 } 5336 5337 struct hostent* os::get_host_by_name(char* name) { 5338 return (struct hostent*)gethostbyname(name); 5339 } 5340 5341 int os::socket_close(int fd) { 5342 return ::closesocket(fd); 5343 } 5344 5345 int os::socket(int domain, int type, int protocol) { 5346 return ::socket(domain, type, protocol); 5347 } 5348 5349 int os::connect(int fd, struct sockaddr* him, socklen_t len) { 5350 return ::connect(fd, him, len); 5351 } 5352 5353 int os::recv(int fd, char* buf, size_t nBytes, uint flags) { 5354 return ::recv(fd, buf, (int)nBytes, flags); 5355 } 5356 5357 int os::send(int fd, char* buf, size_t nBytes, uint flags) { 5358 return ::send(fd, buf, (int)nBytes, flags); 5359 } 5360 5361 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) { 5362 return ::send(fd, buf, (int)nBytes, flags); 5363 } 5364 5365 // WINDOWS CONTEXT Flags for 
// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(IA32)
  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif

// returns true if thread could be suspended,
// false otherwise
static bool do_suspend(HANDLE* h) {
  if (h != NULL) {
    // SuspendThread returns (DWORD)-1 on failure.
    if (SuspendThread(*h) != ~0) {
      return true;
    }
  }
  return false;
}

// resume the thread
// calling resume on an active thread is a no-op
static void do_resume(HANDLE* h) {
  if (h != NULL) {
    ResumeThread(*h);
  }
}

// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value.
void get_thread_handle_for_extended_context(HANDLE* h,
                                            OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

// Thread sampling implementation
//
// Suspend the target thread, capture its CPU context, hand the context to
// do_task(), then resume the thread.
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    // NOTE(review): the GetThreadContext return value is not checked here;
    // on failure ctxt would be partially uninitialized - confirm whether
    // do_task() tolerates that.
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}

// Append a "do you want to debug?" prompt to buf and show a message box.
// If the user selects Yes, trigger a breakpoint so an attached (or newly
// spawned) debugger takes over. Returns the (possibly reset) user choice.
bool os::start_debugging(char *buf, int buflen) {
  int len = (int)strlen(buf);
  char *p = &buf[len];

  jio_snprintf(p, buflen-len,
               "\n\n"
               "Do you want to debug the problem?\n\n"
               "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
               "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
               "Otherwise, select 'No' to abort...",
               os::current_process_id(), os::current_thread_id());

  bool yes = os::message_box("Unexpected Error", buf);

  if (yes) {
    // os::breakpoint() calls DebugBreak(), which causes a breakpoint
    // exception. If VM is running inside a debugger, the debugger will
    // catch the exception. Otherwise, the breakpoint exception will reach
    // the default windows exception handler, which can spawn a debugger and
    // automatically attach to the dying VM.
    os::breakpoint();
    yes = false;
  }
  return yes;
}

void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//                             == false if only the base name of the library is passed in
//                                     such as "L"
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      // NOTE(review): 'len' is the length of the full path, not of the
      // stripped base name - this check is looser than it looks; confirm
      // it is intentional before tightening.
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // +2: one for the '_' separator, one for the terminating NUL.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX  (__stdcall decoration)
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}

#ifndef PRODUCT

// test the code path in reserve_memory_special() that tries to allocate memory in a single
// contiguous memory block at a particular address.
// The test first tries to find a good approximate address to allocate at by using the same
// method to allocate some memory at any address. The test then tries to allocate memory in
// the vicinity (not directly after it to avoid possible by-chance use of that location)
// This is of course only some dodgy assumption, there is no guarantee that the vicinity of
// the previously allocated memory is available for allocation. The only actual failure
// that is reported is when the test tries to allocate at a particular location but gets a
// different valid one. A NULL return value at this point is not considered an error but may
// be legitimate.
// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5538 void TestReserveMemorySpecial_test() { 5539 if (!UseLargePages) { 5540 if (VerboseInternalVMTests) { 5541 tty->print("Skipping test because large pages are disabled"); 5542 } 5543 return; 5544 } 5545 // save current value of globals 5546 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; 5547 bool old_use_numa_interleaving = UseNUMAInterleaving; 5548 5549 // set globals to make sure we hit the correct code path 5550 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; 5551 5552 // do an allocation at an address selected by the OS to get a good one. 5553 const size_t large_allocation_size = os::large_page_size() * 4; 5554 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); 5555 if (result == NULL) { 5556 if (VerboseInternalVMTests) { 5557 tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.", 5558 large_allocation_size); 5559 } 5560 } else { 5561 os::release_memory_special(result, large_allocation_size); 5562 5563 // allocate another page within the recently allocated memory area which seems to be a good location. At least 5564 // we managed to get it once. 5565 const size_t expected_allocation_size = os::large_page_size(); 5566 char* expected_location = result + os::large_page_size(); 5567 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); 5568 if (actual_location == NULL) { 5569 if (VerboseInternalVMTests) { 5570 tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.", 5571 expected_location, large_allocation_size); 5572 } 5573 } else { 5574 // release memory 5575 os::release_memory_special(actual_location, expected_allocation_size); 5576 // only now check, after releasing any memory to avoid any leaks. 
5577 assert(actual_location == expected_location, 5578 "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead", 5579 expected_location, expected_allocation_size, actual_location); 5580 } 5581 } 5582 5583 // restore globals 5584 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; 5585 UseNUMAInterleaving = old_use_numa_interleaving; 5586 } 5587 #endif // PRODUCT 5588 5589 /* 5590 All the defined signal names for Windows. 5591 5592 NOTE that not all of these names are accepted by FindSignal! 5593 5594 For various reasons some of these may be rejected at runtime. 5595 5596 Here are the names currently accepted by a user of sun.misc.Signal with 5597 1.4.1 (ignoring potential interaction with use of chaining, etc): 5598 5599 (LIST TBD) 5600 5601 */ 5602 int os::get_signal_number(const char* name) { 5603 static const struct { 5604 char* name; 5605 int number; 5606 } siglabels [] = 5607 // derived from version 6.0 VC98/include/signal.h 5608 {"ABRT", SIGABRT, // abnormal termination triggered by abort cl 5609 "FPE", SIGFPE, // floating point exception 5610 "SEGV", SIGSEGV, // segment violation 5611 "INT", SIGINT, // interrupt 5612 "TERM", SIGTERM, // software term signal from kill 5613 "BREAK", SIGBREAK, // Ctrl-Break sequence 5614 "ILL", SIGILL}; // illegal instruction 5615 for(int i=0;i<sizeof(siglabels)/sizeof(struct siglabel);i++) 5616 if(!strcmp(name, siglabels[i].name)) 5617 return siglabels[i].number; 5618 return -1; 5619 } 5620 5621 // Fast current thread access 5622 5623 int os::win32::_thread_ptr_offset = 0; 5624 5625 static void call_wrapper_dummy() {} 5626 5627 // We need to call the os_exception_wrapper once so that it sets 5628 // up the offset from FS of the thread pointer. 5629 void os::win32::initialize_thread_ptr_offset() { 5630 os::os_exception_wrapper((java_call_t)call_wrapper_dummy, 5631 NULL, NULL, NULL, NULL); 5632 }