1 /* 2 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce 26 #define _WIN32_WINNT 0x0600 27 28 // no precompiled headers 29 #include "classfile/classLoader.hpp" 30 #include "classfile/systemDictionary.hpp" 31 #include "classfile/vmSymbols.hpp" 32 #include "code/icBuffer.hpp" 33 #include "code/vtableStubs.hpp" 34 #include "compiler/compileBroker.hpp" 35 #include "compiler/disassembler.hpp" 36 #include "interpreter/interpreter.hpp" 37 #include "jvm_windows.h" 38 #include "memory/allocation.inline.hpp" 39 #include "memory/filemap.hpp" 40 #include "mutex_windows.inline.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "os_share_windows.hpp" 43 #include "os_windows.inline.hpp" 44 #include "prims/jniFastGetField.hpp" 45 #include "prims/jvm.h" 46 #include "prims/jvm_misc.hpp" 47 #include "runtime/arguments.hpp" 48 #include "runtime/atomic.inline.hpp" 49 #include "runtime/extendedPC.hpp" 50 #include "runtime/globals.hpp" 51 #include "runtime/interfaceSupport.hpp" 52 #include "runtime/java.hpp" 53 #include "runtime/javaCalls.hpp" 54 #include "runtime/mutexLocker.hpp" 55 #include "runtime/objectMonitor.hpp" 56 #include "runtime/orderAccess.inline.hpp" 57 #include "runtime/osThread.hpp" 58 #include "runtime/perfMemory.hpp" 59 #include "runtime/sharedRuntime.hpp" 60 #include "runtime/statSampler.hpp" 61 #include "runtime/stubRoutines.hpp" 62 #include "runtime/thread.inline.hpp" 63 #include "runtime/threadCritical.hpp" 64 #include "runtime/timer.hpp" 65 #include "runtime/vm_version.hpp" 66 #include "semaphore_windows.hpp" 67 #include "services/attachListener.hpp" 68 #include "services/memTracker.hpp" 69 #include "services/runtimeService.hpp" 70 #include "utilities/decoder.hpp" 71 #include "utilities/defaultStream.hpp" 72 #include "utilities/events.hpp" 73 #include "utilities/growableArray.hpp" 74 #include "utilities/vmError.hpp" 75 76 #ifdef _DEBUG 77 #include <crtdbg.h> 78 #endif 79 80 81 #include <windows.h> 82 #include <sys/types.h> 83 
#include <sys/stat.h> 84 #include <sys/timeb.h> 85 #include <objidl.h> 86 #include <shlobj.h> 87 88 #include <malloc.h> 89 #include <signal.h> 90 #include <direct.h> 91 #include <errno.h> 92 #include <fcntl.h> 93 #include <io.h> 94 #include <process.h> // For _beginthreadex(), _endthreadex() 95 #include <imagehlp.h> // For os::dll_address_to_function_name 96 // for enumerating dll libraries 97 #include <vdmdbg.h> 98 99 // for timer info max values which include all bits 100 #define ALL_64_BITS CONST64(-1) 101 102 // For DLL loading/load error detection 103 // Values of PE COFF 104 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c 105 #define IMAGE_FILE_SIGNATURE_LENGTH 4 106 107 static HANDLE main_process; 108 static HANDLE main_thread; 109 static int main_thread_id; 110 111 static FILETIME process_creation_time; 112 static FILETIME process_exit_time; 113 static FILETIME process_user_time; 114 static FILETIME process_kernel_time; 115 116 #ifdef _M_IA64 117 #define __CPU__ ia64 118 #else 119 #ifdef _M_AMD64 120 #define __CPU__ amd64 121 #else 122 #define __CPU__ i486 123 #endif 124 #endif 125 126 // save DLL module handle, used by GetModuleFileName 127 128 HINSTANCE vm_lib_handle; 129 130 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) { 131 switch (reason) { 132 case DLL_PROCESS_ATTACH: 133 vm_lib_handle = hinst; 134 if (ForceTimeHighResolution) { 135 timeBeginPeriod(1L); 136 } 137 break; 138 case DLL_PROCESS_DETACH: 139 if (ForceTimeHighResolution) { 140 timeEndPeriod(1L); 141 } 142 break; 143 default: 144 break; 145 } 146 return true; 147 } 148 149 static inline double fileTimeAsDouble(FILETIME* time) { 150 const double high = (double) ((unsigned int) ~0); 151 const double split = 10000000.0; 152 double result = (time->dwLowDateTime / split) + 153 time->dwHighDateTime * (high/split); 154 return result; 155 } 156 157 // Implementation of os 158 159 bool os::unsetenv(const char* name) { 160 assert(name != NULL, "Null pointer"); 161 return 
(SetEnvironmentVariable(name, NULL) == TRUE); 162 } 163 164 // No setuid programs under Windows. 165 bool os::have_special_privileges() { 166 return false; 167 } 168 169 170 // This method is a periodic task to check for misbehaving JNI applications 171 // under CheckJNI, we can add any periodic checks here. 172 // For Windows at the moment does nothing 173 void os::run_periodic_checks() { 174 return; 175 } 176 177 // previous UnhandledExceptionFilter, if there is one 178 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL; 179 180 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo); 181 182 void os::init_system_properties_values() { 183 // sysclasspath, java_home, dll_dir 184 { 185 char *home_path; 186 char *dll_path; 187 char *pslash; 188 char *bin = "\\bin"; 189 char home_dir[MAX_PATH + 1]; 190 char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR"); 191 192 if (alt_home_dir != NULL) { 193 strncpy(home_dir, alt_home_dir, MAX_PATH + 1); 194 home_dir[MAX_PATH] = '\0'; 195 } else { 196 os::jvm_path(home_dir, sizeof(home_dir)); 197 // Found the full path to jvm.dll. 198 // Now cut the path to <java_home>/jre if we can. 
199 *(strrchr(home_dir, '\\')) = '\0'; // get rid of \jvm.dll 200 pslash = strrchr(home_dir, '\\'); 201 if (pslash != NULL) { 202 *pslash = '\0'; // get rid of \{client|server} 203 pslash = strrchr(home_dir, '\\'); 204 if (pslash != NULL) { 205 *pslash = '\0'; // get rid of \bin 206 } 207 } 208 } 209 210 home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal); 211 if (home_path == NULL) { 212 return; 213 } 214 strcpy(home_path, home_dir); 215 Arguments::set_java_home(home_path); 216 FREE_C_HEAP_ARRAY(char, home_path); 217 218 dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, 219 mtInternal); 220 if (dll_path == NULL) { 221 return; 222 } 223 strcpy(dll_path, home_dir); 224 strcat(dll_path, bin); 225 Arguments::set_dll_dir(dll_path); 226 FREE_C_HEAP_ARRAY(char, dll_path); 227 228 if (!set_boot_path('\\', ';')) { 229 return; 230 } 231 } 232 233 // library_path 234 #define EXT_DIR "\\lib\\ext" 235 #define BIN_DIR "\\bin" 236 #define PACKAGE_DIR "\\Sun\\Java" 237 { 238 // Win32 library search order (See the documentation for LoadLibrary): 239 // 240 // 1. The directory from which application is loaded. 241 // 2. The system wide Java Extensions directory (Java only) 242 // 3. System directory (GetSystemDirectory) 243 // 4. Windows directory (GetWindowsDirectory) 244 // 5. The PATH environment variable 245 // 6. The current directory 246 247 char *library_path; 248 char tmp[MAX_PATH]; 249 char *path_str = ::getenv("PATH"); 250 251 library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) + 252 sizeof(BIN_DIR) + (path_str ? 
strlen(path_str) : 0) + 10, mtInternal); 253 254 library_path[0] = '\0'; 255 256 GetModuleFileName(NULL, tmp, sizeof(tmp)); 257 *(strrchr(tmp, '\\')) = '\0'; 258 strcat(library_path, tmp); 259 260 GetWindowsDirectory(tmp, sizeof(tmp)); 261 strcat(library_path, ";"); 262 strcat(library_path, tmp); 263 strcat(library_path, PACKAGE_DIR BIN_DIR); 264 265 GetSystemDirectory(tmp, sizeof(tmp)); 266 strcat(library_path, ";"); 267 strcat(library_path, tmp); 268 269 GetWindowsDirectory(tmp, sizeof(tmp)); 270 strcat(library_path, ";"); 271 strcat(library_path, tmp); 272 273 if (path_str) { 274 strcat(library_path, ";"); 275 strcat(library_path, path_str); 276 } 277 278 strcat(library_path, ";."); 279 280 Arguments::set_library_path(library_path); 281 FREE_C_HEAP_ARRAY(char, library_path); 282 } 283 284 // Default extensions directory 285 { 286 char path[MAX_PATH]; 287 char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1]; 288 GetWindowsDirectory(path, MAX_PATH); 289 sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR, 290 path, PACKAGE_DIR, EXT_DIR); 291 Arguments::set_ext_dirs(buf); 292 } 293 #undef EXT_DIR 294 #undef BIN_DIR 295 #undef PACKAGE_DIR 296 297 #ifndef _WIN64 298 // set our UnhandledExceptionFilter and save any previous one 299 prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception); 300 #endif 301 302 // Done 303 return; 304 } 305 306 void os::breakpoint() { 307 DebugBreak(); 308 } 309 310 // Invoked from the BREAKPOINT Macro 311 extern "C" void breakpoint() { 312 os::breakpoint(); 313 } 314 315 // RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP. 316 // So far, this method is only used by Native Memory Tracking, which is 317 // only supported on Windows XP or later. 
318 // 319 int os::get_native_stack(address* stack, int frames, int toSkip) { 320 #ifdef _NMT_NOINLINE_ 321 toSkip++; 322 #endif 323 int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL); 324 for (int index = captured; index < frames; index ++) { 325 stack[index] = NULL; 326 } 327 return captured; 328 } 329 330 331 // os::current_stack_base() 332 // 333 // Returns the base of the stack, which is the stack's 334 // starting address. This function must be called 335 // while running on the stack of the thread being queried. 336 337 address os::current_stack_base() { 338 MEMORY_BASIC_INFORMATION minfo; 339 address stack_bottom; 340 size_t stack_size; 341 342 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 343 stack_bottom = (address)minfo.AllocationBase; 344 stack_size = minfo.RegionSize; 345 346 // Add up the sizes of all the regions with the same 347 // AllocationBase. 348 while (1) { 349 VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo)); 350 if (stack_bottom == (address)minfo.AllocationBase) { 351 stack_size += minfo.RegionSize; 352 } else { 353 break; 354 } 355 } 356 357 #ifdef _M_IA64 358 // IA64 has memory and register stacks 359 // 360 // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit 361 // at thread creation (1MB backing store growing upwards, 1MB memory stack 362 // growing downwards, 2MB summed up) 363 // 364 // ... 365 // ------- top of stack (high address) ----- 366 // | 367 // | 1MB 368 // | Backing Store (Register Stack) 369 // | 370 // | / \ 371 // | | 372 // | | 373 // | | 374 // ------------------------ stack base ----- 375 // | 1MB 376 // | Memory Stack 377 // | 378 // | | 379 // | | 380 // | | 381 // | \ / 382 // | 383 // ----- bottom of stack (low address) ----- 384 // ... 
385 386 stack_size = stack_size / 2; 387 #endif 388 return stack_bottom + stack_size; 389 } 390 391 size_t os::current_stack_size() { 392 size_t sz; 393 MEMORY_BASIC_INFORMATION minfo; 394 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 395 sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase; 396 return sz; 397 } 398 399 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 400 const struct tm* time_struct_ptr = localtime(clock); 401 if (time_struct_ptr != NULL) { 402 *res = *time_struct_ptr; 403 return res; 404 } 405 return NULL; 406 } 407 408 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo); 409 410 // Thread start routine for all new Java threads 411 static unsigned __stdcall java_start(Thread* thread) { 412 // Try to randomize the cache line index of hot stack frames. 413 // This helps when threads of the same stack traces evict each other's 414 // cache lines. The threads can be either from the same JVM instance, or 415 // from different JVM instances. The benefit is especially true for 416 // processors with hyperthreading technology. 417 static int counter = 0; 418 int pid = os::current_process_id(); 419 _alloca(((pid ^ counter++) & 7) * 128); 420 421 thread->initialize_thread_current(); 422 423 OSThread* osthr = thread->osthread(); 424 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 425 426 if (UseNUMA) { 427 int lgrp_id = os::numa_get_group_id(); 428 if (lgrp_id != -1) { 429 thread->set_lgrp_id(lgrp_id); 430 } 431 } 432 433 // Diagnostic code to investigate JDK-6573254 434 int res = 30115; // non-java thread 435 if (thread->is_Java_thread()) { 436 res = 20115; // java thread 437 } 438 439 // Install a win32 structured exception handler around every thread created 440 // by VM, so VM can generate error dump when an exception occurred in non- 441 // Java thread (e.g. VM thread). 
442 __try { 443 thread->run(); 444 } __except(topLevelExceptionFilter( 445 (_EXCEPTION_POINTERS*)_exception_info())) { 446 // Nothing to do. 447 } 448 449 // One less thread is executing 450 // When the VMThread gets here, the main thread may have already exited 451 // which frees the CodeHeap containing the Atomic::add code 452 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) { 453 Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count); 454 } 455 456 // Thread must not return from exit_process_or_thread(), but if it does, 457 // let it proceed to exit normally 458 return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res); 459 } 460 461 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, 462 int thread_id) { 463 // Allocate the OSThread object 464 OSThread* osthread = new OSThread(NULL, NULL); 465 if (osthread == NULL) return NULL; 466 467 // Initialize support for Java interrupts 468 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 469 if (interrupt_event == NULL) { 470 delete osthread; 471 return NULL; 472 } 473 osthread->set_interrupt_event(interrupt_event); 474 475 // Store info on the Win32 thread into the OSThread 476 osthread->set_thread_handle(thread_handle); 477 osthread->set_thread_id(thread_id); 478 479 if (UseNUMA) { 480 int lgrp_id = os::numa_get_group_id(); 481 if (lgrp_id != -1) { 482 thread->set_lgrp_id(lgrp_id); 483 } 484 } 485 486 // Initial thread state is INITIALIZED, not SUSPENDED 487 osthread->set_state(INITIALIZED); 488 489 return osthread; 490 } 491 492 493 bool os::create_attached_thread(JavaThread* thread) { 494 #ifdef ASSERT 495 thread->verify_not_published(); 496 #endif 497 HANDLE thread_h; 498 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(), 499 &thread_h, THREAD_ALL_ACCESS, false, 0)) { 500 fatal("DuplicateHandle failed\n"); 501 } 502 OSThread* osthread = create_os_thread(thread, thread_h, 503 (int)current_thread_id()); 504 if 
(osthread == NULL) { 505 return false; 506 } 507 508 // Initial thread state is RUNNABLE 509 osthread->set_state(RUNNABLE); 510 511 thread->set_osthread(osthread); 512 return true; 513 } 514 515 bool os::create_main_thread(JavaThread* thread) { 516 #ifdef ASSERT 517 thread->verify_not_published(); 518 #endif 519 if (_starting_thread == NULL) { 520 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 521 if (_starting_thread == NULL) { 522 return false; 523 } 524 } 525 526 // The primordial thread is runnable from the start) 527 _starting_thread->set_state(RUNNABLE); 528 529 thread->set_osthread(_starting_thread); 530 return true; 531 } 532 533 // Allocate and initialize a new OSThread 534 bool os::create_thread(Thread* thread, ThreadType thr_type, 535 size_t stack_size) { 536 unsigned thread_id; 537 538 // Allocate the OSThread object 539 OSThread* osthread = new OSThread(NULL, NULL); 540 if (osthread == NULL) { 541 return false; 542 } 543 544 // Initialize support for Java interrupts 545 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 546 if (interrupt_event == NULL) { 547 delete osthread; 548 return NULL; 549 } 550 osthread->set_interrupt_event(interrupt_event); 551 osthread->set_interrupted(false); 552 553 thread->set_osthread(osthread); 554 555 if (stack_size == 0) { 556 switch (thr_type) { 557 case os::java_thread: 558 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 559 if (JavaThread::stack_size_at_create() > 0) { 560 stack_size = JavaThread::stack_size_at_create(); 561 } 562 break; 563 case os::compiler_thread: 564 if (CompilerThreadStackSize > 0) { 565 stack_size = (size_t)(CompilerThreadStackSize * K); 566 break; 567 } // else fall through: 568 // use VMThreadStackSize if CompilerThreadStackSize is not defined 569 case os::vm_thread: 570 case os::pgc_thread: 571 case os::cgc_thread: 572 case os::watcher_thread: 573 if (VMThreadStackSize > 0) stack_size = 
(size_t)(VMThreadStackSize * K); 574 break; 575 } 576 } 577 578 // Create the Win32 thread 579 // 580 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() 581 // does not specify stack size. Instead, it specifies the size of 582 // initially committed space. The stack size is determined by 583 // PE header in the executable. If the committed "stack_size" is larger 584 // than default value in the PE header, the stack is rounded up to the 585 // nearest multiple of 1MB. For example if the launcher has default 586 // stack size of 320k, specifying any size less than 320k does not 587 // affect the actual stack size at all, it only affects the initial 588 // commitment. On the other hand, specifying 'stack_size' larger than 589 // default value may cause significant increase in memory usage, because 590 // not only the stack space will be rounded up to MB, but also the 591 // entire space is committed upfront. 592 // 593 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' 594 // for CreateThread() that can treat 'stack_size' as stack size. However we 595 // are not supposed to call CreateThread() directly according to MSDN 596 // document because JVM uses C runtime library. The good news is that the 597 // flag appears to work with _beginthredex() as well. 
598 599 HANDLE thread_handle = 600 (HANDLE)_beginthreadex(NULL, 601 (unsigned)stack_size, 602 (unsigned (__stdcall *)(void*)) java_start, 603 thread, 604 CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 605 &thread_id); 606 607 if (thread_handle == NULL) { 608 // Need to clean up stuff we've allocated so far 609 CloseHandle(osthread->interrupt_event()); 610 thread->set_osthread(NULL); 611 delete osthread; 612 return NULL; 613 } 614 615 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count); 616 617 // Store info on the Win32 thread into the OSThread 618 osthread->set_thread_handle(thread_handle); 619 osthread->set_thread_id(thread_id); 620 621 // Initial thread state is INITIALIZED, not SUSPENDED 622 osthread->set_state(INITIALIZED); 623 624 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 625 return true; 626 } 627 628 629 // Free Win32 resources related to the OSThread 630 void os::free_thread(OSThread* osthread) { 631 assert(osthread != NULL, "osthread not set"); 632 CloseHandle(osthread->thread_handle()); 633 CloseHandle(osthread->interrupt_event()); 634 delete osthread; 635 } 636 637 static jlong first_filetime; 638 static jlong initial_performance_count; 639 static jlong performance_frequency; 640 641 642 jlong as_long(LARGE_INTEGER x) { 643 jlong result = 0; // initialization to avoid warning 644 set_high(&result, x.HighPart); 645 set_low(&result, x.LowPart); 646 return result; 647 } 648 649 650 jlong os::elapsed_counter() { 651 LARGE_INTEGER count; 652 QueryPerformanceCounter(&count); 653 return as_long(count) - initial_performance_count; 654 } 655 656 657 jlong os::elapsed_frequency() { 658 return performance_frequency; 659 } 660 661 662 julong os::available_memory() { 663 return win32::available_memory(); 664 } 665 666 julong os::win32::available_memory() { 667 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 668 // value if total memory is larger than 4GB 669 
MEMORYSTATUSEX ms; 670 ms.dwLength = sizeof(ms); 671 GlobalMemoryStatusEx(&ms); 672 673 return (julong)ms.ullAvailPhys; 674 } 675 676 julong os::physical_memory() { 677 return win32::physical_memory(); 678 } 679 680 bool os::has_allocatable_memory_limit(julong* limit) { 681 MEMORYSTATUSEX ms; 682 ms.dwLength = sizeof(ms); 683 GlobalMemoryStatusEx(&ms); 684 #ifdef _LP64 685 *limit = (julong)ms.ullAvailVirtual; 686 return true; 687 #else 688 // Limit to 1400m because of the 2gb address space wall 689 *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual); 690 return true; 691 #endif 692 } 693 694 int os::active_processor_count() { 695 DWORD_PTR lpProcessAffinityMask = 0; 696 DWORD_PTR lpSystemAffinityMask = 0; 697 int proc_count = processor_count(); 698 if (proc_count <= sizeof(UINT_PTR) * BitsPerByte && 699 GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) { 700 // Nof active processors is number of bits in process affinity mask 701 int bitcount = 0; 702 while (lpProcessAffinityMask != 0) { 703 lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1); 704 bitcount++; 705 } 706 return bitcount; 707 } else { 708 return proc_count; 709 } 710 } 711 712 void os::set_native_thread_name(const char *name) { 713 714 // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx 715 // 716 // Note that unfortunately this only works if the process 717 // is already attached to a debugger; debugger must observe 718 // the exception below to show the correct name. 
719 720 const DWORD MS_VC_EXCEPTION = 0x406D1388; 721 struct { 722 DWORD dwType; // must be 0x1000 723 LPCSTR szName; // pointer to name (in user addr space) 724 DWORD dwThreadID; // thread ID (-1=caller thread) 725 DWORD dwFlags; // reserved for future use, must be zero 726 } info; 727 728 info.dwType = 0x1000; 729 info.szName = name; 730 info.dwThreadID = -1; 731 info.dwFlags = 0; 732 733 __try { 734 RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info ); 735 } __except(EXCEPTION_CONTINUE_EXECUTION) {} 736 } 737 738 bool os::distribute_processes(uint length, uint* distribution) { 739 // Not yet implemented. 740 return false; 741 } 742 743 bool os::bind_to_processor(uint processor_id) { 744 // Not yet implemented. 745 return false; 746 } 747 748 void os::win32::initialize_performance_counter() { 749 LARGE_INTEGER count; 750 QueryPerformanceFrequency(&count); 751 performance_frequency = as_long(count); 752 QueryPerformanceCounter(&count); 753 initial_performance_count = as_long(count); 754 } 755 756 757 double os::elapsedTime() { 758 return (double) elapsed_counter() / (double) elapsed_frequency(); 759 } 760 761 762 // Windows format: 763 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601. 
764 // Java format: 765 // Java standards require the number of milliseconds since 1/1/1970 766 767 // Constant offset - calculated using offset() 768 static jlong _offset = 116444736000000000; 769 // Fake time counter for reproducible results when debugging 770 static jlong fake_time = 0; 771 772 #ifdef ASSERT 773 // Just to be safe, recalculate the offset in debug mode 774 static jlong _calculated_offset = 0; 775 static int _has_calculated_offset = 0; 776 777 jlong offset() { 778 if (_has_calculated_offset) return _calculated_offset; 779 SYSTEMTIME java_origin; 780 java_origin.wYear = 1970; 781 java_origin.wMonth = 1; 782 java_origin.wDayOfWeek = 0; // ignored 783 java_origin.wDay = 1; 784 java_origin.wHour = 0; 785 java_origin.wMinute = 0; 786 java_origin.wSecond = 0; 787 java_origin.wMilliseconds = 0; 788 FILETIME jot; 789 if (!SystemTimeToFileTime(&java_origin, &jot)) { 790 fatal("Error = %d\nWindows error", GetLastError()); 791 } 792 _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime); 793 _has_calculated_offset = 1; 794 assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal"); 795 return _calculated_offset; 796 } 797 #else 798 jlong offset() { 799 return _offset; 800 } 801 #endif 802 803 jlong windows_to_java_time(FILETIME wt) { 804 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 805 return (a - offset()) / 10000; 806 } 807 808 // Returns time ticks in (10th of micro seconds) 809 jlong windows_to_time_ticks(FILETIME wt) { 810 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 811 return (a - offset()); 812 } 813 814 FILETIME java_to_windows_time(jlong l) { 815 jlong a = (l * 10000) + offset(); 816 FILETIME result; 817 result.dwHighDateTime = high(a); 818 result.dwLowDateTime = low(a); 819 return result; 820 } 821 822 bool os::supports_vtime() { return true; } 823 bool os::enable_vtime() { return false; } 824 bool os::vtime_enabled() { return false; } 825 826 double os::elapsedVTime() { 
827 FILETIME created; 828 FILETIME exited; 829 FILETIME kernel; 830 FILETIME user; 831 if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) { 832 // the resolution of windows_to_java_time() should be sufficient (ms) 833 return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS; 834 } else { 835 return elapsedTime(); 836 } 837 } 838 839 jlong os::javaTimeMillis() { 840 if (UseFakeTimers) { 841 return fake_time++; 842 } else { 843 FILETIME wt; 844 GetSystemTimeAsFileTime(&wt); 845 return windows_to_java_time(wt); 846 } 847 } 848 849 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) { 850 FILETIME wt; 851 GetSystemTimeAsFileTime(&wt); 852 jlong ticks = windows_to_time_ticks(wt); // 10th of micros 853 jlong secs = jlong(ticks / 10000000); // 10000 * 1000 854 seconds = secs; 855 nanos = jlong(ticks - (secs*10000000)) * 100; 856 } 857 858 jlong os::javaTimeNanos() { 859 LARGE_INTEGER current_count; 860 QueryPerformanceCounter(¤t_count); 861 double current = as_long(current_count); 862 double freq = performance_frequency; 863 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC); 864 return time; 865 } 866 867 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 868 jlong freq = performance_frequency; 869 if (freq < NANOSECS_PER_SEC) { 870 // the performance counter is 64 bits and we will 871 // be multiplying it -- so no wrap in 64 bits 872 info_ptr->max_value = ALL_64_BITS; 873 } else if (freq > NANOSECS_PER_SEC) { 874 // use the max value the counter can reach to 875 // determine the max value which could be returned 876 julong max_counter = (julong)ALL_64_BITS; 877 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC)); 878 } else { 879 // the performance counter is 64 bits and we will 880 // be using it directly -- so no wrap in 64 bits 881 info_ptr->max_value = ALL_64_BITS; 882 } 883 884 // using a counter, so no skipping 885 info_ptr->may_skip_backward = false; 886 
info_ptr->may_skip_forward = false; 887 888 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time 889 } 890 891 char* os::local_time_string(char *buf, size_t buflen) { 892 SYSTEMTIME st; 893 GetLocalTime(&st); 894 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 895 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); 896 return buf; 897 } 898 899 bool os::getTimesSecs(double* process_real_time, 900 double* process_user_time, 901 double* process_system_time) { 902 HANDLE h_process = GetCurrentProcess(); 903 FILETIME create_time, exit_time, kernel_time, user_time; 904 BOOL result = GetProcessTimes(h_process, 905 &create_time, 906 &exit_time, 907 &kernel_time, 908 &user_time); 909 if (result != 0) { 910 FILETIME wt; 911 GetSystemTimeAsFileTime(&wt); 912 jlong rtc_millis = windows_to_java_time(wt); 913 jlong user_millis = windows_to_java_time(user_time); 914 jlong system_millis = windows_to_java_time(kernel_time); 915 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS); 916 *process_user_time = ((double) user_millis) / ((double) MILLIUNITS); 917 *process_system_time = ((double) system_millis) / ((double) MILLIUNITS); 918 return true; 919 } else { 920 return false; 921 } 922 } 923 924 void os::shutdown() { 925 // allow PerfMemory to attempt cleanup of any persistent resources 926 perfMemory_exit(); 927 928 // flush buffered output, finish log files 929 ostream_abort(); 930 931 // Check for abort hook 932 abort_hook_t abort_hook = Arguments::abort_hook(); 933 if (abort_hook != NULL) { 934 abort_hook(); 935 } 936 } 937 938 939 static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, 940 PMINIDUMP_EXCEPTION_INFORMATION, 941 PMINIDUMP_USER_STREAM_INFORMATION, 942 PMINIDUMP_CALLBACK_INFORMATION); 943 944 static HANDLE dumpFile = NULL; 945 946 // Check if dump file can be created. 
947 void os::check_dump_limit(char* buffer, size_t buffsz) { 948 bool status = true; 949 if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) { 950 jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line"); 951 status = false; 952 } 953 954 #ifndef ASSERT 955 if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) { 956 jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows"); 957 status = false; 958 } 959 #endif 960 961 if (status) { 962 const char* cwd = get_current_directory(NULL, 0); 963 int pid = current_process_id(); 964 if (cwd != NULL) { 965 jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid); 966 } else { 967 jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid); 968 } 969 970 if (dumpFile == NULL && 971 (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL)) 972 == INVALID_HANDLE_VALUE) { 973 jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError()); 974 status = false; 975 } 976 } 977 VMError::record_coredump_status(buffer, status); 978 } 979 980 void os::abort(bool dump_core, void* siginfo, const void* context) { 981 HINSTANCE dbghelp; 982 EXCEPTION_POINTERS ep; 983 MINIDUMP_EXCEPTION_INFORMATION mei; 984 MINIDUMP_EXCEPTION_INFORMATION* pmei; 985 986 HANDLE hProcess = GetCurrentProcess(); 987 DWORD processId = GetCurrentProcessId(); 988 MINIDUMP_TYPE dumpType; 989 990 shutdown(); 991 if (!dump_core || dumpFile == NULL) { 992 if (dumpFile != NULL) { 993 CloseHandle(dumpFile); 994 } 995 win32::exit_process_or_thread(win32::EPT_PROCESS, 1); 996 } 997 998 dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0); 999 1000 if (dbghelp == NULL) { 1001 jio_fprintf(stderr, "Failed to load dbghelp.dll\n"); 1002 CloseHandle(dumpFile); 1003 win32::exit_process_or_thread(win32::EPT_PROCESS, 1); 1004 } 1005 1006 _MiniDumpWriteDump = 1007 CAST_TO_FN_PTR(BOOL(WINAPI 
*)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, 1008 PMINIDUMP_EXCEPTION_INFORMATION, 1009 PMINIDUMP_USER_STREAM_INFORMATION, 1010 PMINIDUMP_CALLBACK_INFORMATION), 1011 GetProcAddress(dbghelp, 1012 "MiniDumpWriteDump")); 1013 1014 if (_MiniDumpWriteDump == NULL) { 1015 jio_fprintf(stderr, "Failed to find MiniDumpWriteDump() in module dbghelp.dll.\n"); 1016 CloseHandle(dumpFile); 1017 win32::exit_process_or_thread(win32::EPT_PROCESS, 1); 1018 } 1019 1020 dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData | 1021 MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules); 1022 1023 if (siginfo != NULL && context != NULL) { 1024 ep.ContextRecord = (PCONTEXT) context; 1025 ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo; 1026 1027 mei.ThreadId = GetCurrentThreadId(); 1028 mei.ExceptionPointers = &ep; 1029 pmei = &mei; 1030 } else { 1031 pmei = NULL; 1032 } 1033 1034 // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all 1035 // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then. 1036 if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false && 1037 _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) { 1038 jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError()); 1039 } 1040 CloseHandle(dumpFile); 1041 win32::exit_process_or_thread(win32::EPT_PROCESS, 1); 1042 } 1043 1044 // Die immediately, no exit hook, no abort hook, no cleanup. 1045 void os::die() { 1046 win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1); 1047 } 1048 1049 // Directory routines copied from src/win32/native/java/io/dirent_md.c 1050 // * dirent_md.c 1.15 00/02/02 1051 // 1052 // The declarations for DIR and struct dirent are in jvm_win32.h. 
1053 1054 // Caller must have already run dirname through JVM_NativePath, which removes 1055 // duplicate slashes and converts all instances of '/' into '\\'. 1056 1057 DIR * os::opendir(const char *dirname) { 1058 assert(dirname != NULL, "just checking"); // hotspot change 1059 DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal); 1060 DWORD fattr; // hotspot change 1061 char alt_dirname[4] = { 0, 0, 0, 0 }; 1062 1063 if (dirp == 0) { 1064 errno = ENOMEM; 1065 return 0; 1066 } 1067 1068 // Win32 accepts "\" in its POSIX stat(), but refuses to treat it 1069 // as a directory in FindFirstFile(). We detect this case here and 1070 // prepend the current drive name. 1071 // 1072 if (dirname[1] == '\0' && dirname[0] == '\\') { 1073 alt_dirname[0] = _getdrive() + 'A' - 1; 1074 alt_dirname[1] = ':'; 1075 alt_dirname[2] = '\\'; 1076 alt_dirname[3] = '\0'; 1077 dirname = alt_dirname; 1078 } 1079 1080 dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal); 1081 if (dirp->path == 0) { 1082 free(dirp); 1083 errno = ENOMEM; 1084 return 0; 1085 } 1086 strcpy(dirp->path, dirname); 1087 1088 fattr = GetFileAttributes(dirp->path); 1089 if (fattr == 0xffffffff) { 1090 free(dirp->path); 1091 free(dirp); 1092 errno = ENOENT; 1093 return 0; 1094 } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) { 1095 free(dirp->path); 1096 free(dirp); 1097 errno = ENOTDIR; 1098 return 0; 1099 } 1100 1101 // Append "*.*", or possibly "\\*.*", to path 1102 if (dirp->path[1] == ':' && 1103 (dirp->path[2] == '\0' || 1104 (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) { 1105 // No '\\' needed for cases like "Z:" or "Z:\" 1106 strcat(dirp->path, "*.*"); 1107 } else { 1108 strcat(dirp->path, "\\*.*"); 1109 } 1110 1111 dirp->handle = FindFirstFile(dirp->path, &dirp->find_data); 1112 if (dirp->handle == INVALID_HANDLE_VALUE) { 1113 if (GetLastError() != ERROR_FILE_NOT_FOUND) { 1114 free(dirp->path); 1115 free(dirp); 1116 errno = EACCES; 1117 return 0; 1118 } 1119 } 1120 return dirp; 1121 } 1122 1123 
// parameter dbuf unused on Windows
// Return the entry cached by the previous FindFirst/FindNextFile call and
// advance the iteration.  Returns NULL when the directory is exhausted,
// or NULL with errno = EBADF on a stale handle.
struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    return 0;
  }

  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return 0;
    }
    // No more entries: close the find handle now so that the final
    // cached entry can still be returned below.
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}

// Release the find handle and the storage allocated by os::opendir().
int os::closedir(DIR *dirp) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path);
  free(dirp);
  return 0;
}

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf) > 0) {
    return path_buf;
  } else {
    // On failure return an empty (but valid) string.
    path_buf[0] = '\0';
    return path_buf;
  }
}

// True if 'filename' names an existing file or directory.
static bool file_exists(const char* filename) {
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
}

// Build a DLL file name "<pname>\<fname>.dll" into 'buffer'.  If 'pname'
// is a path-separator-separated search list, the first element for which
// the resulting file exists wins.  Returns false on potential buffer
// overflow or when no candidate in a search list exists.
bool os::dll_build_name(char *buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  const size_t pnamelen = pname ? strlen(pname) : 0;
  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    jio_snprintf(buffer, buflen, "%s.dll", fname);
    retval = true;
  } else if (c == ':' || c == '\\') {
    // pname already ends with a separator; no '\\' inserted.
    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // pname is a search path list; probe each element in order.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      char* path = pelements[i];
      // Really shouldn't be NULL, but check can't hurt
      size_t plen = (path == NULL) ? 0 : strlen(path);
      if (plen == 0) {
        continue; // skip the empty path values
      }
      const char lastchar = path[plen - 1];
      if (lastchar == ':' || lastchar == '\\') {
        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
      } else {
        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
      }
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i]);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements);
    }
  } else {
    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
    retval = true;
  }
  return retval;
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
const char* os::get_current_directory(char *buf, size_t buflen) {
  int n = static_cast<int>(buflen);
  if (buflen > INT_MAX) n = INT_MAX;   // _getcwd takes an int length; clamp
  return _getcwd(buf, n);
}

//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if address in
// within the NTDLL address space.
//
static bool _addr_in_ntdll(address addr) {
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if (hmod == NULL) return false;
  if (!GetModuleInformation(GetCurrentProcess(), hmod,
                            &minfo, sizeof(MODULEINFO))) {
    return false;
  }

  if ((addr >= minfo.lpBaseOfDll) &&
      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
    return true;
  } else {
    return false;
  }
}
#endif

// Parameter block for _locate_module_by_addr(): 'addr' is the query,
// 'full_path'/'base_addr' are filled in by the callback.
struct _modinfo {
  address addr;
  char* full_path;   // point to a char buffer
  int buflen;        // size of the buffer
  address base_addr;
};

// get_loaded_modules_info() callback: record the module whose address
// range [base_addr, top_address) contains pmod->addr.  Returns 1 to
// stop the enumeration once found, 0 to continue, -1 on bad param.
static int _locate_module_by_addr(const char * mod_fname, address base_addr,
                                  address top_address, void * param) {
  struct _modinfo *pmod = (struct _modinfo *)param;
  if (!pmod) return -1;

  if (base_addr <= pmod->addr &&
      top_address > pmod->addr) {
    // if a buffer is provided, copy path name to the buffer
    if (pmod->full_path) {
      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
    }
    pmod->base_addr = base_addr;
    return 1;
  }
  return 0;
}

// Find the loaded library containing 'addr'; copy its full path into
// 'buf' and set *offset to addr's offset from the library base.
// Returns false (empty buf, *offset = -1) when no module covers addr.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
  //       return the full path to the DLL file, sometimes it returns path
  //       to the corresponding PDB file (debug info); sometimes it only
  //       returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr      = addr;
  mi.full_path = buf;
  mi.buflen    = buflen;
  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

// Decode 'addr' into a symbol name (optionally demangled) and offset
// via the shared Decoder; clears buf and sets *offset = -1 on failure.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
    return true;
  }
  if (offset != NULL) *offset = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
// Identifies the module by checking which one contains the address of
// this very function, which necessarily lives inside jvm.dll.
static int _locate_jvm_dll(const char* mod_fname, address base_addr,
                           address top_address, void * param) {
  if (!param) return -1;

  if (base_addr <= (address)_locate_jvm_dll &&
      top_address > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = top_address;
    return 1;
  }
  return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
bool os::address_is_in_vm(address addr) {
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    // Lazily locate jvm.dll on the first query.
    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(const char* fname, address base_address,
                         address top_address, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n",
            base_address, top_address, fname);
  return 0;
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
  void * result = LoadLibrary(name);
  if (result != NULL) {
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
    ebuf[ebuflen - 1] = '\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen - 1] = '\0';
  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
  if (fd < 0) {
    return NULL;
  }

  // Parse the PE header by hand to extract the target machine type.
  uint32_t signature_offset;
  uint16_t lib_arch = 0;
  bool failed_to_get_lib_arch =
    ( // Go to position 3c in the dll
     (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
     ||
     // Read location of signature
     (sizeof(signature_offset) !=
     (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
     ||
     // Go to COFF File Header in dll
     // that is located after "signature" (4 bytes long)
     (os::seek_to_file_offset(fd,
     signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
     ||
     // Read field that contains code of architecture
     // that dll was built for
     (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
    );

  ::close(fd);
  if (failed_to_get_lib_arch) {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  typedef struct {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[] = {
    {IMAGE_FILE_MACHINE_I386,  (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_IA64,  (char*)"IA 64"}
  };
#if (defined _M_IA64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_IA64;
#elif (defined _M_AMD64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
         is defined :_M_IA64,_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str = NULL, *lib_arch_str = NULL;
  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (lib_arch == arch_array[i].arch_code) {
      lib_arch_str = arch_array[i].arch_name;
    }
    if (running_arch == arch_array[i].arch_code) {
      running_arch_str = arch_array[i].arch_name;
    }
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch) {
    return NULL;
  }

  if (lib_arch_str != NULL) {
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str, running_arch_str);
  } else {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch, running_arch_str);
  }

  return NULL;
}

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  get_loaded_modules_info(_print_module, (void *)st);
}

// Enumerate up to MAX_NUM_MODULES loaded modules of this process,
// invoking 'callback' for each with its file name, base and top
// address.  Stops early when the callback returns non-zero; returns
// the last callback result (0 on enumeration failure).
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  HANDLE hProcess;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[MAX_PATH];
  int         result = 0;

  int pid = os::current_process_id();
  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = callback(filename, (address)modinfo.lpBaseOfDll,
                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
}

// Copy the DNS host name into 'buf'; returns false on API failure
// (including a too-small buffer).
bool os::get_host_name(char* buf, size_t buflen) {
  DWORD size = (DWORD)buflen;
  return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
}

// One-line OS description: the Windows version string with its
// trailing newline chopped off.
void os::get_summary_os_info(char* buf, size_t buflen) {
  stringStream sst(buf, buflen);
  os::win32::print_windows_version(&sst);
  // chop off newline character
  char* nl = strchr(buf, '\n');
  if (nl != NULL) *nl = '\0';
}

int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
  int ret = vsnprintf(buf, len, fmt, args);
  // Get the correct buffer size if buf is too small
  if (ret < 0) {
    return _vscprintf(fmt, args);
  }
  return ret;
}

void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}

void os::print_os_info(outputStream* st) {
#ifdef ASSERT
  char buffer[1024];
  st->print("HostName: ");
  if (get_host_name(buffer, sizeof(buffer))) {
    st->print("%s ", buffer);
  } else {
    st->print("N/A ");
  }
#endif
  st->print("OS:");
  os::win32::print_windows_version(st);
}

// Print the Windows marketing name and build, determined from the
// product version of kernel32.dll (GetVersionEx alone is unreliable
// from Windows 8.1 onward).
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  VS_FIXEDFILEINFO *file_info;
  TCHAR kernel32_path[MAX_PATH];
  UINT len, ret;

  // Use the GetVersionEx information to see if we're on a server or
  // workstation edition of Windows. Starting with Windows 8.1 we can't
  // trust the OS version information returned by this API.
  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("Call to GetVersionEx failed");
    return;
  }
  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);

  // Get the full path to \Windows\System32\kernel32.dll and use that for
  // determining what version of Windows we're running on.
  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
  ret = GetSystemDirectory(kernel32_path, len);
  if (ret == 0 || ret > len) {
    st->print_cr("Call to GetSystemDirectory failed");
    return;
  }
  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);

  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
  if (version_size == 0) {
    st->print_cr("Call to GetFileVersionInfoSize failed");
    return;
  }

  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
  if (version_info == NULL) {
    st->print_cr("Failed to allocate version_info");
    return;
  }

  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
    os::free(version_info);
    st->print_cr("Call to GetFileVersionInfo failed");
    return;
  }

  // Query the root block for the fixed file/product version numbers.
  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
    os::free(version_info);
    st->print_cr("Call to VerQueryValue failed");
    return;
  }

  int major_version = HIWORD(file_info->dwProductVersionMS);
  int minor_version = LOWORD(file_info->dwProductVersionMS);
  int build_number  = HIWORD(file_info->dwProductVersionLS);
  int build_minor   = LOWORD(file_info->dwProductVersionLS);
  // Encode major.minor as a single comparable integer for the switch.
  int os_vers = major_version * 1000 + minor_version;
  os::free(version_info);

  st->print(" Windows ");
  switch (os_vers) {

  case 6000:
    if (is_workstation) {
      st->print("Vista");
    } else {
      st->print("Server 2008");
    }
    break;

  case 6001:
    if (is_workstation) {
      st->print("7");
    } else {
      st->print("Server 2008 R2");
    }
    break;

  case 6002:
    if (is_workstation) {
      st->print("8");
    } else {
      st->print("Server 2012");
    }
    break;

  case 6003:
    if (is_workstation) {
      st->print("8.1");
    } else {
      st->print("Server 2012 R2");
    }
    break;

  case 10000:
1668 if (is_workstation) { 1669 st->print("10"); 1670 } else { 1671 // The server version name of Windows 10 is not known at this time 1672 st->print("%d.%d", major_version, minor_version); 1673 } 1674 break; 1675 1676 default: 1677 // Unrecognized windows, print out its major and minor versions 1678 st->print("%d.%d", major_version, minor_version); 1679 break; 1680 } 1681 1682 // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could 1683 // find out whether we are running on 64 bit processor or not 1684 SYSTEM_INFO si; 1685 ZeroMemory(&si, sizeof(SYSTEM_INFO)); 1686 GetNativeSystemInfo(&si); 1687 if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) { 1688 st->print(" , 64 bit"); 1689 } 1690 1691 st->print(" Build %d", build_number); 1692 st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor); 1693 st->cr(); 1694 } 1695 1696 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) { 1697 // Nothing to do for now. 1698 } 1699 1700 void os::get_summary_cpu_info(char* buf, size_t buflen) { 1701 HKEY key; 1702 DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE, 1703 "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key); 1704 if (status == ERROR_SUCCESS) { 1705 DWORD size = (DWORD)buflen; 1706 status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size); 1707 if (status != ERROR_SUCCESS) { 1708 strncpy(buf, "## __CPU__", buflen); 1709 } 1710 RegCloseKey(key); 1711 } else { 1712 // Put generic cpu info to return 1713 strncpy(buf, "## __CPU__", buflen); 1714 } 1715 } 1716 1717 void os::print_memory_info(outputStream* st) { 1718 st->print("Memory:"); 1719 st->print(" %dk page", os::vm_page_size()>>10); 1720 1721 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 1722 // value if total memory is larger than 4GB 1723 MEMORYSTATUSEX ms; 1724 ms.dwLength = sizeof(ms); 1725 GlobalMemoryStatusEx(&ms); 1726 1727 st->print(", physical %uk", os::physical_memory() 
>> 10); 1728 st->print("(%uk free)", os::available_memory() >> 10); 1729 1730 st->print(", swap %uk", ms.ullTotalPageFile >> 10); 1731 st->print("(%uk free)", ms.ullAvailPageFile >> 10); 1732 st->cr(); 1733 } 1734 1735 void os::print_siginfo(outputStream *st, const void* siginfo) { 1736 const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo; 1737 st->print("siginfo:"); 1738 1739 char tmp[64]; 1740 if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) { 1741 strcpy(tmp, "EXCEPTION_??"); 1742 } 1743 st->print(" %s (0x%x)", tmp, er->ExceptionCode); 1744 1745 if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION || 1746 er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) && 1747 er->NumberParameters >= 2) { 1748 switch (er->ExceptionInformation[0]) { 1749 case 0: st->print(", reading address"); break; 1750 case 1: st->print(", writing address"); break; 1751 case 8: st->print(", data execution prevention violation at address"); break; 1752 default: st->print(", ExceptionInformation=" INTPTR_FORMAT, 1753 er->ExceptionInformation[0]); 1754 } 1755 st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]); 1756 } else { 1757 int num = er->NumberParameters; 1758 if (num > 0) { 1759 st->print(", ExceptionInformation="); 1760 for (int i = 0; i < num; i++) { 1761 st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]); 1762 } 1763 } 1764 } 1765 st->cr(); 1766 } 1767 1768 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { 1769 // do nothing 1770 } 1771 1772 static char saved_jvm_path[MAX_PATH] = {0}; 1773 1774 // Find the full path to the current module, jvm.dll 1775 void os::jvm_path(char *buf, jint buflen) { 1776 // Error checking. 1777 if (buflen < MAX_PATH) { 1778 assert(false, "must use a large-enough buffer"); 1779 buf[0] = '\0'; 1780 return; 1781 } 1782 // Lazy resolve the path to current module. 
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like jvm.dll is installed there (append a fake suffix
    // hotspot/jvm.dll).
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {
      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      if (0 != _access(buf, 0)) {
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  if (buf[0] == '\0') {
    // Not an alt-jvm launch: ask the OS for this module's file name.
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  // Cache the result for subsequent calls.
  strncpy(saved_jvm_path, buf, MAX_PATH);
  saved_jvm_path[MAX_PATH - 1] = '\0';
}


void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  // 32-bit stdcall name decoration: leading underscore.
  st->print("_");
#endif
}


void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  // 32-bit stdcall name decoration: "@<byte size of arguments>".
  st->print("@%d", args_size * sizeof(int));
#endif
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c

// Copy a text description of the last Win32 error (or, failing that,
// the last C runtime error) into 'buf'.  Returns the message length,
// or 0 when there is no pending error.
size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
                   FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
                   NULL,
                   errval,
                   0,
                   buf,
                   (DWORD)len,
                   NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}

int os::get_last_error() {
  DWORD error = GetLastError();
  if (error == 0) {
    // Fall back to the C runtime error code.
    error = errno;
  }
  return (int)error;
}

// Counting semaphore backed by a Win32 semaphore handle.
WindowsSemaphore::WindowsSemaphore(uint value) {
  _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL);

  guarantee(_semaphore != NULL, "CreateSemaphore failed with error code: %lu", GetLastError());
}

WindowsSemaphore::~WindowsSemaphore() {
  ::CloseHandle(_semaphore);
}

// Release the semaphore 'count' times; a no-op when count is 0.
void WindowsSemaphore::signal(uint count) {
  if (count > 0) {
    BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL);

    assert(ret != 0, "ReleaseSemaphore failed with error code: %lu", GetLastError());
  }
}

// Block until the semaphore is signaled.
void WindowsSemaphore::wait() {
  DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE);
  assert(ret != WAIT_FAILED,   "WaitForSingleObject failed with error code: %lu", GetLastError());
  assert(ret == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", ret);
}

// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;

static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}

void* os::user_handler() {
  return (void*) UserHandler;
}

// Install 'handler' for 'signal_number' and return the previous one.
// SIGBREAK is special-cased (see the kernel-bug note above
// sigbreakHandler): it is dispatched from consoleHandler rather than
// through the C runtime's signal machinery.
void* os::signal(int signal_number, void* handler) {
  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
    void (*oldHandler)(int) = sigbreakHandler;
    sigbreakHandler = (void (*)(int)) handler;
    return (void*) oldHandler;
  } else {
    return (void*)::signal(signal_number, (void (*)(int))handler);
  }
}

void os::signal_raise(int signal_number) {
  raise(signal_number);
}

// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events. We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
  switch (event) {
  case CTRL_C_EVENT:
    if (is_error_reported()) {
      // Ctrl-C is pressed during error reporting, likely because the error
      // handler fails to abort. Let VM die immediately.
      os::die();
    }

    os::signal_raise(SIGINT);
    return TRUE;
    break;
  case CTRL_BREAK_EVENT:
    if (sigbreakHandler != NULL) {
      (*sigbreakHandler)(SIGBREAK);
    }
    return TRUE;
    break;
  case CTRL_LOGOFF_EVENT: {
    // Don't terminate JVM if it is running in a non-interactive session,
    // such as a service process.
    USEROBJECTFLAGS flags;
    HANDLE handle = GetProcessWindowStation();
    if (handle != NULL &&
        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
        sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let next handler to deal
      // with it.
      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
        return FALSE;
      }
    }
  }
  // Note: an interactive LOGOFF deliberately falls through to the
  // CLOSE/SHUTDOWN handling below (no break above).
  case CTRL_CLOSE_EVENT:
  case CTRL_SHUTDOWN_EVENT:
    os::signal_raise(SIGTERM);
    return TRUE;
    break;
  default:
    break;
  }
  return FALSE;
}

// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Semaphore incremented once per posted signal; the signal thread
// waits on it in check_pending_signals().
static HANDLE sig_sem = NULL;

void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3. For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified. This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case. See bugs 4323062, 4345157, and related bugs.
  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}

// Record one occurrence of 'signal_number' and wake the signal thread
// by releasing sig_sem.  Safe to call before signal_init_pd() (no-op
// while sig_sem is still NULL).
void os::signal_notify(int signal_number) {
  BOOL ret;
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[signal_number]);
    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
    assert(ret != 0, "ReleaseSemaphore() failed");
  }
}

// Return the lowest signal number with a pending count, atomically
// decrementing that count.  With wait_for_signal, block on sig_sem
// until a signal arrives (cooperating with the external-suspend
// protocol while blocked); otherwise return -1 when none is pending.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // cmpxchg claims one occurrence; retry the scan on a race.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        // Put the count back so the wakeup is not lost, then self-suspend.
        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
        assert(ret != 0, "ReleaseSemaphore() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

int os::signal_lookup() {
  return check_pending_signals(false);
}

int os::signal_wait() {
  return check_pending_signals(true);
}

// Implicit OS exception handling

// Redirect the faulting thread to 'handler': save the faulting pc in
// the current JavaThread (if any), rewrite the context's instruction
// pointer, and resume execution at the handler.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
                      address handler) {
  JavaThread* thread = (JavaThread*) Thread::current_or_null();
  // Save pc in thread
#ifdef _M_IA64
  // Do not blow up if no thread info available.
  if (thread) {
    // Saving PRECISE pc (with slot information) in thread.
    uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
    // Convert precise PC into "Unix" format
    precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
    thread->set_saved_exception_pc((address)precise_pc);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
  // Clear out psr.ri (= Restart Instruction) in order to continue
  // at the beginning of the target bundle.
  exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
  assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
#else
#ifdef _M_AMD64
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
#else
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
#endif
#endif

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}


// Used for PostMortemDump
extern "C" void safepoints();
extern "C" void find(int x);
extern "C" void events();

// According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real world experience shows that occasionnaly
// the execution of an illegal instruction can generate the exception code 0xC000001E. This
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).

#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E

// From "Execution Protection in the Windows Operating System" draft 0.35
// Once a system header becomes available, the "real" define should be
// included or copied here.
#define EXCEPTION_INFO_EXEC_VIOLATION 0x08

// Handle NAT Bit consumption on IA64.
#ifdef _M_IA64
  #define EXCEPTION_REG_NAT_CONSUMPTION STATUS_REG_NAT_CONSUMPTION
#endif

// Windows Vista/2008 heap corruption check
#define EXCEPTION_HEAP_CORRUPTION 0xC0000374

// Expands to the {"NAME", NAME} initializer pair used in exceptlabels[].
#define def_excpt(val) #val, val

// One row of the exception-code -> printable-name table.
struct siglabel {
  char *name;
  int number;
};

// All Visual C++ exceptions thrown from code generated by the Microsoft Visual
// C++ compiler contain this error code. Because this is a compiler-generated
// error, the code is not listed in the Win32 API header files.
// The code is actually a cryptic mnemonic device, with the initial "E"
// standing for "exception" and the final 3 bytes (0x6D7363) representing the
// ASCII values of "msc".

#define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363


// Name table for os::exception_name(); terminated by a NULL name.
struct siglabel exceptlabels[] = {
    def_excpt(EXCEPTION_ACCESS_VIOLATION),
    def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
    def_excpt(EXCEPTION_BREAKPOINT),
    def_excpt(EXCEPTION_SINGLE_STEP),
    def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
    def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
    def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
    def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
    def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
    def_excpt(EXCEPTION_FLT_OVERFLOW),
    def_excpt(EXCEPTION_FLT_STACK_CHECK),
    def_excpt(EXCEPTION_FLT_UNDERFLOW),
    def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
    def_excpt(EXCEPTION_INT_OVERFLOW),
    def_excpt(EXCEPTION_PRIV_INSTRUCTION),
    def_excpt(EXCEPTION_IN_PAGE_ERROR),
    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
    def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
    def_excpt(EXCEPTION_STACK_OVERFLOW),
    def_excpt(EXCEPTION_INVALID_DISPOSITION),
    def_excpt(EXCEPTION_GUARD_PAGE),
    def_excpt(EXCEPTION_INVALID_HANDLE),
    def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
    def_excpt(EXCEPTION_HEAP_CORRUPTION),
#ifdef _M_IA64
    def_excpt(EXCEPTION_REG_NAT_CONSUMPTION),
#endif
    NULL, 0
};

// Copy the symbolic name of 'exception_code' into 'buf' and return buf,
// or return NULL when the code is not in exceptlabels[].
const char* os::exception_name(int exception_code, char *buf, size_t size) {
  for (int i = 0; exceptlabels[i].name != NULL; i++) {
    if (exceptlabels[i].number == exception_code) {
      jio_snprintf(buf, size, "%s", exceptlabels[i].name);
      return buf;
    }
  }

  return NULL;
}

//-----------------------------------------------------------------------------
LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by idiv; should only happen for -MinInt/-1
  // (division by zero is handled explicitly)
#ifdef _M_IA64
  assert(0, "Fix Handle_IDiv_Exception");
2215 #else 2216 #ifdef _M_AMD64 2217 PCONTEXT ctx = exceptionInfo->ContextRecord; 2218 address pc = (address)ctx->Rip; 2219 assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode"); 2220 assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2221 if (pc[0] == 0xF7) { 2222 // set correct result values and continue after idiv instruction 2223 ctx->Rip = (DWORD64)pc + 2; // idiv reg, reg is 2 bytes 2224 } else { 2225 ctx->Rip = (DWORD64)pc + 3; // REX idiv reg, reg is 3 bytes 2226 } 2227 // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation) 2228 // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the 2229 // idiv opcode (0xF7). 2230 ctx->Rdx = (DWORD)0; // remainder 2231 // Continue the execution 2232 #else 2233 PCONTEXT ctx = exceptionInfo->ContextRecord; 2234 address pc = (address)ctx->Eip; 2235 assert(pc[0] == 0xF7, "not an idiv opcode"); 2236 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2237 assert(ctx->Eax == min_jint, "unexpected idiv exception"); 2238 // set correct result values and continue after idiv instruction 2239 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2240 ctx->Eax = (DWORD)min_jint; // result 2241 ctx->Edx = (DWORD)0; // remainder 2242 // Continue the execution 2243 #endif 2244 #endif 2245 return EXCEPTION_CONTINUE_EXECUTION; 2246 } 2247 2248 //----------------------------------------------------------------------------- 2249 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2250 PCONTEXT ctx = exceptionInfo->ContextRecord; 2251 #ifndef _WIN64 2252 // handle exception caused by native method modifying control word 2253 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2254 2255 switch 
(exception_code) { 2256 case EXCEPTION_FLT_DENORMAL_OPERAND: 2257 case EXCEPTION_FLT_DIVIDE_BY_ZERO: 2258 case EXCEPTION_FLT_INEXACT_RESULT: 2259 case EXCEPTION_FLT_INVALID_OPERATION: 2260 case EXCEPTION_FLT_OVERFLOW: 2261 case EXCEPTION_FLT_STACK_CHECK: 2262 case EXCEPTION_FLT_UNDERFLOW: 2263 jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std()); 2264 if (fp_control_word != ctx->FloatSave.ControlWord) { 2265 // Restore FPCW and mask out FLT exceptions 2266 ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0; 2267 // Mask out pending FLT exceptions 2268 ctx->FloatSave.StatusWord &= 0xffffff00; 2269 return EXCEPTION_CONTINUE_EXECUTION; 2270 } 2271 } 2272 2273 if (prev_uef_handler != NULL) { 2274 // We didn't handle this exception so pass it to the previous 2275 // UnhandledExceptionFilter. 2276 return (prev_uef_handler)(exceptionInfo); 2277 } 2278 #else // !_WIN64 2279 // On Windows, the mxcsr control bits are non-volatile across calls 2280 // See also CR 6192333 2281 // 2282 jint MxCsr = INITIAL_MXCSR; 2283 // we can't use StubRoutines::addr_mxcsr_std() 2284 // because in Win64 mxcsr is not saved there 2285 if (MxCsr != ctx->MxCsr) { 2286 ctx->MxCsr = MxCsr; 2287 return EXCEPTION_CONTINUE_EXECUTION; 2288 } 2289 #endif // !_WIN64 2290 2291 return EXCEPTION_CONTINUE_SEARCH; 2292 } 2293 2294 static inline void report_error(Thread* t, DWORD exception_code, 2295 address addr, void* siginfo, void* context) { 2296 VMError::report_and_die(t, exception_code, addr, siginfo, context); 2297 2298 // If UseOsErrorReporting, this will return here and save the error file 2299 // somewhere where we can find it in the minidump. 
2300 } 2301 2302 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread, 2303 struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) { 2304 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2305 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2306 if (Interpreter::contains(pc)) { 2307 *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord); 2308 if (!fr->is_first_java_frame()) { 2309 assert(fr->safe_for_sender(thread), "Safety check"); 2310 *fr = fr->java_sender(); 2311 } 2312 } else { 2313 // more complex code with compiled code 2314 assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above"); 2315 CodeBlob* cb = CodeCache::find_blob(pc); 2316 if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) { 2317 // Not sure where the pc points to, fallback to default 2318 // stack overflow handling 2319 return false; 2320 } else { 2321 *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord); 2322 // in compiled code, the stack banging is performed just after the return pc 2323 // has been pushed on the stack 2324 *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp())); 2325 if (!fr->is_java_frame()) { 2326 assert(fr->safe_for_sender(thread), "Safety check"); 2327 *fr = fr->java_sender(); 2328 } 2329 } 2330 } 2331 assert(fr->is_java_frame(), "Safety check"); 2332 return true; 2333 } 2334 2335 //----------------------------------------------------------------------------- 2336 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { 2337 if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH; 2338 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2339 #ifdef _M_IA64 2340 // On Itanium, we need the "precise pc", which has the slot number coded 2341 // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format). 
2342 address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress; 2343 // Convert the pc to "Unix format", which has the slot number coded 2344 // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2 2345 // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction" 2346 // information is saved in the Unix format. 2347 address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2)); 2348 #else 2349 #ifdef _M_AMD64 2350 address pc = (address) exceptionInfo->ContextRecord->Rip; 2351 #else 2352 address pc = (address) exceptionInfo->ContextRecord->Eip; 2353 #endif 2354 #endif 2355 Thread* t = Thread::current_or_null_safe(); 2356 2357 // Handle SafeFetch32 and SafeFetchN exceptions. 2358 if (StubRoutines::is_safefetch_fault(pc)) { 2359 return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc)); 2360 } 2361 2362 #ifndef _WIN64 2363 // Execution protection violation - win32 running on AMD64 only 2364 // Handled first to avoid misdiagnosis as a "normal" access violation; 2365 // This is safe to do because we have a new/unique ExceptionInformation 2366 // code for this condition. 2367 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2368 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2369 int exception_subcode = (int) exceptionRecord->ExceptionInformation[0]; 2370 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2371 2372 if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) { 2373 int page_size = os::vm_page_size(); 2374 2375 // Make sure the pc and the faulting address are sane. 2376 // 2377 // If an instruction spans a page boundary, and the page containing 2378 // the beginning of the instruction is executable but the following 2379 // page is not, the pc and the faulting address might be slightly 2380 // different - we still want to unguard the 2nd page in this case. 
2381 // 2382 // 15 bytes seems to be a (very) safe value for max instruction size. 2383 bool pc_is_near_addr = 2384 (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15); 2385 bool instr_spans_page_boundary = 2386 (align_size_down((intptr_t) pc ^ (intptr_t) addr, 2387 (intptr_t) page_size) > 0); 2388 2389 if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) { 2390 static volatile address last_addr = 2391 (address) os::non_memory_address_word(); 2392 2393 // In conservative mode, don't unguard unless the address is in the VM 2394 if (UnguardOnExecutionViolation > 0 && addr != last_addr && 2395 (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) { 2396 2397 // Set memory to RWX and retry 2398 address page_start = 2399 (address) align_size_down((intptr_t) addr, (intptr_t) page_size); 2400 bool res = os::protect_memory((char*) page_start, page_size, 2401 os::MEM_PROT_RWX); 2402 2403 if (PrintMiscellaneous && Verbose) { 2404 char buf[256]; 2405 jio_snprintf(buf, sizeof(buf), "Execution protection violation " 2406 "at " INTPTR_FORMAT 2407 ", unguarding " INTPTR_FORMAT ": %s", addr, 2408 page_start, (res ? "success" : strerror(errno))); 2409 tty->print_raw_cr(buf); 2410 } 2411 2412 // Set last_addr so if we fault again at the same address, we don't 2413 // end up in an endless loop. 2414 // 2415 // There are two potential complications here. Two threads trapping 2416 // at the same address at the same time could cause one of the 2417 // threads to think it already unguarded, and abort the VM. Likely 2418 // very rare. 2419 // 2420 // The other race involves two threads alternately trapping at 2421 // different addresses and failing to unguard the page, resulting in 2422 // an endless loop. This condition is probably even more unlikely 2423 // than the first. 
2424 // 2425 // Although both cases could be avoided by using locks or thread 2426 // local last_addr, these solutions are unnecessary complication: 2427 // this handler is a best-effort safety net, not a complete solution. 2428 // It is disabled by default and should only be used as a workaround 2429 // in case we missed any no-execute-unsafe VM code. 2430 2431 last_addr = addr; 2432 2433 return EXCEPTION_CONTINUE_EXECUTION; 2434 } 2435 } 2436 2437 // Last unguard failed or not unguarding 2438 tty->print_raw_cr("Execution protection violation"); 2439 report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord, 2440 exceptionInfo->ContextRecord); 2441 return EXCEPTION_CONTINUE_SEARCH; 2442 } 2443 } 2444 #endif // _WIN64 2445 2446 // Check to see if we caught the safepoint code in the 2447 // process of write protecting the memory serialization page. 2448 // It write enables the page immediately after protecting it 2449 // so just return. 2450 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2451 JavaThread* thread = (JavaThread*) t; 2452 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2453 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2454 if (os::is_memory_serialize_page(thread, addr)) { 2455 // Block current thread until the memory serialize page permission restored. 2456 os::block_on_serialize_page_trap(); 2457 return EXCEPTION_CONTINUE_EXECUTION; 2458 } 2459 } 2460 2461 if ((exception_code == EXCEPTION_ACCESS_VIOLATION) && 2462 VM_Version::is_cpuinfo_segv_addr(pc)) { 2463 // Verify that OS save/restore AVX registers. 2464 return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr()); 2465 } 2466 2467 if (t != NULL && t->is_Java_thread()) { 2468 JavaThread* thread = (JavaThread*) t; 2469 bool in_java = thread->thread_state() == _thread_in_Java; 2470 2471 // Handle potential stack overflows up front. 
2472 if (exception_code == EXCEPTION_STACK_OVERFLOW) { 2473 #ifdef _M_IA64 2474 // Use guard page for register stack. 2475 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2476 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2477 // Check for a register stack overflow on Itanium 2478 if (thread->addr_inside_register_stack_red_zone(addr)) { 2479 // Fatal red zone violation happens if the Java program 2480 // catches a StackOverflow error and does so much processing 2481 // that it runs beyond the unprotected yellow guard zone. As 2482 // a result, we are out of here. 2483 fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit."); 2484 } else if(thread->addr_inside_register_stack(addr)) { 2485 // Disable the yellow zone which sets the state that 2486 // we've got a stack overflow problem. 2487 if (thread->stack_yellow_reserved_zone_enabled()) { 2488 thread->disable_stack_yellow_reserved_zone(); 2489 } 2490 // Give us some room to process the exception. 2491 thread->disable_register_stack_guard(); 2492 // Tracing with +Verbose. 2493 if (Verbose) { 2494 tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc); 2495 tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr); 2496 tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base()); 2497 tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]", 2498 thread->register_stack_base(), 2499 thread->register_stack_base() + thread->stack_size()); 2500 } 2501 2502 // Reguard the permanent register stack red zone just to be sure. 2503 // We saw Windows silently disabling this without telling us. 
2504 thread->enable_register_stack_red_zone(); 2505 2506 return Handle_Exception(exceptionInfo, 2507 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2508 } 2509 #endif 2510 if (thread->stack_guards_enabled()) { 2511 if (_thread_in_Java) { 2512 frame fr; 2513 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2514 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2515 if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) { 2516 assert(fr.is_java_frame(), "Must be a Java frame"); 2517 SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr); 2518 } 2519 } 2520 // Yellow zone violation. The o/s has unprotected the first yellow 2521 // zone page for us. Note: must call disable_stack_yellow_zone to 2522 // update the enabled status, even if the zone contains only one page. 2523 thread->disable_stack_yellow_reserved_zone(); 2524 // If not in java code, return and hope for the best. 2525 return in_java 2526 ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)) 2527 : EXCEPTION_CONTINUE_EXECUTION; 2528 } else { 2529 // Fatal red zone violation. 2530 thread->disable_stack_red_zone(); 2531 tty->print_raw_cr("An unrecoverable stack overflow has occurred."); 2532 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2533 exceptionInfo->ContextRecord); 2534 return EXCEPTION_CONTINUE_SEARCH; 2535 } 2536 } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2537 // Either stack overflow or null pointer exception. 2538 if (in_java) { 2539 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2540 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2541 address stack_end = thread->stack_end(); 2542 if (addr < stack_end && addr >= stack_end - os::vm_page_size()) { 2543 // Stack overflow. 
2544 assert(!os::uses_stack_guard_pages(), 2545 "should be caught by red zone code above."); 2546 return Handle_Exception(exceptionInfo, 2547 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2548 } 2549 // Check for safepoint polling and implicit null 2550 // We only expect null pointers in the stubs (vtable) 2551 // the rest are checked explicitly now. 2552 CodeBlob* cb = CodeCache::find_blob(pc); 2553 if (cb != NULL) { 2554 if (os::is_poll_address(addr)) { 2555 address stub = SharedRuntime::get_poll_stub(pc); 2556 return Handle_Exception(exceptionInfo, stub); 2557 } 2558 } 2559 { 2560 #ifdef _WIN64 2561 // If it's a legal stack address map the entire region in 2562 // 2563 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2564 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2565 if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) { 2566 addr = (address)((uintptr_t)addr & 2567 (~((uintptr_t)os::vm_page_size() - (uintptr_t)1))); 2568 os::commit_memory((char *)addr, thread->stack_base() - addr, 2569 !ExecMem); 2570 return EXCEPTION_CONTINUE_EXECUTION; 2571 } else 2572 #endif 2573 { 2574 // Null pointer exception. 2575 #ifdef _M_IA64 2576 // Process implicit null checks in compiled code. Note: Implicit null checks 2577 // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs. 
2578 if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) { 2579 CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format); 2580 // Handle implicit null check in UEP method entry 2581 if (cb && (cb->is_frame_complete_at(pc) || 2582 (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) { 2583 if (Verbose) { 2584 intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0); 2585 tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format); 2586 tty->print_cr(" to addr " INTPTR_FORMAT, addr); 2587 tty->print_cr(" bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)", 2588 *(bundle_start + 1), *bundle_start); 2589 } 2590 return Handle_Exception(exceptionInfo, 2591 SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL)); 2592 } 2593 } 2594 2595 // Implicit null checks were processed above. Hence, we should not reach 2596 // here in the usual case => die! 2597 if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception"); 2598 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2599 exceptionInfo->ContextRecord); 2600 return EXCEPTION_CONTINUE_SEARCH; 2601 2602 #else // !IA64 2603 2604 if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) { 2605 address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); 2606 if (stub != NULL) return Handle_Exception(exceptionInfo, stub); 2607 } 2608 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2609 exceptionInfo->ContextRecord); 2610 return EXCEPTION_CONTINUE_SEARCH; 2611 #endif 2612 } 2613 } 2614 } 2615 2616 #ifdef _WIN64 2617 // Special care for fast JNI field accessors. 2618 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks 2619 // in and the heap gets shrunk before the field access. 
2620 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2621 address addr = JNI_FastGetField::find_slowcase_pc(pc); 2622 if (addr != (address)-1) { 2623 return Handle_Exception(exceptionInfo, addr); 2624 } 2625 } 2626 #endif 2627 2628 // Stack overflow or null pointer exception in native code. 2629 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2630 exceptionInfo->ContextRecord); 2631 return EXCEPTION_CONTINUE_SEARCH; 2632 } // /EXCEPTION_ACCESS_VIOLATION 2633 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 2634 #if defined _M_IA64 2635 else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION || 2636 exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) { 2637 M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0); 2638 2639 // Compiled method patched to be non entrant? Following conditions must apply: 2640 // 1. must be first instruction in bundle 2641 // 2. must be a break instruction with appropriate code 2642 if ((((uint64_t) pc & 0x0F) == 0) && 2643 (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) { 2644 return Handle_Exception(exceptionInfo, 2645 (address)SharedRuntime::get_handle_wrong_method_stub()); 2646 } 2647 } // /EXCEPTION_ILLEGAL_INSTRUCTION 2648 #endif 2649 2650 2651 if (in_java) { 2652 switch (exception_code) { 2653 case EXCEPTION_INT_DIVIDE_BY_ZERO: 2654 return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO)); 2655 2656 case EXCEPTION_INT_OVERFLOW: 2657 return Handle_IDiv_Exception(exceptionInfo); 2658 2659 } // switch 2660 } 2661 if (((thread->thread_state() == _thread_in_Java) || 2662 (thread->thread_state() == _thread_in_native)) && 2663 exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) { 2664 LONG result=Handle_FLT_Exception(exceptionInfo); 2665 if (result==EXCEPTION_CONTINUE_EXECUTION) return result; 2666 } 2667 } 2668 2669 if (exception_code != 
EXCEPTION_BREAKPOINT) { 2670 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2671 exceptionInfo->ContextRecord); 2672 } 2673 return EXCEPTION_CONTINUE_SEARCH; 2674 } 2675 2676 #ifndef _WIN64 2677 // Special care for fast JNI accessors. 2678 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and 2679 // the heap gets shrunk before the field access. 2680 // Need to install our own structured exception handler since native code may 2681 // install its own. 2682 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { 2683 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2684 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2685 address pc = (address) exceptionInfo->ContextRecord->Eip; 2686 address addr = JNI_FastGetField::find_slowcase_pc(pc); 2687 if (addr != (address)-1) { 2688 return Handle_Exception(exceptionInfo, addr); 2689 } 2690 } 2691 return EXCEPTION_CONTINUE_SEARCH; 2692 } 2693 2694 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result) \ 2695 Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, \ 2696 jobject obj, \ 2697 jfieldID fieldID) { \ 2698 __try { \ 2699 return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, \ 2700 obj, \ 2701 fieldID); \ 2702 } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*) \ 2703 _exception_info())) { \ 2704 } \ 2705 return 0; \ 2706 } 2707 2708 DEFINE_FAST_GETFIELD(jboolean, bool, Boolean) 2709 DEFINE_FAST_GETFIELD(jbyte, byte, Byte) 2710 DEFINE_FAST_GETFIELD(jchar, char, Char) 2711 DEFINE_FAST_GETFIELD(jshort, short, Short) 2712 DEFINE_FAST_GETFIELD(jint, int, Int) 2713 DEFINE_FAST_GETFIELD(jlong, long, Long) 2714 DEFINE_FAST_GETFIELD(jfloat, float, Float) 2715 DEFINE_FAST_GETFIELD(jdouble, double, Double) 2716 2717 address os::win32::fast_jni_accessor_wrapper(BasicType type) { 2718 switch (type) { 2719 case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper; 2720 case T_BYTE: return 
(address)jni_fast_GetByteField_wrapper; 2721 case T_CHAR: return (address)jni_fast_GetCharField_wrapper; 2722 case T_SHORT: return (address)jni_fast_GetShortField_wrapper; 2723 case T_INT: return (address)jni_fast_GetIntField_wrapper; 2724 case T_LONG: return (address)jni_fast_GetLongField_wrapper; 2725 case T_FLOAT: return (address)jni_fast_GetFloatField_wrapper; 2726 case T_DOUBLE: return (address)jni_fast_GetDoubleField_wrapper; 2727 default: ShouldNotReachHere(); 2728 } 2729 return (address)-1; 2730 } 2731 #endif 2732 2733 // Virtual Memory 2734 2735 int os::vm_page_size() { return os::win32::vm_page_size(); } 2736 int os::vm_allocation_granularity() { 2737 return os::win32::vm_allocation_granularity(); 2738 } 2739 2740 // Windows large page support is available on Windows 2003. In order to use 2741 // large page memory, the administrator must first assign additional privilege 2742 // to the user: 2743 // + select Control Panel -> Administrative Tools -> Local Security Policy 2744 // + select Local Policies -> User Rights Assignment 2745 // + double click "Lock pages in memory", add users and/or groups 2746 // + reboot 2747 // Note the above steps are needed for administrator as well, as administrators 2748 // by default do not have the privilege to lock pages in memory. 2749 // 2750 // Note about Windows 2003: although the API supports committing large page 2751 // memory on a page-by-page basis and VirtualAlloc() returns success under this 2752 // scenario, I found through experiment it only uses large page if the entire 2753 // memory region is reserved and committed in a single VirtualAlloc() call. 2754 // This makes Windows large page support more or less like Solaris ISM, in 2755 // that the entire heap must be committed upfront. This probably will change 2756 // in the future, if so the code below needs to be revisited. 
#ifndef MEM_LARGE_PAGES
  #define MEM_LARGE_PAGES 0x20000000
#endif

// Process/token handles held only during large page initialization;
// released by cleanup_after_large_page_init().
static HANDLE    _hProcess;
static HANDLE    _hToken;

// Container for NUMA node list info
class NUMANodeListHolder {
 private:
  int *_numa_used_node_list;  // allocated below
  int _numa_used_node_count;

  void free_node_list() {
    if (_numa_used_node_list != NULL) {
      FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
    }
  }

 public:
  NUMANodeListHolder() {
    _numa_used_node_count = 0;
    _numa_used_node_list = NULL;
    // do rest of initialization in build routine (after function pointers are set up)
  }

  ~NUMANodeListHolder() {
    free_node_list();
  }

  // Populates the node list with the NUMA nodes whose processor mask
  // intersects this process's affinity mask. Returns true only when the
  // process spans more than one node (interleaving is then worthwhile).
  bool build() {
    DWORD_PTR proc_aff_mask;
    DWORD_PTR sys_aff_mask;
    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
    ULONG highest_node_number;
    if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
    free_node_list();
    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
    for (unsigned int i = 0; i <= highest_node_number; i++) {
      ULONGLONG proc_mask_numa_node;
      if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
      if ((proc_aff_mask & proc_mask_numa_node)!=0) {
        _numa_used_node_list[_numa_used_node_count++] = i;
      }
    }
    return (_numa_used_node_count > 1);
  }

  int get_count() { return _numa_used_node_count; }
  int get_node_list_entry(int n) {
    // for indexes out of range, returns -1
    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
  }

} numa_node_list_holder;



static size_t _large_page_size = 0;

// Tries to enable the SeLockMemoryPrivilege on the current process token,
// which is required for MEM_LARGE_PAGES allocations. Returns true on success.
// Leaves _hProcess/_hToken open for cleanup_after_large_page_init().
static bool request_lock_memory_privilege() {
  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
                          os::current_process_id());

  LUID luid;
  if (_hProcess != NULL &&
      OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
      LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

    TOKEN_PRIVILEGES tp;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Luid = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See MSDN document.
    if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
        (GetLastError() == ERROR_SUCCESS)) {
      return true;
    }
  }

  return false;
}

// Closes the handles opened by request_lock_memory_privilege().
static void cleanup_after_large_page_init() {
  if (_hProcess) CloseHandle(_hProcess);
  _hProcess = NULL;
  if (_hToken) CloseHandle(_hToken);
  _hToken = NULL;
}

// Builds the NUMA node list and aligns NUMAInterleaveGranularity.
// Returns true when interleaving can be used (process spans multiple nodes).
static bool numa_interleaving_init() {
  bool success = false;
  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

  // print a warning if UseNUMAInterleaving flag is specified on command line
  bool warn_on_failure = use_numa_interleaving_specified;
#define WARN(msg) if (warn_on_failure) { warning(msg); }

  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity);

  if (numa_node_list_holder.build()) {
    if (PrintMiscellaneous && Verbose) {
      tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
      for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
        tty->print("%d ", numa_node_list_holder.get_node_list_entry(i));
      }
      tty->print("\n");
    }
    success = true;
  } else {
    WARN("Process does not cover multiple NUMA nodes.");
  }
  if (!success) {
    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
  }
  return success;
#undef WARN
}

// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
//  * UseNUMAInterleaving requires a separate node for each piece
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
                                         DWORD prot,
                                         bool should_inject_error = false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) VirtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = (char *) align_size_up((size_t)p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t bytes_remaining = bytes;
  // An overflow of align_size_up() would have been caught above
  // in the calculation of size_of_reserve.
  char * next_alloc_addr = p_buf;
  HANDLE hProc = GetCurrentProcess();

#ifdef ASSERT
  // Variable for the failure injection
  long ran_num = os::random();
  size_t fail_after = ran_num % bytes;
#endif

  int count=0;
  while (bytes_remaining) {
    // select bytes_to_rq to get to the next chunk_size boundary

    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
    // Note allocate and commit
    char * p_new;

#ifdef ASSERT
    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
#else
    const bool inject_error_now = false;
#endif

    if (inject_error_now) {
      p_new = NULL;
    } else {
      if (!UseNUMAInterleaving) {
        p_new = (char *) VirtualAlloc(next_alloc_addr,
                                      bytes_to_rq,
                                      flags,
                                      prot);
      } else {
        // get the next node to use from the used_node_list
        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
        p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
      }
    }

    if (p_new == NULL) {
      // Free any allocated pages
      if (next_alloc_addr > p_buf) {
        // Some memory was committed so release it.
        size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // need to create a dummy 'reserve' record to match
        // the release.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        if (TracePageSizes && Verbose) {
          tty->print_cr("Reserving pages individually failed.");
        }
      }
#endif
      return NULL;
    }

    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}



// (Head of os::large_page_init(); the function continues below this chunk.)
// Determines _large_page_size from GetLargePageMinimum() and the
// LargePageSizeInBytes flag, after acquiring the lock-memory privilege.
void os::large_page_init() {
  if (!UseLargePages) return;

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  bool success = false;

#define WARN(msg) if (warn_on_failure) { warning(msg); }
  if (request_lock_memory_privilege()) {
    size_t s = GetLargePageMinimum();
    if (s) {
#if defined(IA32) || defined(AMD64)
      if (s > 4*M || LargePageSizeInBytes > 4*M) {
        WARN("JVM cannot use large pages bigger than 4mb.");
      } else {
#endif
        // Honor a LargePageSizeInBytes that is a multiple of the minimum;
        // otherwise fall back to the OS minimum large page size.
        if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
          _large_page_size = LargePageSizeInBytes;
        } else {
          _large_page_size = s;
        }
        success = true;
#if defined(IA32) || defined(AMD64)
      }
#endif
    } else {
      WARN("Large page is not supported by the processor.");
    }
  } else {
    WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
  }
#undef WARN

const size_t default_page_size = (size_t) vm_page_size(); 3038 if (success && _large_page_size > default_page_size) { 3039 _page_sizes[0] = _large_page_size; 3040 _page_sizes[1] = default_page_size; 3041 _page_sizes[2] = 0; 3042 } 3043 3044 cleanup_after_large_page_init(); 3045 UseLargePages = success; 3046 } 3047 3048 // On win32, one cannot release just a part of reserved memory, it's an 3049 // all or nothing deal. When we split a reservation, we must break the 3050 // reservation into two reservations. 3051 void os::pd_split_reserved_memory(char *base, size_t size, size_t split, 3052 bool realloc) { 3053 if (size > 0) { 3054 release_memory(base, size); 3055 if (realloc) { 3056 reserve_memory(split, base); 3057 } 3058 if (size != split) { 3059 reserve_memory(size - split, base + split); 3060 } 3061 } 3062 } 3063 3064 // Multiple threads can race in this code but it's not possible to unmap small sections of 3065 // virtual space to get requested alignment, like posix-like os's. 3066 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe. 
3067 char* os::reserve_memory_aligned(size_t size, size_t alignment) { 3068 assert((alignment & (os::vm_allocation_granularity() - 1)) == 0, 3069 "Alignment must be a multiple of allocation granularity (page size)"); 3070 assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned"); 3071 3072 size_t extra_size = size + alignment; 3073 assert(extra_size >= size, "overflow, size is too large to allow alignment"); 3074 3075 char* aligned_base = NULL; 3076 3077 do { 3078 char* extra_base = os::reserve_memory(extra_size, NULL, alignment); 3079 if (extra_base == NULL) { 3080 return NULL; 3081 } 3082 // Do manual alignment 3083 aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment); 3084 3085 os::release_memory(extra_base, extra_size); 3086 3087 aligned_base = os::reserve_memory(size, aligned_base); 3088 3089 } while (aligned_base == NULL); 3090 3091 return aligned_base; 3092 } 3093 3094 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) { 3095 assert((size_t)addr % os::vm_allocation_granularity() == 0, 3096 "reserve alignment"); 3097 assert(bytes % os::vm_page_size() == 0, "reserve page size"); 3098 char* res; 3099 // note that if UseLargePages is on, all the areas that require interleaving 3100 // will go thru reserve_memory_special rather than thru here. 
3101 bool use_individual = (UseNUMAInterleaving && !UseLargePages); 3102 if (!use_individual) { 3103 res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE); 3104 } else { 3105 elapsedTimer reserveTimer; 3106 if (Verbose && PrintMiscellaneous) reserveTimer.start(); 3107 // in numa interleaving, we have to allocate pages individually 3108 // (well really chunks of NUMAInterleaveGranularity size) 3109 res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE); 3110 if (res == NULL) { 3111 warning("NUMA page allocation failed"); 3112 } 3113 if (Verbose && PrintMiscellaneous) { 3114 reserveTimer.stop(); 3115 tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes, 3116 reserveTimer.milliseconds(), reserveTimer.ticks()); 3117 } 3118 } 3119 assert(res == NULL || addr == NULL || addr == res, 3120 "Unexpected address from reserve."); 3121 3122 return res; 3123 } 3124 3125 // Reserve memory at an arbitrary address, only if that area is 3126 // available (and not reserved for something else). 3127 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) { 3128 // Windows os::reserve_memory() fails of the requested address range is 3129 // not avilable. 3130 return reserve_memory(bytes, requested_addr); 3131 } 3132 3133 size_t os::large_page_size() { 3134 return _large_page_size; 3135 } 3136 3137 bool os::can_commit_large_page_memory() { 3138 // Windows only uses large page memory when the entire region is reserved 3139 // and committed in a single VirtualAlloc() call. This may change in the 3140 // future, but with Windows 2003 it's not possible to commit on demand. 
3141 return false; 3142 } 3143 3144 bool os::can_execute_large_page_memory() { 3145 return true; 3146 } 3147 3148 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, 3149 bool exec) { 3150 assert(UseLargePages, "only for large pages"); 3151 3152 if (!is_size_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) { 3153 return NULL; // Fallback to small pages. 3154 } 3155 3156 const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; 3157 const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 3158 3159 // with large pages, there are two cases where we need to use Individual Allocation 3160 // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003) 3161 // 2) NUMA Interleaving is enabled, in which case we use a different node for each page 3162 if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) { 3163 if (TracePageSizes && Verbose) { 3164 tty->print_cr("Reserving large pages individually."); 3165 } 3166 char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError); 3167 if (p_buf == NULL) { 3168 // give an appropriate warning message 3169 if (UseNUMAInterleaving) { 3170 warning("NUMA large page allocation failed, UseLargePages flag ignored"); 3171 } 3172 if (UseLargePagesIndividualAllocation) { 3173 warning("Individually allocated large pages failed, " 3174 "use -XX:-UseLargePagesIndividualAllocation to turn off"); 3175 } 3176 return NULL; 3177 } 3178 3179 return p_buf; 3180 3181 } else { 3182 if (TracePageSizes && Verbose) { 3183 tty->print_cr("Reserving large pages in a single large chunk."); 3184 } 3185 // normal policy just allocate it all at once 3186 DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 3187 char * res = (char *)VirtualAlloc(addr, bytes, flag, prot); 3188 if (res != NULL) { 3189 MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC); 3190 } 3191 3192 return res; 
3193 } 3194 } 3195 3196 bool os::release_memory_special(char* base, size_t bytes) { 3197 assert(base != NULL, "Sanity check"); 3198 return release_memory(base, bytes); 3199 } 3200 3201 void os::print_statistics() { 3202 } 3203 3204 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) { 3205 int err = os::get_last_error(); 3206 char buf[256]; 3207 size_t buf_len = os::lasterror(buf, sizeof(buf)); 3208 warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT 3209 ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes, 3210 exec, buf_len != 0 ? buf : "<no_error_string>", err); 3211 } 3212 3213 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) { 3214 if (bytes == 0) { 3215 // Don't bother the OS with noops. 3216 return true; 3217 } 3218 assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries"); 3219 assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks"); 3220 // Don't attempt to print anything if the OS call fails. We're 3221 // probably low on resources, so the print itself may cause crashes. 3222 3223 // unless we have NUMAInterleaving enabled, the range of a commit 3224 // is always within a reserve covered by a single VirtualAlloc 3225 // in that case we can just do a single commit for the requested size 3226 if (!UseNUMAInterleaving) { 3227 if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) { 3228 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 3229 return false; 3230 } 3231 if (exec) { 3232 DWORD oldprot; 3233 // Windows doc says to use VirtualProtect to get execute permissions 3234 if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) { 3235 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 3236 return false; 3237 } 3238 } 3239 return true; 3240 } else { 3241 3242 // when NUMAInterleaving is enabled, the commit might cover a range that 3243 // came from multiple VirtualAlloc reserves (using allocate_pages_individually). 
3244 // VirtualQuery can help us determine that. The RegionSize that VirtualQuery 3245 // returns represents the number of bytes that can be committed in one step. 3246 size_t bytes_remaining = bytes; 3247 char * next_alloc_addr = addr; 3248 while (bytes_remaining > 0) { 3249 MEMORY_BASIC_INFORMATION alloc_info; 3250 VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info)); 3251 size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3252 if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, 3253 PAGE_READWRITE) == NULL) { 3254 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 3255 exec);) 3256 return false; 3257 } 3258 if (exec) { 3259 DWORD oldprot; 3260 if (!VirtualProtect(next_alloc_addr, bytes_to_rq, 3261 PAGE_EXECUTE_READWRITE, &oldprot)) { 3262 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 3263 exec);) 3264 return false; 3265 } 3266 } 3267 bytes_remaining -= bytes_to_rq; 3268 next_alloc_addr += bytes_to_rq; 3269 } 3270 } 3271 // if we made it this far, return true 3272 return true; 3273 } 3274 3275 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, 3276 bool exec) { 3277 // alignment_hint is ignored on this OS 3278 return pd_commit_memory(addr, size, exec); 3279 } 3280 3281 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec, 3282 const char* mesg) { 3283 assert(mesg != NULL, "mesg must be specified"); 3284 if (!pd_commit_memory(addr, size, exec)) { 3285 warn_fail_commit_memory(addr, size, exec); 3286 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg); 3287 } 3288 } 3289 3290 void os::pd_commit_memory_or_exit(char* addr, size_t size, 3291 size_t alignment_hint, bool exec, 3292 const char* mesg) { 3293 // alignment_hint is ignored on this OS 3294 pd_commit_memory_or_exit(addr, size, exec, mesg); 3295 } 3296 3297 bool os::pd_uncommit_memory(char* addr, size_t bytes) { 3298 if (bytes == 0) { 3299 // Don't bother the OS with noops. 
3300 return true; 3301 } 3302 assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries"); 3303 assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks"); 3304 return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0); 3305 } 3306 3307 bool os::pd_release_memory(char* addr, size_t bytes) { 3308 return VirtualFree(addr, 0, MEM_RELEASE) != 0; 3309 } 3310 3311 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 3312 return os::commit_memory(addr, size, !ExecMem); 3313 } 3314 3315 bool os::remove_stack_guard_pages(char* addr, size_t size) { 3316 return os::uncommit_memory(addr, size); 3317 } 3318 3319 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) { 3320 uint count = 0; 3321 bool ret = false; 3322 size_t bytes_remaining = bytes; 3323 char * next_protect_addr = addr; 3324 3325 // Use VirtualQuery() to get the chunk size. 3326 while (bytes_remaining) { 3327 MEMORY_BASIC_INFORMATION alloc_info; 3328 if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) { 3329 return false; 3330 } 3331 3332 size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3333 // We used different API at allocate_pages_individually() based on UseNUMAInterleaving, 3334 // but we don't distinguish here as both cases are protected by same API. 
3335 ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0; 3336 warning("Failed protecting pages individually for chunk #%u", count); 3337 if (!ret) { 3338 return false; 3339 } 3340 3341 bytes_remaining -= bytes_to_protect; 3342 next_protect_addr += bytes_to_protect; 3343 count++; 3344 } 3345 return ret; 3346 } 3347 3348 // Set protections specified 3349 bool os::protect_memory(char* addr, size_t bytes, ProtType prot, 3350 bool is_committed) { 3351 unsigned int p = 0; 3352 switch (prot) { 3353 case MEM_PROT_NONE: p = PAGE_NOACCESS; break; 3354 case MEM_PROT_READ: p = PAGE_READONLY; break; 3355 case MEM_PROT_RW: p = PAGE_READWRITE; break; 3356 case MEM_PROT_RWX: p = PAGE_EXECUTE_READWRITE; break; 3357 default: 3358 ShouldNotReachHere(); 3359 } 3360 3361 DWORD old_status; 3362 3363 // Strange enough, but on Win32 one can change protection only for committed 3364 // memory, not a big deal anyway, as bytes less or equal than 64K 3365 if (!is_committed) { 3366 commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX, 3367 "cannot commit protection page"); 3368 } 3369 // One cannot use os::guard_memory() here, as on Win32 guard page 3370 // have different (one-shot) semantics, from MSDN on PAGE_GUARD: 3371 // 3372 // Pages in the region become guard pages. Any attempt to access a guard page 3373 // causes the system to raise a STATUS_GUARD_PAGE exception and turn off 3374 // the guard page status. Guard pages thus act as a one-time access alarm. 3375 bool ret; 3376 if (UseNUMAInterleaving) { 3377 // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time, 3378 // so we must protect the chunks individually. 
3379 ret = protect_pages_individually(addr, bytes, p, &old_status); 3380 } else { 3381 ret = VirtualProtect(addr, bytes, p, &old_status) != 0; 3382 } 3383 #ifdef ASSERT 3384 if (!ret) { 3385 int err = os::get_last_error(); 3386 char buf[256]; 3387 size_t buf_len = os::lasterror(buf, sizeof(buf)); 3388 warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT 3389 ") failed; error='%s' (DOS error/errno=%d)", addr, bytes, 3390 buf_len != 0 ? buf : "<no_error_string>", err); 3391 } 3392 #endif 3393 return ret; 3394 } 3395 3396 bool os::guard_memory(char* addr, size_t bytes) { 3397 DWORD old_status; 3398 return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0; 3399 } 3400 3401 bool os::unguard_memory(char* addr, size_t bytes) { 3402 DWORD old_status; 3403 return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0; 3404 } 3405 3406 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { } 3407 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { } 3408 void os::numa_make_global(char *addr, size_t bytes) { } 3409 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { } 3410 bool os::numa_topology_changed() { return false; } 3411 size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); } 3412 int os::numa_get_group_id() { return 0; } 3413 size_t os::numa_get_leaf_groups(int *ids, size_t size) { 3414 if (numa_node_list_holder.get_count() == 0 && size > 0) { 3415 // Provide an answer for UMA systems 3416 ids[0] = 0; 3417 return 1; 3418 } else { 3419 // check for size bigger than actual groups_num 3420 size = MIN2(size, numa_get_groups_num()); 3421 for (int i = 0; i < (int)size; i++) { 3422 ids[i] = numa_node_list_holder.get_node_list_entry(i); 3423 } 3424 return size; 3425 } 3426 } 3427 3428 bool os::get_page_info(char *start, page_info* info) { 3429 return false; 3430 } 3431 3432 char *os::scan_pages(char *start, char* end, page_info* 
page_expected, 3433 page_info* page_found) { 3434 return end; 3435 } 3436 3437 char* os::non_memory_address_word() { 3438 // Must never look like an address returned by reserve_memory, 3439 // even in its subfields (as defined by the CPU immediate fields, 3440 // if the CPU splits constants across multiple instructions). 3441 return (char*)-1; 3442 } 3443 3444 #define MAX_ERROR_COUNT 100 3445 #define SYS_THREAD_ERROR 0xffffffffUL 3446 3447 void os::pd_start_thread(Thread* thread) { 3448 DWORD ret = ResumeThread(thread->osthread()->thread_handle()); 3449 // Returns previous suspend state: 3450 // 0: Thread was not suspended 3451 // 1: Thread is running now 3452 // >1: Thread is still suspended. 3453 assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back 3454 } 3455 3456 class HighResolutionInterval : public CHeapObj<mtThread> { 3457 // The default timer resolution seems to be 10 milliseconds. 3458 // (Where is this written down?) 3459 // If someone wants to sleep for only a fraction of the default, 3460 // then we set the timer resolution down to 1 millisecond for 3461 // the duration of their interval. 3462 // We carefully set the resolution back, since otherwise we 3463 // seem to incur an overhead (3%?) that we don't need. 3464 // CONSIDER: if ms is small, say 3, then we should run with a high resolution time. 3465 // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod(). 3466 // Alternatively, we could compute the relative error (503/500 = .6%) and only use 3467 // timeBeginPeriod() if the relative error exceeded some threshold. 3468 // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and 3469 // to decreased efficiency related to increased timer "tick" rates. We want to minimize 3470 // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high 3471 // resolution timers running. 
3472 private: 3473 jlong resolution; 3474 public: 3475 HighResolutionInterval(jlong ms) { 3476 resolution = ms % 10L; 3477 if (resolution != 0) { 3478 MMRESULT result = timeBeginPeriod(1L); 3479 } 3480 } 3481 ~HighResolutionInterval() { 3482 if (resolution != 0) { 3483 MMRESULT result = timeEndPeriod(1L); 3484 } 3485 resolution = 0L; 3486 } 3487 }; 3488 3489 int os::sleep(Thread* thread, jlong ms, bool interruptable) { 3490 jlong limit = (jlong) MAXDWORD; 3491 3492 while (ms > limit) { 3493 int res; 3494 if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) { 3495 return res; 3496 } 3497 ms -= limit; 3498 } 3499 3500 assert(thread == Thread::current(), "thread consistency check"); 3501 OSThread* osthread = thread->osthread(); 3502 OSThreadWaitState osts(osthread, false /* not Object.wait() */); 3503 int result; 3504 if (interruptable) { 3505 assert(thread->is_Java_thread(), "must be java thread"); 3506 JavaThread *jt = (JavaThread *) thread; 3507 ThreadBlockInVM tbivm(jt); 3508 3509 jt->set_suspend_equivalent(); 3510 // cleared by handle_special_suspend_equivalent_condition() or 3511 // java_suspend_self() via check_and_wait_while_suspended() 3512 3513 HANDLE events[1]; 3514 events[0] = osthread->interrupt_event(); 3515 HighResolutionInterval *phri=NULL; 3516 if (!ForceTimeHighResolution) { 3517 phri = new HighResolutionInterval(ms); 3518 } 3519 if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) { 3520 result = OS_TIMEOUT; 3521 } else { 3522 ResetEvent(osthread->interrupt_event()); 3523 osthread->set_interrupted(false); 3524 result = OS_INTRPT; 3525 } 3526 delete phri; //if it is NULL, harmless 3527 3528 // were we externally suspended while we were waiting? 3529 jt->check_and_wait_while_suspended(); 3530 } else { 3531 assert(!thread->is_Java_thread(), "must not be java thread"); 3532 Sleep((long) ms); 3533 result = OS_TIMEOUT; 3534 } 3535 return result; 3536 } 3537 3538 // Short sleep, direct OS call. 
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    Sleep(100000);  // ... 100 seconds at a time
  }
}

typedef BOOL (WINAPI * STTSignature)(void);

void os::naked_yield() {
  // Consider passing back the return value from SwitchToThread().
  SwitchToThread();
}

// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.

int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};

// Alternate mapping selected by -XX:ThreadPriorityPolicy=1: spreads the Java
// priorities over a wider Win32 range, up to TIME_CRITICAL.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};

// One-time selection of the Java-to-OS priority table, driven by the
// ThreadPriorityPolicy and UseCriticalJavaThreadPriority flags.
static int prio_init() {
  // If ThreadPriorityPolicy is 1, switch tables
  if (ThreadPriorityPolicy == 1) {
    int i;
    for (i = 0; i < CriticalPriority + 1; i++) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  if (UseCriticalJavaThreadPriority) {
    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
  }
  return 0;
}

OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) return OS_OK;
  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
  return ret ? OS_OK : OS_ERR;
}

OSReturn os::get_native_priority(const Thread* const thread,
                                 int* priority_ptr) {
  if (!UseThreadPriorities) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

// Post an interrupt to 'thread': set the flag, signal the interrupt event,
// and unpark both the JSR166 parker and the ParkEvent.
void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread ||
         Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread()) {
    ((JavaThread*)thread)->parker()->unpark();
  }

  ParkEvent * ev = thread->_ParkEvent;
  if (ev != NULL) ev->unpark();
}


bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. By checking thread interrupt event to see
  // if the thread gets real interrupt thus prevent spurious wakeup.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}

// Gets a pc (hint) for a running thread. Currently used only for profiling.
ExtendedPC os::get_thread_pc(Thread* thread) {
  CONTEXT context;
  context.ContextFlags = CONTEXT_CONTROL;
  HANDLE handle = thread->osthread()->thread_handle();
#ifdef _M_IA64
  assert(0, "Fix get_thread_pc");
  return ExtendedPC(NULL);
#else
  if (GetThreadContext(handle, &context)) {
#ifdef _M_AMD64
    return ExtendedPC((address) context.Rip);
#else
    return ExtendedPC((address) context.Eip);
#endif
  } else {
    return ExtendedPC(NULL);
  }
#endif
}

// GetCurrentThreadId() returns DWORD
intx os::current_thread_id()  { return GetCurrentThreadId(); }

static int _initial_pid = 0;

int os::current_process_id() {
  // Prefer the pid cached at startup (if set); fall back to _getpid().
  return (_initial_pid ? _initial_pid : _getpid());
}

int    os::win32::_vm_page_size              = 0;
int    os::win32::_vm_allocation_granularity = 0;
int    os::win32::_processor_type            = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int    os::win32::_processor_level           = 0;
julong os::win32::_physical_memory           = 0;
size_t os::win32::_default_stack_size        = 0;

intx          os::win32::_os_thread_limit    = 0;
volatile intx os::win32::_os_thread_count    = 0;

bool   os::win32::_is_windows_server         = false;

// 6573254
// Currently, the bug is observed across all the supported Windows releases,
// including the latest one (as of this writing - Windows Server 2012 R2)
bool   os::win32::_has_exit_bug              = true;

// Query page size, allocation granularity, processor count, physical memory
// and OS edition from the OS and cache them in the os::win32 statics.
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size    = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type  = si.dwProcessorType;
  _processor_level = si.wProcessorLevel;
  set_processor_count(si.dwNumberOfProcessors);

  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);

  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatusEx(&ms);
  _physical_memory = ms.ullTotalPhys;

  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch (oi.dwPlatformId) {
  case VER_PLATFORM_WIN32_NT:
    {
      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
      // Domain controllers and server SKUs are both treated as "server".
      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
          oi.wProductType == VER_NT_SERVER) {
        _is_windows_server = true;
      }
    }
    break;
  default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
         "stack size not a multiple of page size");

  initialize_performance_counter();
}


// Load a DLL by bare name from the System or Windows directory only.
// Rejects any name containing a path component; fills 'ebuf' on failure.
HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
                                      int ebuflen) {
  char path[MAX_PATH];
  DWORD size;
  DWORD pathLen = (DWORD)sizeof(path);
  HINSTANCE result = NULL;

  // only allow library name without path component
  assert(strchr(name, '\\') == NULL, "path not allowed");
  assert(strchr(name, ':') == NULL, "path not allowed");
  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
    jio_snprintf(ebuf, ebuflen,
                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
    return NULL;
  }

  // search system directory
  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // try Windows directory
  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  jio_snprintf(ebuf, ebuflen,
               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
  return NULL;
}

#define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
#define EXIT_TIMEOUT 300000 /* 5 minutes */

// InitOnceExecuteOnce callback: initializes the critical section exactly once.
static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
  InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
  return TRUE;
}

int os::win32::exit_process_or_thread(Ept what, int exit_code) {
  // Basic approach:
  //  - Each exiting thread registers its intent to exit and then does so.
  //  - A thread trying to terminate the process must wait for all
  //    threads currently exiting to complete their exit.

  if (os::win32::has_exit_bug()) {
    // The array holds handles of the threads that have started exiting by calling
    // _endthreadex().
    // Should be large enough to avoid blocking the exiting thread due to lack of
    // a free slot.
    static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
    static int handle_count = 0;

    static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
    static CRITICAL_SECTION crit_sect;
    static volatile jint process_exiting = 0;
    int i, j;
    DWORD res;
    HANDLE hproc, hthr;

    // The first thread that reached this point, initializes the critical section.
    if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
      warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
    } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
      if (what != EPT_THREAD) {
        // Atomically set process_exiting before the critical section
        // to increase the visibility between racing threads.
        // The stored value is the id of the process-exiting thread.
        Atomic::cmpxchg((jint)GetCurrentThreadId(), &process_exiting, 0);
      }
      EnterCriticalSection(&crit_sect);

      if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
        // Remove from the array those handles of the threads that have completed exiting.
        for (i = 0, j = 0; i < handle_count; ++i) {
          res = WaitForSingleObject(handles[i], 0 /* don't wait */);
          if (res == WAIT_TIMEOUT) {
            handles[j++] = handles[i];   // still exiting - keep it
          } else {
            if (res == WAIT_FAILED) {
              warning("WaitForSingleObject failed (%u) in %s: %d\n",
                      GetLastError(), __FILE__, __LINE__);
            }
            // Don't keep the handle, if we failed waiting for it.
            CloseHandle(handles[i]);
          }
        }

        // If there's no free slot in the array of the kept handles, we'll have to
        // wait until at least one thread completes exiting.
        if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
          // Raise the priority of the oldest exiting thread to increase its chances
          // to complete sooner.
          SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
          // The API can wait on at most MAXIMUM_WAIT_OBJECTS handles at once.
          res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
          if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
            // One thread completed: close the gap by shifting the array left.
            i = (res - WAIT_OBJECT_0);
            handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
            for (; i < handle_count; ++i) {
              handles[i] = handles[i + 1];
            }
          } else {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Don't keep handles, if we failed waiting for them.
            for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
              CloseHandle(handles[i]);
            }
            handle_count = 0;
          }
        }

        // Store a duplicate of the current thread handle in the array of handles.
        hproc = GetCurrentProcess();
        hthr = GetCurrentThread();
        if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
                             0, FALSE, DUPLICATE_SAME_ACCESS)) {
          warning("DuplicateHandle failed (%u) in %s: %d\n",
                  GetLastError(), __FILE__, __LINE__);
        } else {
          ++handle_count;
        }

        // The current exiting thread has stored its handle in the array, and now
        // should leave the critical section before calling _endthreadex().

      } else if (what != EPT_THREAD && handle_count > 0) {
        jlong start_time, finish_time, timeout_left;
        // Before ending the process, make sure all the threads that had called
        // _endthreadex() completed.

        // Set the priority level of the current thread to the same value as
        // the priority level of exiting threads.
        // This is to ensure it will be given a fair chance to execute if
        // the timeout expires.
        hthr = GetCurrentThread();
        SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
        start_time = os::javaTimeNanos();
        finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
        // Wait in portions of at most MAXIMUM_WAIT_OBJECTS handles per call.
        for (i = 0; ; ) {
          int portion_count = handle_count - i;
          if (portion_count > MAXIMUM_WAIT_OBJECTS) {
            portion_count = MAXIMUM_WAIT_OBJECTS;
          }
          for (j = 0; j < portion_count; ++j) {
            SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
          }
          timeout_left = (finish_time - start_time) / 1000000L;
          if (timeout_left < 0) {
            timeout_left = 0;
          }
          res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
          if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Reset portion_count so we close the remaining
            // handles due to this error.
            portion_count = handle_count - i;
          }
          for (j = 0; j < portion_count; ++j) {
            CloseHandle(handles[i + j]);
          }
          if ((i += portion_count) >= handle_count) {
            break;
          }
          start_time = os::javaTimeNanos();
        }
        handle_count = 0;
      }

      LeaveCriticalSection(&crit_sect);
    }

    if (OrderAccess::load_acquire(&process_exiting) != 0 &&
        process_exiting != (jint)GetCurrentThreadId()) {
      // Some other thread is about to call exit(), so we
      // don't let the current thread proceed to exit() or _endthreadex()
      while (true) {
        SuspendThread(GetCurrentThread());
        // Avoid busy-wait loop, if SuspendThread() failed.
        Sleep(EXIT_TIMEOUT);
      }
    }
  }

  // We are here if either
  // - there's no 'race at exit' bug on this OS release;
  // - initialization of the critical section failed (unlikely);
  // - the current thread has stored its handle and left the critical section;
  // - the process-exiting thread has raised the flag and left the critical section.
  if (what == EPT_THREAD) {
    _endthreadex((unsigned)exit_code);
  } else if (what == EPT_PROCESS) {
    ::exit(exit_code);
  } else {
    // Remaining Ept value: hard exit — _exit() bypasses atexit processing.
    _exit(exit_code);
  }

  // Should not reach here
  return exit_code;
}

#undef EXIT_TIMEOUT

// Put the three standard streams into binary (untranslated) mode so the VM
// controls CR/LF handling itself.
void os::win32::setmode_streams() {
  _setmode(_fileno(stdin), _O_BINARY);
  _setmode(_fileno(stdout), _O_BINARY);
  _setmode(_fileno(stderr), _O_BINARY);
}


bool os::is_debugger_attached() {
  return IsDebuggerPresent() ? true : false;
}


// If -XX:+PauseAtExit is set, block on stdin before the process exits.
void os::wait_for_keypress_at_exit(void) {
  if (PauseAtExit) {
    fprintf(stderr, "Press any key to continue...\n");
    fgetc(stdin);
  }
}


// Show a system-modal Yes/No message box; returns true iff "Yes" was chosen.
bool os::message_box(const char* title, const char* message) {
  int result = MessageBox(NULL, message, title,
                          MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
  return result == IDYES;
}

#ifndef PRODUCT
#ifndef _WIN64
// Helpers to check whether NX protection is enabled
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
  // Accept only access violations caused by attempted instruction execution.
  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      pex->ExceptionRecord->NumberParameters > 0 &&
      pex->ExceptionRecord->ExceptionInformation[0] ==
      EXCEPTION_INFO_EXEC_VIOLATION) {
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 }; // ret
  void
*code_ptr = (void *)code;
  __try {
    __asm call code_ptr
  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
    tty->print_raw_cr("NX protection detected.");
  }
}
#endif // _WIN64
#endif // PRODUCT

// This is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = _getpid();

  init_random(1234567);

  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false);

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  // Duplicate the pseudo thread handle into a real one usable by other threads.
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();

  // initialize fast thread access - only used for 32-bit
  win32::initialize_thread_ptr_offset();
}

// To install functions for atexit processing
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

static jint initSock();

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // Allocate a single page and mark it as readable for safepoint polling
  address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
  guarantee(polling_page != NULL, "Reserve Failed for polling page");

  address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
  guarantee(return_page != NULL, "Commit Failed for polling page");

  os::set_polling_page(polling_page);

#ifndef PRODUCT
  if (Verbose &&
      PrintMiscellaneous) {
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n",
               (intptr_t)polling_page);
  }
#endif

  if (!UseMembar) {
    // Without explicit membars, cross-modifying a dedicated page is used to
    // serialize memory with other threads.
    address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
    guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page");

    return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
    guarantee(return_page != NULL, "Commit Failed for memory serialize page");

    os::set_memory_serialize_page(mem_serialize_page);

#ifndef PRODUCT
    if (Verbose && PrintMiscellaneous) {
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n",
                 (intptr_t)mem_serialize_page);
    }
#endif
  }

  // Setup Windows Exceptions

  // for debugging float code generation bugs
  if (ForceFloatExceptions) {
#ifndef _WIN64
    static long fp_control_word = 0;
    __asm { fstcw fp_control_word }
    // see Intel PPro Manual, Vol. 2, p 7-16
    const long precision = 0x20;
    const long underflow = 0x10;
    const long overflow = 0x08;
    const long zero_div = 0x04;
    const long denorm = 0x02;
    const long invalid = 0x01;
    // Unmask only the "invalid operation" FP exception.
    fp_control_word |= invalid;
    __asm { fldcw fp_control_word }
#endif
  }

  // If stack_commit_size is 0, windows will reserve the default size,
  // but only commit a small portion of it.
  size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
  size_t default_reserve_size = os::win32::default_stack_size();
  size_t actual_reserve_size = stack_commit_size;
  if (stack_commit_size < default_reserve_size) {
    // If stack_commit_size == 0, we want this too
    actual_reserve_size = default_reserve_size;
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size. Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  size_t min_stack_allowed =
            (size_t)(JavaThread::stack_yellow_zone_size() + JavaThread::stack_red_zone_size() +
                     JavaThread::stack_shadow_zone_size() +
                     (2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size());
  if (actual_reserve_size < min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, "
                  "Specify at least %dk",
                  min_stack_allowed / K);
    return JNI_ERR;
  }

  JavaThread::set_stack_size_at_create(stack_commit_size);

  // Calculate theoretical max. size of Threads to guard against artificial
  // out-of-memory situations, where all available address-space has been
  // reserved by thread stacks.
  assert(actual_reserve_size != 0, "Must have a stack");

  // Calculate the thread limit when we should start doing Virtual Memory
  // banging. Currently when the threads will have used all but 200Mb of space.
  //
  // TODO: consider performing a similar calculation for commit size instead
  // as reserve size, since on a 64-bit platform we'll run into that more
  // often than running out of virtual memory space. We can use the
  // lower value of the two calculations as the os_thread_limit.
  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);

  // at exit methods are called in the reverse order of their registration.
  // there is no limit to the number of functions registered. atexit does
  // not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

#ifndef _WIN64
  // Print something if NX is enabled (win32 on AMD64)
  NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
#endif

  // initialize thread priority policy
  prio_init();

  if (UseNUMA && !ForceNUMA) {
    UseNUMA = false; // We don't fully support this yet
  }

  if (UseNUMAInterleaving) {
    // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
    bool success = numa_interleaving_init();
    if (!success) UseNUMAInterleaving = false;
  }

  if (initSock() != JNI_OK) {
    return JNI_ERR;
  }

  return JNI_OK;
}

// Mark the polling page as unreadable, so a safepoint poll traps.
void os::make_polling_page_unreadable(void) {
  DWORD old_status;
  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
                      PAGE_NOACCESS, &old_status)) {
    fatal("Could not disable polling page");
  }
}

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  DWORD old_status;
  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
                      PAGE_READONLY, &old_status)) {
    fatal("Could not enable polling page");
  }
}


// stat() wrapper: converts 'path' to native form first, and optionally
// normalizes st_mtime to be timezone-independent.
int os::stat(const char *path, struct stat *sbuf) {
  char pathbuf[MAX_PATH];
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  int ret = ::stat(pathbuf, sbuf);
  if (sbuf != NULL && UseUTCFileTimestamp) {
    // Fix for 6539723.  st_mtime returned from stat() is dependent on
    // the system timezone and so can return different values for the
    // same file if/when daylight savings time changes.  This adjustment
    // makes sure the same timestamp is returned regardless of the TZ.
    //
    // See:
    // http://msdn.microsoft.com/library/
    //   default.asp?url=/library/en-us/sysinfo/base/
    //   time_zone_information_str.asp
    // and
    // http://msdn.microsoft.com/library/default.asp?url=
    //   /library/en-us/sysinfo/base/settimezoneinformation.asp
    //
    // NOTE: there is a insidious bug here:  If the timezone is changed
    // after the call to stat() but before 'GetTimeZoneInformation()', then
    // the adjustment we do here will be wrong and we'll return the wrong
    // value (which will likely end up creating an invalid class data
    // archive).  Absent a better API for this, or some time zone locking
    // mechanism, we'll have to live with this risk.
    TIME_ZONE_INFORMATION tz;
    DWORD tzid = GetTimeZoneInformation(&tz);
    int daylightBias =
        (tzid == TIME_ZONE_ID_DAYLIGHT) ?
    tz.DaylightBias : tz.StandardBias;
    // Bias values are in minutes; st_mtime is in seconds.
    sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
  }
  return ret;
}


// Pack a FILETIME (two 32-bit halves, 100ns units) into a single jlong.
#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))


// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.

// current_thread_cpu_time() is not optimized for Windows yet
jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
}

jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns.
  return os::thread_cpu_time(thread, true /* user+sys */);
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
}

// Returns the thread's CPU time in nanoseconds (user+sys or user only),
// or -1 if GetThreadTimes() fails.
jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
  // This code is a copy from the classic VM -> hpi::sysThreadCPUTime
  // If this function changes, os::is_thread_cpu_time_supported() should too
  FILETIME CreationTime;
  FILETIME ExitTime;
  FILETIME KernelTime;
  FILETIME UserTime;

  if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
                     &ExitTime, &KernelTime, &UserTime) == 0) {
    return -1;
  } else if (user_sys_cpu_time) {
    // FILETIME counts 100ns units; multiply by 100 to get nanoseconds.
    return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
  } else {
    return FT2INT64(UserTime) * 100;
  }
}

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}

// Returns true iff GetThreadTimes() works for the current thread.
bool os::is_thread_cpu_time_supported() {
  // see os::thread_cpu_time
  FILETIME CreationTime;
  FILETIME ExitTime;
  FILETIME KernelTime;
  FILETIME UserTime;

  if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
                     &KernelTime, &UserTime) == 0) {
    return false;
  } else {
    return true;
  }
}

// Windows doesn't provide a loadavg primitive so this is stubbed out for now.
// It does have primitives (PDH API) to get CPU usage and run queue length.
// "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
// If we wanted to implement loadavg on Windows, we have a few options:
//
// a) Query CPU usage and run queue length and "fake" an answer by
//    returning the CPU usage if it's under 100%, and the run queue
//    length otherwise.  It turns out that querying is pretty slow
//    on Windows, on the order of 200 microseconds on a fast machine.
//    Note that on the Windows the CPU usage value is the % usage
//    since the last time the API was called (and the first call
//    returns 100%), so we'd have to deal with that as well.
//
// b) Sample the "fake" answer using a sampling thread and store
//    the answer in a global variable.  The call to loadavg would
//    just return the value of the global, avoiding the slow query.
4362 // 4363 // c) Sample a better answer using exponential decay to smooth the 4364 // value. This is basically the algorithm used by UNIX kernels. 4365 // 4366 // Note that sampling thread starvation could affect both (b) and (c). 4367 int os::loadavg(double loadavg[], int nelem) { 4368 return -1; 4369 } 4370 4371 4372 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield() 4373 bool os::dont_yield() { 4374 return DontYieldALot; 4375 } 4376 4377 // This method is a slightly reworked copy of JDK's sysOpen 4378 // from src/windows/hpi/src/sys_api_md.c 4379 4380 int os::open(const char *path, int oflag, int mode) { 4381 char pathbuf[MAX_PATH]; 4382 4383 if (strlen(path) > MAX_PATH - 1) { 4384 errno = ENAMETOOLONG; 4385 return -1; 4386 } 4387 os::native_path(strcpy(pathbuf, path)); 4388 return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); 4389 } 4390 4391 FILE* os::open(int fd, const char* mode) { 4392 return ::_fdopen(fd, mode); 4393 } 4394 4395 // Is a (classpath) directory empty? 
4396 bool os::dir_is_empty(const char* path) { 4397 WIN32_FIND_DATA fd; 4398 HANDLE f = FindFirstFile(path, &fd); 4399 if (f == INVALID_HANDLE_VALUE) { 4400 return true; 4401 } 4402 FindClose(f); 4403 return false; 4404 } 4405 4406 // create binary file, rewriting existing file if required 4407 int os::create_binary_file(const char* path, bool rewrite_existing) { 4408 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY; 4409 if (!rewrite_existing) { 4410 oflags |= _O_EXCL; 4411 } 4412 return ::open(path, oflags, _S_IREAD | _S_IWRITE); 4413 } 4414 4415 // return current position of file pointer 4416 jlong os::current_file_offset(int fd) { 4417 return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR); 4418 } 4419 4420 // move file pointer to the specified offset 4421 jlong os::seek_to_file_offset(int fd, jlong offset) { 4422 return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET); 4423 } 4424 4425 4426 jlong os::lseek(int fd, jlong offset, int whence) { 4427 return (jlong) ::_lseeki64(fd, offset, whence); 4428 } 4429 4430 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) { 4431 OVERLAPPED ov; 4432 DWORD nread; 4433 BOOL result; 4434 4435 ZeroMemory(&ov, sizeof(ov)); 4436 ov.Offset = (DWORD)offset; 4437 ov.OffsetHigh = (DWORD)(offset >> 32); 4438 4439 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4440 4441 result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov); 4442 4443 return result ? nread : 0; 4444 } 4445 4446 4447 // This method is a slightly reworked copy of JDK's sysNativePath 4448 // from src/windows/hpi/src/path_md.c 4449 4450 // Convert a pathname to native format. On win32, this involves forcing all 4451 // separators to be '\\' rather than '/' (both are legal inputs, but Win95 4452 // sometimes rejects '/') and removing redundant separators. The input path is 4453 // assumed to have been converted into the character encoding used by the local 4454 // system. 
// system.  Because this might be a double-byte encoding, care is taken to
// treat double-byte lead characters correctly.
//
// This procedure modifies the given path in place, as the result is never
// longer than the original.  There is no error return; this operation always
// succeeds.
char * os::native_path(char *path) {
  char *src = path, *dst = path, *end = path;
  char *colon = NULL;  // If a drive specifier is found, this will
                       // point to the colon following the drive letter

  // Assumption: '/', '\\', ':', and drive letters are never lead bytes
  assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
          && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");

  // Check for leading separators
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    // Remove leading separators if followed by drive specifier.  This
    // hack is necessary to support file URLs containing drive
    // specifiers (e.g., "file://c:/path").  As a side effect,
    // "/c:/path" can be used as an alternative to "c:/path".
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      // UNC pathname: Retain first separator; leave src pointed at
      // second separator so that further separators will be collapsed
      // into the second separator.  The result will be a pathname
      // beginning with "\\\\" followed (most likely) by a host name.
      src = dst = path + 1;
      path[0] = '\\';  // Force first separator to '\\'
    }
  }

  end = dst;

  // Remove redundant separators from remainder of path, forcing all
  // separators to be '\\' rather than '/'. Also, single byte space
  // characters are removed from the end of the path because those
  // are not legal ending characters on this operating system.
  //
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      // Collapse runs of separators into a single '\\'.
      while (isfilesep(*src)) src++;
      if (*src == '\0') {
        // Check for trailing separator
        end = dst;
        if (colon == dst - 2) break;  // "z:\\"
        if (dst == path + 1) break;   // "\\"
        if (dst == path + 2 && isfilesep(path[0])) {
          // "\\\\" is not collapsed to "\\" because "\\\\" marks the
          // beginning of a UNC pathname.  Even though it is not, by
          // itself, a valid UNC pathname, we leave it as is in order
          // to be consistent with the path canonicalizer as well
          // as the win32 APIs, which treat this case as an invalid
          // UNC pathname rather than as an alias for the root
          // directory of the current drive.
          break;
        }
        end = --dst;  // Path does not denote a root directory, so
                      // remove trailing separator
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else {  // Copy a single-byte character
        char c = *src++;
        *dst++ = c;
        // Space is not a legal ending character
        if (c != ' ') end = dst;
      }
    }
  }

  *end = '\0';

  // For "z:", add "." to work around a bug in the C runtime library
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}

// This code is a copy of JDK's sysSetLength
// from src/windows/hpi/src/sys_api_md.c

// Truncate (or extend) the file behind fd to 'length' bytes.
// Returns 0 on success, -1 on failure.
int os::ftruncate(int fd, jlong length) {
  HANDLE h = (HANDLE)::_get_osfhandle(fd);
  long high = (long)(length >> 32);
  DWORD ret;

  if (h == (HANDLE)(-1)) {
    return -1;
  }

  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
  // 0xFFFFFFFF can be a legitimate low 32 bits; check GetLastError too.
  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
    return -1;
  }

  if (::SetEndOfFile(h) == FALSE) {
    return -1;
  }

  return 0;
}


// This code is a copy of JDK's sysSync
// from src/windows/hpi/src/sys_api_md.c
// except for the legacy workaround for a bug in Win 98

int os::fsync(int fd) {
  HANDLE handle = (HANDLE)::_get_osfhandle(fd);

  // ERROR_ACCESS_DENIED is tolerated: flushing a read-only handle fails
  // that way but there is nothing to sync anyway.
  if ((!::FlushFileBuffers(handle)) &&
      (GetLastError() != ERROR_ACCESS_DENIED)) {
    // from winerror.h
    return -1;
  }
  return 0;
}

static int nonSeekAvailable(int, long *);
static int stdinAvailable(int, long *);

#define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
#define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)

// This code is a copy of JDK's sysAvailable
// from src/windows/hpi/src/sys_api_md.c

int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      // Non-seekable device: delegate to the pipe/console helpers.
      int ret;
      long lpbytes;
      if (fd == 0) {
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    // Seekable file: available = size - current position.
    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if ((end =
                ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
      return FALSE;
    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
      // Restore the original file position after probing the end.
      return FALSE;
    }
    *bytes = end - cur;
    return TRUE;
  } else {
    return FALSE;
  }
}

// This code is a copy of JDK's nonSeekAvailable
// from src/windows/hpi/src/sys_api_md.c

static int nonSeekAvailable(int fd, long *pbytes) {
  // This is used for available on non-seekable devices
  // (like both named and anonymous pipes, such as pipes
  //  connected to an exec'd process).
  // Standard Input is a special case.
  HANDLE han;

  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
    return FALSE;
  }

  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
    // PeekNamedPipe fails when at EOF.  In that case we
    // simply make *pbytes = 0 which is consistent with the
    // behavior we get on Solaris when an fd is at EOF.
    // The only alternative is to raise an Exception,
    // which isn't really warranted.
    //
    if (::GetLastError() != ERROR_BROKEN_PIPE) {
      return FALSE;
    }
    *pbytes = 0;
  }
  return TRUE;
}

#define MAX_INPUT_EVENTS 2000

// This code is a copy of JDK's stdinAvailable
// from src/windows/hpi/src/sys_api_md.c

static int stdinAvailable(int fd, long *pbytes) {
  HANDLE han;
  DWORD numEventsRead = 0;  // Number of events read from buffer
  DWORD numEvents = 0;      // Number of events in buffer
  DWORD i = 0;              // Loop index
  DWORD curLength = 0;      // Position marker
  DWORD actualLength = 0;   // Number of bytes readable
  BOOL error = FALSE;       // Error holder
  INPUT_RECORD *lpBuffer;   // Pointer to records of input events

  if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
    return FALSE;
  }

  // Construct an array of input records in the console buffer
  error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
  if (error == 0) {
    // stdin is not a console (e.g. redirected); treat it like a pipe.
    return nonSeekAvailable(fd, pbytes);
  }

  // lpBuffer must fit into 64K or else PeekConsoleInput fails
  if (numEvents > MAX_INPUT_EVENTS) {
    numEvents = MAX_INPUT_EVENTS;
  }

  lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
  if (lpBuffer == NULL) {
    return FALSE;
  }

  error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
  if (error == 0) {
    os::free(lpBuffer);
    return FALSE;
  }

  // Examine input records for the number of bytes available.
  // Only characters up to and including the last '\r' count as readable.
  for (i=0; i<numEvents; i++) {
    if (lpBuffer[i].EventType == KEY_EVENT) {

      KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
                                      &(lpBuffer[i].Event);
      if (keyRecord->bKeyDown == TRUE) {
        CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
        curLength++;
        if (*keyPressed == '\r') {
          actualLength = curLength;
        }
      }
    }
  }

  if (lpBuffer != NULL) {
    os::free(lpBuffer);
  }

  *pbytes = (long) actualLength;
  return TRUE;
}

// Map a block of memory.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
  HANDLE hFile;
  char* base;

  hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
                     OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
  // NOTE(review): CreateFile() reports failure with INVALID_HANDLE_VALUE,
  // not NULL, so this check looks wrong — confirm and fix separately.
  if (hFile == NULL) {
    if (PrintMiscellaneous && Verbose) {
      DWORD err = GetLastError();
      tty->print_cr("CreateFile() failed: GetLastError->%ld.", err);
    }
    return NULL;
  }

  if (allow_exec) {
    // CreateFileMapping/MapViewOfFileEx can't map executable memory
    // unless it comes from a PE image (which the shared archive is not.)
    // Even VirtualProtect refuses to give execute access to mapped memory
    // that was not previously executable.
    //
    // Instead, stick the executable region in anonymous memory.  Yuck.
    // Penalty is that ~4 pages will not be shareable - in the future
    // we might consider DLLizing the shared archive with a proper PE
    // header so that mapping executable + sharing is possible.

    base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
                                PAGE_READWRITE);
    if (base == NULL) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err);
      }
      CloseHandle(hFile);
      return NULL;
    }

    DWORD bytes_read;
    OVERLAPPED overlapped;
    overlapped.Offset = (DWORD)file_offset;
    overlapped.OffsetHigh = 0;
    overlapped.hEvent = NULL;
    // ReadFile guarantees that if the return value is true, the requested
    // number of bytes were read before returning.
4772 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; 4773 if (!res) { 4774 if (PrintMiscellaneous && Verbose) { 4775 DWORD err = GetLastError(); 4776 tty->print_cr("ReadFile() failed: GetLastError->%ld.", err); 4777 } 4778 release_memory(base, bytes); 4779 CloseHandle(hFile); 4780 return NULL; 4781 } 4782 } else { 4783 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, 4784 NULL /* file_name */); 4785 if (hMap == NULL) { 4786 if (PrintMiscellaneous && Verbose) { 4787 DWORD err = GetLastError(); 4788 tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err); 4789 } 4790 CloseHandle(hFile); 4791 return NULL; 4792 } 4793 4794 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; 4795 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, 4796 (DWORD)bytes, addr); 4797 if (base == NULL) { 4798 if (PrintMiscellaneous && Verbose) { 4799 DWORD err = GetLastError(); 4800 tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err); 4801 } 4802 CloseHandle(hMap); 4803 CloseHandle(hFile); 4804 return NULL; 4805 } 4806 4807 if (CloseHandle(hMap) == 0) { 4808 if (PrintMiscellaneous && Verbose) { 4809 DWORD err = GetLastError(); 4810 tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err); 4811 } 4812 CloseHandle(hFile); 4813 return base; 4814 } 4815 } 4816 4817 if (allow_exec) { 4818 DWORD old_protect; 4819 DWORD exec_access = read_only ? 
PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; 4820 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; 4821 4822 if (!res) { 4823 if (PrintMiscellaneous && Verbose) { 4824 DWORD err = GetLastError(); 4825 tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err); 4826 } 4827 // Don't consider this a hard error, on IA32 even if the 4828 // VirtualProtect fails, we should still be able to execute 4829 CloseHandle(hFile); 4830 return base; 4831 } 4832 } 4833 4834 if (CloseHandle(hFile) == 0) { 4835 if (PrintMiscellaneous && Verbose) { 4836 DWORD err = GetLastError(); 4837 tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err); 4838 } 4839 return base; 4840 } 4841 4842 return base; 4843 } 4844 4845 4846 // Remap a block of memory. 4847 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4848 char *addr, size_t bytes, bool read_only, 4849 bool allow_exec) { 4850 // This OS does not allow existing memory maps to be remapped so we 4851 // have to unmap the memory before we remap it. 4852 if (!os::unmap_memory(addr, bytes)) { 4853 return NULL; 4854 } 4855 4856 // There is a very small theoretical window between the unmap_memory() 4857 // call above and the map_memory() call below where a thread in native 4858 // code may be able to access an address that is no longer mapped. 4859 4860 return os::map_memory(fd, file_name, file_offset, addr, bytes, 4861 read_only, allow_exec); 4862 } 4863 4864 4865 // Unmap a block of memory. 4866 // Returns true=success, otherwise false. 4867 4868 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4869 MEMORY_BASIC_INFORMATION mem_info; 4870 if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) { 4871 if (PrintMiscellaneous && Verbose) { 4872 DWORD err = GetLastError(); 4873 tty->print_cr("VirtualQuery() failed: GetLastError->%ld.", err); 4874 } 4875 return false; 4876 } 4877 4878 // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx. 
4879 // Instead, executable region was allocated using VirtualAlloc(). See 4880 // pd_map_memory() above. 4881 // 4882 // The following flags should match the 'exec_access' flages used for 4883 // VirtualProtect() in pd_map_memory(). 4884 if (mem_info.Protect == PAGE_EXECUTE_READ || 4885 mem_info.Protect == PAGE_EXECUTE_READWRITE) { 4886 return pd_release_memory(addr, bytes); 4887 } 4888 4889 BOOL result = UnmapViewOfFile(addr); 4890 if (result == 0) { 4891 if (PrintMiscellaneous && Verbose) { 4892 DWORD err = GetLastError(); 4893 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err); 4894 } 4895 return false; 4896 } 4897 return true; 4898 } 4899 4900 void os::pause() { 4901 char filename[MAX_PATH]; 4902 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4903 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4904 } else { 4905 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4906 } 4907 4908 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4909 if (fd != -1) { 4910 struct stat buf; 4911 ::close(fd); 4912 while (::stat(filename, &buf) == 0) { 4913 Sleep(100); 4914 } 4915 } else { 4916 jio_fprintf(stderr, 4917 "Could not open pause file '%s', continuing immediately.\n", filename); 4918 } 4919 } 4920 4921 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() { 4922 assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread"); 4923 } 4924 4925 // See the caveats for this class in os_windows.hpp 4926 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4927 // into this method and returns false. If no OS EXCEPTION was raised, returns 4928 // true. 4929 // The callback is supposed to provide the method that should be protected. 
4930 // 4931 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) { 4932 assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread"); 4933 assert(!WatcherThread::watcher_thread()->has_crash_protection(), 4934 "crash_protection already set?"); 4935 4936 bool success = true; 4937 __try { 4938 WatcherThread::watcher_thread()->set_crash_protection(this); 4939 cb.call(); 4940 } __except(EXCEPTION_EXECUTE_HANDLER) { 4941 // only for protection, nothing to do 4942 success = false; 4943 } 4944 WatcherThread::watcher_thread()->set_crash_protection(NULL); 4945 return success; 4946 } 4947 4948 // An Event wraps a win32 "CreateEvent" kernel handle. 4949 // 4950 // We have a number of choices regarding "CreateEvent" win32 handle leakage: 4951 // 4952 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle 4953 // field, and call CloseHandle() on the win32 event handle. Unpark() would 4954 // need to be modified to tolerate finding a NULL (invalid) win32 event handle. 4955 // In addition, an unpark() operation might fetch the handle field, but the 4956 // event could recycle between the fetch and the SetEvent() operation. 4957 // SetEvent() would either fail because the handle was invalid, or inadvertently work, 4958 // as the win32 handle value had been recycled. In an ideal world calling SetEvent() 4959 // on an stale but recycled handle would be harmless, but in practice this might 4960 // confuse other non-Sun code, so it's not a viable approach. 4961 // 4962 // 2: Once a win32 event handle is associated with an Event, it remains associated 4963 // with the Event. The event handle is never closed. This could be construed 4964 // as handle leakage, but only up to the maximum # of threads that have been extant 4965 // at any one time. This shouldn't be an issue, as windows platforms typically 4966 // permit a process to have hundreds of thousands of open handles. 
4967 // 4968 // 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList 4969 // and release unused handles. 4970 // 4971 // 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle. 4972 // It's not clear, however, that we wouldn't be trading one type of leak for another. 4973 // 4974 // 5. Use an RCU-like mechanism (Read-Copy Update). 4975 // Or perhaps something similar to Maged Michael's "Hazard pointers". 4976 // 4977 // We use (2). 4978 // 4979 // TODO-FIXME: 4980 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation. 4981 // 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks 4982 // to recover from (or at least detect) the dreaded Windows 841176 bug. 4983 // 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent 4984 // into a single win32 CreateEvent() handle. 4985 // 4986 // Assumption: 4987 // Only one parker can exist on an event, which is why we allocate 4988 // them per-thread. Multiple unparkers can coexist. 4989 // 4990 // _Event transitions in park() 4991 // -1 => -1 : illegal 4992 // 1 => 0 : pass - return immediately 4993 // 0 => -1 : block; then set _Event to 0 before returning 4994 // 4995 // _Event transitions in unpark() 4996 // 0 => 1 : just return 4997 // 1 => 1 : just return 4998 // -1 => either 0 or 1; must signal target thread 4999 // That is, we can safely transition _Event from -1 to either 5000 // 0 or 1. 5001 // 5002 // _Event serves as a restricted-range semaphore. 5003 // -1 : thread is blocked, i.e. there is a waiter 5004 // 0 : neutral: thread is running or ready, 5005 // could have been signaled after a wait started 5006 // 1 : signaled - thread is running or ready 5007 // 5008 // Another possible encoding of _Event would be with 5009 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits. 
//

// Timed park. Returns OS_OK if we were signaled (or consumed a pending
// signal), OS_TIMEOUT if the full wait elapsed without an unpark().
int os::PlatformEvent::park(jlong Millis) {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL , "Invariant");
  guarantee(Millis > 0          , "Invariant");

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.
  // Consider: use atomic decrement instead of CAS-loop

  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return OS_OK;  // consumed a pre-existing signal, no need to block

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
  }
  v = _Event;
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultaneous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}

// Untimed park. Blocks until unpark() signals the event.
void os::PlatformEvent::park() {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Consider: use atomic decrement instead of CAS-loop
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return;  // consumed a pre-existing signal, no need to block

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}

void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  if (Atomic::xchg(1, &_Event) >= 0) return;

  // The xchg above observed -1: a waiter is (or is about to be) blocked
  // in WaitForSingleObject, so we must signal the kernel event.
  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

// The Windows implementation of Park is very straightforward: Basic
// operations on Win32 Events turn out to have the right semantics to
// use them directly. We opportunistically reuse the event inherited
// from Monitor.
5136 5137 void Parker::park(bool isAbsolute, jlong time) { 5138 guarantee(_ParkEvent != NULL, "invariant"); 5139 // First, demultiplex/decode time arguments 5140 if (time < 0) { // don't wait 5141 return; 5142 } else if (time == 0 && !isAbsolute) { 5143 time = INFINITE; 5144 } else if (isAbsolute) { 5145 time -= os::javaTimeMillis(); // convert to relative time 5146 if (time <= 0) { // already elapsed 5147 return; 5148 } 5149 } else { // relative 5150 time /= 1000000; // Must coarsen from nanos to millis 5151 if (time == 0) { // Wait for the minimal time unit if zero 5152 time = 1; 5153 } 5154 } 5155 5156 JavaThread* thread = JavaThread::current(); 5157 5158 // Don't wait if interrupted or already triggered 5159 if (Thread::is_interrupted(thread, false) || 5160 WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) { 5161 ResetEvent(_ParkEvent); 5162 return; 5163 } else { 5164 ThreadBlockInVM tbivm(thread); 5165 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */); 5166 thread->set_suspend_equivalent(); 5167 5168 WaitForSingleObject(_ParkEvent, time); 5169 ResetEvent(_ParkEvent); 5170 5171 // If externally suspended while waiting, re-suspend 5172 if (thread->handle_special_suspend_equivalent_condition()) { 5173 thread->java_suspend_self(); 5174 } 5175 } 5176 } 5177 5178 void Parker::unpark() { 5179 guarantee(_ParkEvent != NULL, "invariant"); 5180 SetEvent(_ParkEvent); 5181 } 5182 5183 // Run the specified command in a separate process. Return its exit value, 5184 // or -1 on failure (e.g. can't create a new process). 
// Run cmd as a child process via CreateProcess, wait for it to finish,
// and return its exit code; -1 if the process could not be created.
int os::fork_and_exec(char* cmd) {
  STARTUPINFO si;
  PROCESS_INFORMATION pi;

  memset(&si, 0, sizeof(si));
  si.cb = sizeof(si);
  memset(&pi, 0, sizeof(pi));
  BOOL rslt = CreateProcess(NULL,   // executable name - use command line
                            cmd,    // command line
                            NULL,   // process security attribute
                            NULL,   // thread security attribute
                            TRUE,   // inherits system handles
                            0,      // no creation flags
                            NULL,   // use parent's environment block
                            NULL,   // use parent's starting directory
                            &si,    // (in) startup information
                            &pi);   // (out) process information

  if (rslt) {
    // Wait until child process exits.
    WaitForSingleObject(pi.hProcess, INFINITE);

    DWORD exit_code;
    GetExitCodeProcess(pi.hProcess, &exit_code);

    // Close process and thread handles.
    CloseHandle(pi.hProcess);
    CloseHandle(pi.hThread);

    return (int)exit_code;
  } else {
    return -1;
  }
}

//--------------------------------------------------------------------------------------------------
// Non-product code

static int mallocDebugIntervalCounter = 0;
static int mallocDebugCounter = 0;

// Walk and validate the CRT heap every MallocVerifyInterval allocations
// (once MallocVerifyStart allocations have happened), or unconditionally
// when force is true. Calls fatal() on corruption; otherwise returns true.
bool os::check_heap(bool force) {
  if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
  if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
    // Note: HeapValidate executes two hardware breakpoints when it finds something
    // wrong; at these points, eax contains the address of the offending block (I think).
    // To get to the explicit error message(s) below, just continue twice.
    //
    // Note: we want to check the CRT heap, which is not necessarily located in the
    // process default heap.
    HANDLE heap = (HANDLE) _get_heap_handle();
    if (!heap) {
      return true;
    }

    // If we fail to lock the heap, then gflags.exe has been used
    // or some other special heap flag has been set that prevents
    // locking. We don't try to walk a heap we can't lock.
    if (HeapLock(heap) != 0) {
      PROCESS_HEAP_ENTRY phe;
      phe.lpData = NULL;
      while (HeapWalk(heap, &phe) != 0) {
        if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
            !HeapValidate(heap, 0, phe.lpData)) {
          tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
          // NOTE(review): %#x with an LPVOID truncates the pointer on
          // 64-bit Windows -- consider PTR_FORMAT here.
          tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData);
          HeapUnlock(heap);
          fatal("corrupted C heap");
        }
      }
      // HeapWalk ends with ERROR_NO_MORE_ITEMS on a clean full traversal;
      // anything else means the walk was aborted.
      DWORD err = GetLastError();
      if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
        HeapUnlock(heap);
        fatal("heap walk aborted with error %d", err);
      }
      HeapUnlock(heap);
    }
    mallocDebugIntervalCounter = 0;
  }
  return true;
}


// Print the library (and, if resolvable, the function name + offset)
// containing addr to st. Returns true if the address was resolved.
bool os::find(address addr, outputStream* st) {
  int offset = -1;
  bool result = false;
  char buf[256];
  if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
    st->print(PTR_FORMAT " ", addr);
    if (strlen(buf) < sizeof(buf) - 1) {
      // Print just the file name, not the full path.
      char* p = strrchr(buf, '\\');
      if (p) {
        st->print("%s", p + 1);
      } else {
        st->print("%s", buf);
      }
    } else {
      // The library name is probably truncated. Let's omit the library name.
      // See also JDK-8147512.
    }
    if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
      st->print("::%s + 0x%x", buf, offset);
    }
    st->cr();
    result = true;
  }
  return result;
}

// SEH filter used around memory-serialize-page stores: an access violation
// on the serialize page is expected and execution simply continues.
LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
  DWORD exception_code = e->ExceptionRecord->ExceptionCode;

  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    JavaThread* thread = JavaThread::current();
    PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (os::is_memory_serialize_page(thread, addr)) {
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  return EXCEPTION_CONTINUE_SEARCH;
}

// We don't build a headless jre for Windows
bool os::is_headless_jre() { return false; }

// One-time Winsock 2.2 initialization; returns JNI_OK / JNI_ERR.
static jint initSock() {
  WSADATA wsadata;

  if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
    jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
                ::GetLastError());
    return JNI_ERR;
  }
  return JNI_OK;
}

// Thin wrappers forwarding to the Winsock API.

struct hostent* os::get_host_by_name(char* name) {
  return (struct hostent*)gethostbyname(name);
}

int os::socket_close(int fd) {
  return ::closesocket(fd);
}

int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}

int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  return ::connect(fd, him, len);
}

int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  return ::recv(fd, buf, (int)nBytes, flags);
}

int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(IA32)
  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif

// returns true if thread could be suspended,
// false otherwise
static bool do_suspend(HANDLE* h) {
  if (h != NULL) {
    // SuspendThread returns (DWORD)-1 on failure.
    if (SuspendThread(*h) != ~0) {
      return true;
    }
  }
  return false;
}

// resume the thread
// calling resume on an active thread is a no-op
static void do_resume(HANDLE* h) {
  if (h != NULL) {
    ResumeThread(*h);
  }
}

// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value.
void get_thread_handle_for_extended_context(HANDLE* h,
                                            OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

// Thread sampling implementation
//
// Suspends the target thread, captures its register context, hands the
// context to do_task(), then resumes the thread.
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}

// Append a debugging prompt to buf and offer to attach a debugger via a
// message box. Returns true only if the user chose to debug and no
// breakpoint was taken (in practice: always false after os::breakpoint()).
bool os::start_debugging(char *buf, int buflen) {
  int len = (int)strlen(buf);
  char *p = &buf[len];

  jio_snprintf(p, buflen-len,
               "\n\n"
               "Do you want to debug the problem?\n\n"
               "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
               "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
               "Otherwise, select 'No' to abort...",
               os::current_process_id(), os::current_thread_id());

  bool yes = os::message_box("Unexpected Error", buf);

  if (yes) {
    // os::breakpoint() calls DebugBreak(), which causes a breakpoint
    // exception. If VM is running inside a debugger, the debugger will
    // catch the exception. Otherwise, the breakpoint exception will reach
    // the default windows exception handler, which can spawn a debugger and
    // automatically attach to the dying VM.
    os::breakpoint();
    yes = false;
  }
  return yes;
}

void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//                             == false if only the base name of the library is passed in
//                                     such as "L"
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // Result is "<sym_name>_<lib_name>" (+ optional "@XX" stdcall suffix),
  // so: name + symbol + '_' + NUL.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}

#ifndef PRODUCT

// test the code path in reserve_memory_special() that tries to allocate memory in a single
// contiguous memory block at a particular address.
// The test first tries to find a good approximate address to allocate at by using the same
// method to allocate some memory at any address. The test then tries to allocate memory in
// the vicinity (not directly after it to avoid possible by-chance use of that location)
// This is of course only some dodgy assumption, there is no guarantee that the vicinity of
// the previously allocated memory is available for allocation. The only actual failure
// that is reported is when the test tries to allocate at a particular location but gets a
// different valid one. A NULL return value at this point is not considered an error but may
// be legitimate.
// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5524 void TestReserveMemorySpecial_test() { 5525 if (!UseLargePages) { 5526 if (VerboseInternalVMTests) { 5527 tty->print("Skipping test because large pages are disabled"); 5528 } 5529 return; 5530 } 5531 // save current value of globals 5532 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; 5533 bool old_use_numa_interleaving = UseNUMAInterleaving; 5534 5535 // set globals to make sure we hit the correct code path 5536 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; 5537 5538 // do an allocation at an address selected by the OS to get a good one. 5539 const size_t large_allocation_size = os::large_page_size() * 4; 5540 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); 5541 if (result == NULL) { 5542 if (VerboseInternalVMTests) { 5543 tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.", 5544 large_allocation_size); 5545 } 5546 } else { 5547 os::release_memory_special(result, large_allocation_size); 5548 5549 // allocate another page within the recently allocated memory area which seems to be a good location. At least 5550 // we managed to get it once. 5551 const size_t expected_allocation_size = os::large_page_size(); 5552 char* expected_location = result + os::large_page_size(); 5553 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); 5554 if (actual_location == NULL) { 5555 if (VerboseInternalVMTests) { 5556 tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.", 5557 expected_location, large_allocation_size); 5558 } 5559 } else { 5560 // release memory 5561 os::release_memory_special(actual_location, expected_allocation_size); 5562 // only now check, after releasing any memory to avoid any leaks. 
5563 assert(actual_location == expected_location, 5564 "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead", 5565 expected_location, expected_allocation_size, actual_location); 5566 } 5567 } 5568 5569 // restore globals 5570 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; 5571 UseNUMAInterleaving = old_use_numa_interleaving; 5572 } 5573 #endif // PRODUCT 5574 5575 /* 5576 All the defined signal names for Windows. 5577 5578 NOTE that not all of these names are accepted by FindSignal! 5579 5580 For various reasons some of these may be rejected at runtime. 5581 5582 Here are the names currently accepted by a user of sun.misc.Signal with 5583 1.4.1 (ignoring potential interaction with use of chaining, etc): 5584 5585 (LIST TBD) 5586 5587 */ 5588 int os::get_signal_number(const char* name) { 5589 static const struct { 5590 char* name; 5591 int number; 5592 } siglabels [] = 5593 // derived from version 6.0 VC98/include/signal.h 5594 {"ABRT", SIGABRT, // abnormal termination triggered by abort cl 5595 "FPE", SIGFPE, // floating point exception 5596 "SEGV", SIGSEGV, // segment violation 5597 "INT", SIGINT, // interrupt 5598 "TERM", SIGTERM, // software term signal from kill 5599 "BREAK", SIGBREAK, // Ctrl-Break sequence 5600 "ILL", SIGILL}; // illegal instruction 5601 for(int i=0;i<sizeof(siglabels)/sizeof(struct siglabel);i++) 5602 if(!strcmp(name, siglabels[i].name)) 5603 return siglabels[i].number; 5604 return -1; 5605 } 5606 5607 // Fast current thread access 5608 5609 int os::win32::_thread_ptr_offset = 0; 5610 5611 static void call_wrapper_dummy() {} 5612 5613 // We need to call the os_exception_wrapper once so that it sets 5614 // up the offset from FS of the thread pointer. 5615 void os::win32::initialize_thread_ptr_offset() { 5616 os::os_exception_wrapper((java_call_t)call_wrapper_dummy, 5617 NULL, NULL, NULL, NULL); 5618 }