1 /* 2 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce 26 #define _WIN32_WINNT 0x0600 27 28 // no precompiled headers 29 #include "classfile/classLoader.hpp" 30 #include "classfile/systemDictionary.hpp" 31 #include "classfile/vmSymbols.hpp" 32 #include "code/icBuffer.hpp" 33 #include "code/vtableStubs.hpp" 34 #include "compiler/compileBroker.hpp" 35 #include "compiler/disassembler.hpp" 36 #include "interpreter/interpreter.hpp" 37 #include "jvm_windows.h" 38 #include "logging/log.hpp" 39 #include "memory/allocation.inline.hpp" 40 #include "memory/filemap.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "os_share_windows.hpp" 43 #include "os_windows.inline.hpp" 44 #include "prims/jniFastGetField.hpp" 45 #include "prims/jvm.h" 46 #include "prims/jvm_misc.hpp" 47 #include "runtime/arguments.hpp" 48 #include "runtime/atomic.hpp" 49 #include "runtime/extendedPC.hpp" 50 #include "runtime/globals.hpp" 51 #include "runtime/interfaceSupport.hpp" 52 #include "runtime/java.hpp" 53 #include "runtime/javaCalls.hpp" 54 #include "runtime/mutexLocker.hpp" 55 #include "runtime/objectMonitor.hpp" 56 #include "runtime/orderAccess.inline.hpp" 57 #include "runtime/osThread.hpp" 58 #include "runtime/perfMemory.hpp" 59 #include "runtime/sharedRuntime.hpp" 60 #include "runtime/statSampler.hpp" 61 #include "runtime/stubRoutines.hpp" 62 #include "runtime/thread.inline.hpp" 63 #include "runtime/threadCritical.hpp" 64 #include "runtime/timer.hpp" 65 #include "runtime/vm_version.hpp" 66 #include "semaphore_windows.hpp" 67 #include "services/attachListener.hpp" 68 #include "services/memTracker.hpp" 69 #include "services/runtimeService.hpp" 70 #include "utilities/align.hpp" 71 #include "utilities/decoder.hpp" 72 #include "utilities/defaultStream.hpp" 73 #include "utilities/events.hpp" 74 #include "utilities/growableArray.hpp" 75 #include "utilities/macros.hpp" 76 #include "utilities/vmError.hpp" 77 #include "windbghelp.hpp" 78 79 80 #ifdef _DEBUG 81 
#include <crtdbg.h>
#endif


#include <windows.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/timeb.h>
#include <objidl.h>
#include <shlobj.h>

#include <malloc.h>
#include <signal.h>
#include <direct.h>
#include <errno.h>
#include <fcntl.h>
#include <io.h>
#include <process.h>              // For _beginthreadex(), _endthreadex()
#include <imagehlp.h>             // For os::dll_address_to_function_name
// for enumerating dll libraries
#include <vdmdbg.h>

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(-1)

// For DLL loading/load error detection
// Values of PE COFF
#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
#define IMAGE_FILE_SIGNATURE_LENGTH 4

static HANDLE main_process;
static HANDLE main_thread;
static int main_thread_id;

static FILETIME process_creation_time;
static FILETIME process_exit_time;
static FILETIME process_user_time;
static FILETIME process_kernel_time;

#ifdef _M_AMD64
  #define __CPU__ amd64
#else
  #define __CPU__ i486
#endif

// save DLL module handle, used by GetModuleFileName

HINSTANCE vm_lib_handle;

// DLL entry point: remembers the module handle and, when requested via
// -XX:+ForceTimeHighResolution, raises the OS timer resolution to 1 ms
// for the lifetime of the process (paired timeBeginPeriod/timeEndPeriod).
BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
  switch (reason) {
  case DLL_PROCESS_ATTACH:
    vm_lib_handle = hinst;
    if (ForceTimeHighResolution) {
      timeBeginPeriod(1L);
    }
    break;
  case DLL_PROCESS_DETACH:
    if (ForceTimeHighResolution) {
      timeEndPeriod(1L);
    }
    break;
  default:
    break;
  }
  return true;
}

// Convert a FILETIME (100 ns units since 1601) to seconds as a double.
static inline double fileTimeAsDouble(FILETIME* time) {
  const double high  = (double) ((unsigned int) ~0);
  const double split = 10000000.0;
  double result = (time->dwLowDateTime / split) +
                   time->dwHighDateTime * (high/split);
  return result;
}

// Implementation of os

bool os::unsetenv(const char* name) {
  assert(name != NULL, "Null pointer");
  // Passing NULL as the value deletes the variable.
  return (SetEnvironmentVariable(name, NULL) == TRUE);
}

// No setuid programs under Windows.
bool os::have_special_privileges() {
  return false;
}


// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here.
// For Windows at the moment does nothing
void os::run_periodic_checks() {
  return;
}

// previous UnhandledExceptionFilter, if there is one
static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;

LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);

// Derive java.home, the DLL directory, the boot class path, the library
// search path and the extensions directories from the location of jvm.dll
// (or from _ALT_JAVA_HOME_DIR if set), and publish them via Arguments.
void os::init_system_properties_values() {
  // sysclasspath, java_home, dll_dir
  {
    char *home_path;
    char *dll_path;
    char *pslash;
    char *bin = "\\bin";
    char home_dir[MAX_PATH + 1];
    char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");

    if (alt_home_dir != NULL) {
      strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
      home_dir[MAX_PATH] = '\0';
    } else {
      os::jvm_path(home_dir, sizeof(home_dir));
      // Found the full path to jvm.dll.
      // Now cut the path to <java_home>/jre if we can.
      *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
      pslash = strrchr(home_dir, '\\');
      if (pslash != NULL) {
        *pslash = '\0';                   // get rid of \{client|server}
        pslash = strrchr(home_dir, '\\');
        if (pslash != NULL) {
          *pslash = '\0';                 // get rid of \bin
        }
      }
    }

    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
    if (home_path == NULL) {
      return;
    }
    strcpy(home_path, home_dir);
    Arguments::set_java_home(home_path);
    FREE_C_HEAP_ARRAY(char, home_path);

    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
                                mtInternal);
    if (dll_path == NULL) {
      return;
    }
    strcpy(dll_path, home_dir);
    strcat(dll_path, bin);
    Arguments::set_dll_dir(dll_path);
    FREE_C_HEAP_ARRAY(char, dll_path);

    if (!set_boot_path('\\', ';')) {
      return;
    }
  }

  // library_path
  #define EXT_DIR "\\lib\\ext"
  #define BIN_DIR "\\bin"
  #define PACKAGE_DIR "\\Sun\\Java"
  {
    // Win32 library search order (See the documentation for LoadLibrary):
    //
    // 1. The directory from which application is loaded.
    // 2. The system wide Java Extensions directory (Java only)
    // 3. System directory (GetSystemDirectory)
    // 4. Windows directory (GetWindowsDirectory)
    // 5. The PATH environment variable
    // 6. The current directory

    char *library_path;
    char tmp[MAX_PATH];
    char *path_str = ::getenv("PATH");

    // Worst-case buffer: five MAX_PATH components plus the fixed suffixes,
    // the user PATH, separators and the terminating NUL.
    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
                                    sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);

    library_path[0] = '\0';

    GetModuleFileName(NULL, tmp, sizeof(tmp));
    *(strrchr(tmp, '\\')) = '\0';  // strip the executable name, keep its dir
    strcat(library_path, tmp);

    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);
    strcat(library_path, PACKAGE_DIR BIN_DIR);

    GetSystemDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    if (path_str) {
      strcat(library_path, ";");
      strcat(library_path, path_str);
    }

    strcat(library_path, ";.");

    Arguments::set_library_path(library_path);
    FREE_C_HEAP_ARRAY(char, library_path);
  }

  // Default extensions directory
  {
    char path[MAX_PATH];
    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
    GetWindowsDirectory(path, MAX_PATH);
    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
            path, PACKAGE_DIR, EXT_DIR);
    Arguments::set_ext_dirs(buf);
  }
  #undef EXT_DIR
  #undef BIN_DIR
  #undef PACKAGE_DIR

#ifndef _WIN64
  // set our UnhandledExceptionFilter and save any previous one
  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif

  // Done
  return;
}

void os::breakpoint() {
  DebugBreak();
}

// Invoked from the BREAKPOINT Macro
extern "C" void breakpoint() {
  os::breakpoint();
}

// RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
// So far, this method is only used by Native Memory Tracking, which is
// only supported on Windows XP or later.
//
// Capture up to 'frames' return addresses into 'stack', skipping 'toSkip'
// innermost frames (plus this function itself); unfilled slots are NULLed.
int os::get_native_stack(address* stack, int frames, int toSkip) {
  int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
  for (int index = captured; index < frames; index ++) {
    stack[index] = NULL;
  }
  return captured;
}


// os::current_stack_base()
//
// Returns the base of the stack, which is the stack's
// starting address. This function must be called
// while running on the stack of the thread being queried.

address os::current_stack_base() {
  MEMORY_BASIC_INFORMATION minfo;
  address stack_bottom;
  size_t stack_size;

  // Query the region containing a local variable, i.e. this thread's stack.
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  stack_bottom = (address)minfo.AllocationBase;
  stack_size = minfo.RegionSize;

  // Add up the sizes of all the regions with the same
  // AllocationBase.
  while (1) {
    VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
    if (stack_bottom == (address)minfo.AllocationBase) {
      stack_size += minfo.RegionSize;
    } else {
      break;
    }
  }
  return stack_bottom + stack_size;
}

size_t os::current_stack_size() {
  size_t sz;
  MEMORY_BASIC_INFORMATION minfo;
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
  return sz;
}

// Thread-safe wrapper around localtime(); copies the result into 'res'.
// NOTE(review): relies on the MSVC CRT's per-thread localtime buffer.
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  const struct tm* time_struct_ptr = localtime(clock);
  if (time_struct_ptr != NULL) {
    *res = *time_struct_ptr;
    return res;
  }
  return NULL;
}

// Thread-safe wrapper around gmtime(); copies the result into 'res'.
struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
  const struct tm* time_struct_ptr = gmtime(clock);
  if (time_struct_ptr != NULL) {
    *res = *time_struct_ptr;
    return res;
  }
  return NULL;
}

LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);

// Thread start routine for all newly created threads
static unsigned __stdcall thread_native_entry(Thread* thread) {
  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  _alloca(((pid ^ counter++) & 7) * 128);

  thread->initialize_thread_current();

  OSThread* osthr = thread->osthread();
  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Diagnostic code to investigate JDK-6573254
  int res = 30115;  // non-java thread
  if (thread->is_Java_thread()) {
    res = 20115;    // java thread
  }

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());

  // Install a win32 structured exception handler around every thread created
  // by VM, so VM can generate error dump when an exception occurred in non-
  // Java thread (e.g. VM thread).
  __try {
    thread->run();
  } __except(topLevelExceptionFilter(
                                     (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::add code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec(&os::win32::_os_thread_count);
  }

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  // Thread must not return from exit_process_or_thread(), but if it does,
  // let it proceed to exit normally
  return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
}

// Wrap an already-existing Win32 thread (handle + id) in a new OSThread.
// Returns NULL on allocation or event-creation failure.
static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
                                  int thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Initialize support for Java interrupts
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return NULL;
  }
  osthread->set_interrupt_event(interrupt_event);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}


bool
os::create_attached_thread(JavaThread* thread) { 481 #ifdef ASSERT 482 thread->verify_not_published(); 483 #endif 484 HANDLE thread_h; 485 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(), 486 &thread_h, THREAD_ALL_ACCESS, false, 0)) { 487 fatal("DuplicateHandle failed\n"); 488 } 489 OSThread* osthread = create_os_thread(thread, thread_h, 490 (int)current_thread_id()); 491 if (osthread == NULL) { 492 return false; 493 } 494 495 // Initial thread state is RUNNABLE 496 osthread->set_state(RUNNABLE); 497 498 thread->set_osthread(osthread); 499 500 log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").", 501 os::current_thread_id()); 502 503 return true; 504 } 505 506 bool os::create_main_thread(JavaThread* thread) { 507 #ifdef ASSERT 508 thread->verify_not_published(); 509 #endif 510 if (_starting_thread == NULL) { 511 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 512 if (_starting_thread == NULL) { 513 return false; 514 } 515 } 516 517 // The primordial thread is runnable from the start) 518 _starting_thread->set_state(RUNNABLE); 519 520 thread->set_osthread(_starting_thread); 521 return true; 522 } 523 524 // Helper function to trace _beginthreadex attributes, 525 // similar to os::Posix::describe_pthread_attr() 526 static char* describe_beginthreadex_attributes(char* buf, size_t buflen, 527 size_t stacksize, unsigned initflag) { 528 stringStream ss(buf, buflen); 529 if (stacksize == 0) { 530 ss.print("stacksize: default, "); 531 } else { 532 ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024); 533 } 534 ss.print("flags: "); 535 #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " "); 536 #define ALL(X) \ 537 X(CREATE_SUSPENDED) \ 538 X(STACK_SIZE_PARAM_IS_A_RESERVATION) 539 ALL(PRINT_FLAG) 540 #undef ALL 541 #undef PRINT_FLAG 542 return buf; 543 } 544 545 // Allocate and initialize a new OSThread 546 bool os::create_thread(Thread* thread, ThreadType thr_type, 547 size_t stack_size) { 548 
unsigned thread_id; 549 550 // Allocate the OSThread object 551 OSThread* osthread = new OSThread(NULL, NULL); 552 if (osthread == NULL) { 553 return false; 554 } 555 556 // Initialize support for Java interrupts 557 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 558 if (interrupt_event == NULL) { 559 delete osthread; 560 return NULL; 561 } 562 osthread->set_interrupt_event(interrupt_event); 563 osthread->set_interrupted(false); 564 565 thread->set_osthread(osthread); 566 567 if (stack_size == 0) { 568 switch (thr_type) { 569 case os::java_thread: 570 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 571 if (JavaThread::stack_size_at_create() > 0) { 572 stack_size = JavaThread::stack_size_at_create(); 573 } 574 break; 575 case os::compiler_thread: 576 if (CompilerThreadStackSize > 0) { 577 stack_size = (size_t)(CompilerThreadStackSize * K); 578 break; 579 } // else fall through: 580 // use VMThreadStackSize if CompilerThreadStackSize is not defined 581 case os::vm_thread: 582 case os::pgc_thread: 583 case os::cgc_thread: 584 case os::watcher_thread: 585 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 586 break; 587 } 588 } 589 590 // Create the Win32 thread 591 // 592 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() 593 // does not specify stack size. Instead, it specifies the size of 594 // initially committed space. The stack size is determined by 595 // PE header in the executable. If the committed "stack_size" is larger 596 // than default value in the PE header, the stack is rounded up to the 597 // nearest multiple of 1MB. For example if the launcher has default 598 // stack size of 320k, specifying any size less than 320k does not 599 // affect the actual stack size at all, it only affects the initial 600 // commitment. 
On the other hand, specifying 'stack_size' larger than 601 // default value may cause significant increase in memory usage, because 602 // not only the stack space will be rounded up to MB, but also the 603 // entire space is committed upfront. 604 // 605 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' 606 // for CreateThread() that can treat 'stack_size' as stack size. However we 607 // are not supposed to call CreateThread() directly according to MSDN 608 // document because JVM uses C runtime library. The good news is that the 609 // flag appears to work with _beginthredex() as well. 610 611 const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION; 612 HANDLE thread_handle = 613 (HANDLE)_beginthreadex(NULL, 614 (unsigned)stack_size, 615 (unsigned (__stdcall *)(void*)) thread_native_entry, 616 thread, 617 initflag, 618 &thread_id); 619 620 char buf[64]; 621 if (thread_handle != NULL) { 622 log_info(os, thread)("Thread started (tid: %u, attributes: %s)", 623 thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag)); 624 } else { 625 log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.", 626 os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag)); 627 } 628 629 if (thread_handle == NULL) { 630 // Need to clean up stuff we've allocated so far 631 CloseHandle(osthread->interrupt_event()); 632 thread->set_osthread(NULL); 633 delete osthread; 634 return NULL; 635 } 636 637 Atomic::inc(&os::win32::_os_thread_count); 638 639 // Store info on the Win32 thread into the OSThread 640 osthread->set_thread_handle(thread_handle); 641 osthread->set_thread_id(thread_id); 642 643 // Initial thread state is INITIALIZED, not SUSPENDED 644 osthread->set_state(INITIALIZED); 645 646 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 647 return true; 648 } 649 650 651 // Free 
// Free Win32 resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  assert(Thread::current()->osthread() == osthread,
         "os::free_thread but not current thread");

  CloseHandle(osthread->thread_handle());
  CloseHandle(osthread->interrupt_event());
  delete osthread;
}

static jlong first_filetime;
static jlong initial_performance_count;
static jlong performance_frequency;


// Assemble a jlong from the two 32-bit halves of a LARGE_INTEGER.
jlong as_long(LARGE_INTEGER x) {
  jlong result = 0; // initialization to avoid warning
  set_high(&result, x.HighPart);
  set_low(&result, x.LowPart);
  return result;
}


// Performance-counter ticks elapsed since VM start.
jlong os::elapsed_counter() {
  LARGE_INTEGER count;
  QueryPerformanceCounter(&count);
  return as_long(count) - initial_performance_count;
}


jlong os::elapsed_frequency() {
  return performance_frequency;
}


julong os::available_memory() {
  return win32::available_memory();
}

julong os::win32::available_memory() {
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);

  return (julong)ms.ullAvailPhys;
}

julong os::physical_memory() {
  return win32::physical_memory();
}

bool os::has_allocatable_memory_limit(julong* limit) {
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);
#ifdef _LP64
  *limit = (julong)ms.ullAvailVirtual;
  return true;
#else
  // Limit to 1400m because of the 2gb address space wall
  *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
  return true;
#endif
}

int os::active_processor_count() {
  // User has overridden the number of active processors
  if (ActiveProcessorCount > 0) {
    log_trace(os)("active_processor_count: "
                  "active processor count set by user : %d",
                  ActiveProcessorCount);
    return ActiveProcessorCount;
  }

  DWORD_PTR lpProcessAffinityMask = 0;
  DWORD_PTR lpSystemAffinityMask = 0;
  int proc_count = processor_count();
  // The affinity mask only covers one processor group; fall back to the raw
  // processor count when there are more CPUs than mask bits.
  if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
      GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // Nof active processors is number of bits in process affinity mask
    int bitcount = 0;
    while (lpProcessAffinityMask != 0) {
      lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
      bitcount++;
    }
    return bitcount;
  } else {
    return proc_count;
  }
}

void os::set_native_thread_name(const char *name) {

  // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
  //
  // Note that unfortunately this only works if the process
  // is already attached to a debugger; debugger must observe
  // the exception below to show the correct name.

  // If there is no debugger attached skip raising the exception
  if (!IsDebuggerPresent()) {
    return;
  }

  const DWORD MS_VC_EXCEPTION = 0x406D1388;
  struct {
    DWORD dwType;     // must be 0x1000
    LPCSTR szName;    // pointer to name (in user addr space)
    DWORD dwThreadID; // thread ID (-1=caller thread)
    DWORD dwFlags;    // reserved for future use, must be zero
  } info;

  info.dwType = 0x1000;
  info.szName = name;
  info.dwThreadID = -1;
  info.dwFlags = 0;

  __try {
    RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
  } __except(EXCEPTION_EXECUTE_HANDLER) {}
}

bool os::distribute_processes(uint length, uint* distribution) {
  // Not yet implemented.
  return false;
}

bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}

void os::win32::initialize_performance_counter() {
  LARGE_INTEGER count;
  QueryPerformanceFrequency(&count);
  performance_frequency = as_long(count);
  QueryPerformanceCounter(&count);
  initial_performance_count = as_long(count);
}


double os::elapsedTime() {
  return (double) elapsed_counter() / (double) elapsed_frequency();
}


// Windows format:
//   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
// Java format:
//   Java standards require the number of milliseconds since 1/1/1970

// Constant offset - calculated using offset()
static jlong  _offset   = 116444736000000000;
// Fake time counter for reproducible results when debugging
static jlong  fake_time = 0;

#ifdef ASSERT
// Just to be safe, recalculate the offset in debug mode
static jlong _calculated_offset = 0;
static int   _has_calculated_offset = 0;

jlong offset() {
  if (_has_calculated_offset) return _calculated_offset;
  SYSTEMTIME java_origin;
  java_origin.wYear          = 1970;
  java_origin.wMonth         = 1;
  java_origin.wDayOfWeek     = 0; // ignored
  java_origin.wDay           = 1;
  java_origin.wHour          = 0;
  java_origin.wMinute        = 0;
  java_origin.wSecond        = 0;
  java_origin.wMilliseconds  = 0;
  FILETIME jot;
  if (!SystemTimeToFileTime(&java_origin, &jot)) {
    fatal("Error = %d\nWindows error", GetLastError());
  }
  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
  _has_calculated_offset = 1;
  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
  return _calculated_offset;
}
#else
jlong offset() {
  return _offset;
}
#endif

// Convert FILETIME (100 ns since 1601) to Java milliseconds since 1970.
jlong windows_to_java_time(FILETIME wt) {
  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  return (a - offset()) / 10000;
}

// Returns time ticks in (10th of micro seconds)
(10th of micro seconds) 850 jlong windows_to_time_ticks(FILETIME wt) { 851 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 852 return (a - offset()); 853 } 854 855 FILETIME java_to_windows_time(jlong l) { 856 jlong a = (l * 10000) + offset(); 857 FILETIME result; 858 result.dwHighDateTime = high(a); 859 result.dwLowDateTime = low(a); 860 return result; 861 } 862 863 bool os::supports_vtime() { return true; } 864 bool os::enable_vtime() { return false; } 865 bool os::vtime_enabled() { return false; } 866 867 double os::elapsedVTime() { 868 FILETIME created; 869 FILETIME exited; 870 FILETIME kernel; 871 FILETIME user; 872 if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) { 873 // the resolution of windows_to_java_time() should be sufficient (ms) 874 return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS; 875 } else { 876 return elapsedTime(); 877 } 878 } 879 880 jlong os::javaTimeMillis() { 881 if (UseFakeTimers) { 882 return fake_time++; 883 } else { 884 FILETIME wt; 885 GetSystemTimeAsFileTime(&wt); 886 return windows_to_java_time(wt); 887 } 888 } 889 890 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) { 891 FILETIME wt; 892 GetSystemTimeAsFileTime(&wt); 893 jlong ticks = windows_to_time_ticks(wt); // 10th of micros 894 jlong secs = jlong(ticks / 10000000); // 10000 * 1000 895 seconds = secs; 896 nanos = jlong(ticks - (secs*10000000)) * 100; 897 } 898 899 jlong os::javaTimeNanos() { 900 LARGE_INTEGER current_count; 901 QueryPerformanceCounter(¤t_count); 902 double current = as_long(current_count); 903 double freq = performance_frequency; 904 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC); 905 return time; 906 } 907 908 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 909 jlong freq = performance_frequency; 910 if (freq < NANOSECS_PER_SEC) { 911 // the performance counter is 64 bits and we will 912 // be multiplying it -- so no wrap in 64 bits 913 info_ptr->max_value 
= ALL_64_BITS; 914 } else if (freq > NANOSECS_PER_SEC) { 915 // use the max value the counter can reach to 916 // determine the max value which could be returned 917 julong max_counter = (julong)ALL_64_BITS; 918 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC)); 919 } else { 920 // the performance counter is 64 bits and we will 921 // be using it directly -- so no wrap in 64 bits 922 info_ptr->max_value = ALL_64_BITS; 923 } 924 925 // using a counter, so no skipping 926 info_ptr->may_skip_backward = false; 927 info_ptr->may_skip_forward = false; 928 929 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time 930 } 931 932 char* os::local_time_string(char *buf, size_t buflen) { 933 SYSTEMTIME st; 934 GetLocalTime(&st); 935 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 936 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); 937 return buf; 938 } 939 940 bool os::getTimesSecs(double* process_real_time, 941 double* process_user_time, 942 double* process_system_time) { 943 HANDLE h_process = GetCurrentProcess(); 944 FILETIME create_time, exit_time, kernel_time, user_time; 945 BOOL result = GetProcessTimes(h_process, 946 &create_time, 947 &exit_time, 948 &kernel_time, 949 &user_time); 950 if (result != 0) { 951 FILETIME wt; 952 GetSystemTimeAsFileTime(&wt); 953 jlong rtc_millis = windows_to_java_time(wt); 954 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS); 955 *process_user_time = 956 (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS); 957 *process_system_time = 958 (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS); 959 return true; 960 } else { 961 return false; 962 } 963 } 964 965 void os::shutdown() { 966 // allow PerfMemory to attempt cleanup of any persistent resources 967 perfMemory_exit(); 968 969 // flush buffered output, finish log files 970 ostream_abort(); 971 972 // Check for abort hook 973 abort_hook_t 
abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}


static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
                                         PMINIDUMP_EXCEPTION_INFORMATION,
                                         PMINIDUMP_USER_STREAM_INFORMATION,
                                         PMINIDUMP_CALLBACK_INFORMATION);

static HANDLE dumpFile = NULL;

// Check if dump file can be created.
void os::check_dump_limit(char* buffer, size_t buffsz) {
  bool status = true;
  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
    jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
    status = false;
  }

#ifndef ASSERT
  if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
    jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
    status = false;
  }
#endif

  if (status) {
    const char* cwd = get_current_directory(NULL, 0);
    int pid = current_process_id();
    if (cwd != NULL) {
      jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
    } else {
      jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
    }

    // Open the dump file eagerly so a failure is reported up front rather
    // than at crash time.
    if (dumpFile == NULL &&
        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
        == INVALID_HANDLE_VALUE) {
      jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
      status = false;
    }
  }
  VMError::record_coredump_status(buffer, status);
}

// Terminate the process, optionally writing a minidump of the crash context.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  MINIDUMP_TYPE dumpType;

  shutdown();
  if (!dump_core || dumpFile == NULL) {
    if (dumpFile != NULL) {
      CloseHandle(dumpFile);
    }
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
                             MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);

  if (siginfo != NULL && context != NULL) {
    ep.ContextRecord = (PCONTEXT) context;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }

  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
      !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
    jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
  }
  CloseHandle(dumpFile);
  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
// * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

// Caller must have already run dirname through JVM_NativePath, which removes
// duplicate slashes and converts all instances of '/' into '\\'.

// POSIX-style opendir() emulation on top of FindFirstFile().
// Returns a heap-allocated DIR, or NULL with errno set (ENOMEM, ENOENT,
// ENOTDIR, EACCES). Note: malloc here takes an NMT tag (mtInternal), so it
// is presumably HotSpot's tracked allocator, not the CRT's -- matching free()
// is used in closedir().
DIR * os::opendir(const char *dirname) {
  assert(dirname != NULL, "just checking");      // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
  DWORD fattr;                                   // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == 0) {
    errno = ENOMEM;
    return 0;
  }

  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
  // as a directory in FindFirstFile().  We detect this case here and
  // prepend the current drive name.
  //
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    alt_dirname[0] = _getdrive() + 'A' - 1;
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  // +5 leaves room for the "\\*.*" wildcard suffix plus the terminating NUL.
  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dirp->path == 0) {
    free(dirp);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dirp->path, dirname);

  // Validate that the path exists and is actually a directory before
  // starting the enumeration.
  fattr = GetFileAttributes(dirp->path);
  if (fattr == 0xffffffff) {
    free(dirp->path);
    free(dirp);
    errno = ENOENT;
    return 0;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path);
    free(dirp);
    errno = ENOTDIR;
    return 0;
  }

  // Append "*.*", or possibly "\\*.*", to path
  if (dirp->path[1] == ':' &&
      (dirp->path[2] == '\0' ||
      (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    // No '\\' needed for cases like "Z:" or "Z:\"
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    // An empty directory reports ERROR_FILE_NOT_FOUND, which is not an
    // error for opendir(); readdir() will simply return no entries.
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path);
      free(dirp);
      errno = EACCES;
      return 0;
    }
  }
  return dirp;
}

// POSIX-style readdir(). Returns the next entry of dirp (stored in the
// DIR's own dirent buffer, so the pointer is invalidated by the next call
// on the same DIR), or 0 when the directory is exhausted or on error.
// parameter dbuf unused on Windows
struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    return 0;
  }

  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  // Pre-fetch the next entry; when the enumeration ends, close the find
  // handle now so the next readdir() call returns 0.
  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return 0;
    }
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}

// POSIX-style closedir(). Releases the find handle (if still open) and the
// DIR allocations. Returns 0 on success, -1 with errno = EBADF on failure.
int os::closedir(DIR *dirp) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path);
  free(dirp);
  return 0;
}

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
// Returns a pointer to a function-local static buffer (empty string on
// failure), so the result must not be freed and is not thread-safe to
// mutate.
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf) > 0) {
    return path_buf;
  } else {
    path_buf[0] = '\0';
    return path_buf;
  }
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
// Thin wrapper over _getcwd(); clamps buflen to INT_MAX since _getcwd
// takes an int size.
const char* os::get_current_directory(char *buf, size_t buflen) {
  int n = static_cast<int>(buflen);
  if (buflen > INT_MAX)  n = INT_MAX;
  return _getcwd(buf, n);
}

//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if address in
// within the NTDLL address space.
//
static bool _addr_in_ntdll(address addr) {
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if (hmod == NULL) return false;
  if (!GetModuleInformation(GetCurrentProcess(), hmod,
                            &minfo, sizeof(MODULEINFO))) {
    return false;
  }

  // In range [base, base + SizeOfImage) of the loaded ntdll image?
  if ((addr >= minfo.lpBaseOfDll) &&
      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
    return true;
  } else {
    return false;
  }
}
#endif

// Query/result record passed through get_loaded_modules_info() when
// searching for the module that contains a given address.
struct _modinfo {
  address addr;      // address we are looking for (in)
  char*   full_path; // point to a char buffer (out, optional)
  int     buflen;    // size of the buffer
  address base_addr; // base address of the containing module (out)
};

// get_loaded_modules_info() callback: stop (return 1) when the module whose
// [base_addr, top_address) range contains pmod->addr is found.
static int _locate_module_by_addr(const char * mod_fname, address base_addr,
                                  address top_address, void * param) {
  struct _modinfo *pmod = (struct _modinfo *)param;
  if (!pmod) return -1;

  if (base_addr <= pmod->addr &&
      top_address > pmod->addr) {
    // if a buffer is provided, copy path name to the buffer
    if (pmod->full_path) {
      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
    }
    pmod->base_addr = base_addr;
    return 1;
  }
  return 0;
}

// Find the library (DLL) containing addr; on success copy its path into buf
// and optionally report the offset of addr from the module base.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
  //       return the full path to the DLL file, sometimes it returns path
  //       to the corresponding PDB file (debug info); sometimes it only
  //       returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr      = addr;
  mi.full_path = buf;
  mi.buflen    = buflen;
  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

// Symbolize addr via the Decoder; on failure clear buf and set *offset = -1.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
    return true;
  }
  if (offset != NULL) *offset = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
// Uses the address of this very function as a probe: the module containing
// it is by definition jvm.dll.
static int _locate_jvm_dll(const char* mod_fname, address base_addr,
                           address top_address, void * param) {
  if (!param) return -1;

  if (base_addr <= (address)_locate_jvm_dll &&
      top_address > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = top_address;
    return 1;
  }
  return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
// Lazily resolves vm_lib_location on first use.
bool os::address_is_in_vm(address addr) {
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(const char* fname, address base_address,
                         address top_address, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n",
            base_address, top_address, fname);
  return 0;
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
// Returns the module handle, or NULL with an explanatory message in ebuf.
void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
  void * result = LoadLibrary(name);
  if (result != NULL) {
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
    ebuf[ebuflen - 1] = '\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen - 1] = '\0';
  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
  if (fd < 0) {
    return NULL;
  }

  // Parse the PE header by hand to learn the target machine of the DLL.
  uint32_t signature_offset;
  uint16_t lib_arch = 0;
  bool failed_to_get_lib_arch =
    ( // Go to position 3c in the dll
     (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
     ||
     // Read location of signature
     (sizeof(signature_offset) !=
     (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
     ||
     // Go to COFF File Header in dll
     // that is located after "signature" (4 bytes long)
     (os::seek_to_file_offset(fd,
     signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
     ||
     // Read field that contains code of architecture
     // that dll was built for
     (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
    );

  ::close(fd);
  if (failed_to_get_lib_arch) {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  typedef struct {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[] = {
    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"}
  };
#if (defined _M_AMD64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
         is defined :_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str = NULL, *lib_arch_str = NULL;
  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (lib_arch == arch_array[i].arch_code) {
      lib_arch_str = arch_array[i].arch_name;
    }
    if (running_arch == arch_array[i].arch_code) {
      running_arch_str = arch_array[i].arch_name;
    }
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch) {
    return NULL;
  }

  if (lib_arch_str != NULL) {
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str, running_arch_str);
  } else {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch, running_arch_str);
  }

  return NULL;
}

// Print all loaded modules (one per line) to st.
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  get_loaded_modules_info(_print_module, (void *)st);
}

// Enumerate the modules loaded in this process (up to MAX_NUM_MODULES),
// invoking callback(filename, base, top, param) for each. Stops early when
// the callback returns non-zero, and returns that value (0 otherwise).
// NOTE(review): 'filename' is a function-local static buffer, so this is
// not reentrant/thread-safe -- confirm all callers are serialized.
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  HANDLE   hProcess;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[MAX_PATH];
  int         result = 0;

  int pid = os::current_process_id();
  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = callback(filename, (address)modinfo.lpBaseOfDll,
                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
}

// Copy the DNS host name of this machine into buf; false on failure.
bool os::get_host_name(char* buf, size_t buflen) {
  DWORD size = (DWORD)buflen;
  return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
}

// One-line OS description: the Windows version string with any trailing
// newline stripped.
void os::get_summary_os_info(char* buf, size_t buflen) {
  stringStream sst(buf, buflen);
  os::win32::print_windows_version(&sst);
  // chop off newline character
  char* nl = strchr(buf, '\n');
  if (nl != NULL) *nl = '\0';
}

// vsnprintf wrapper for logging: on truncation/error, return the buffer
// size that would have been required (via _vscprintf).
int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
  int ret = vsnprintf(buf, len, fmt, args);
  // Get the correct buffer size if buf is too small
  if (ret < 0) {
    return _vscprintf(fmt, args);
  }
  return ret;
}

// Last-modification time of filename; asserts (debug builds) that the
// stat() call succeeded.
static inline time_t get_mtime(const char* filename) {
  struct stat st;
  int ret = os::stat(filename, &st);
  assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
  return st.st_mtime;
}

// Compare mtimes: negative/zero/positive like strcmp.
// NOTE(review): the time_t difference is truncated to int; for files more
// than ~68 years apart (64-bit time_t) the sign could be wrong -- confirm
// callers only rely on recent files.
int os::compare_file_modified_times(const char* file1, const char* file2) {
  time_t t1 = get_mtime(file1);
  time_t t2 = get_mtime(file2);
  return t1 - t2;
}

void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}

// Print host name (debug builds only) and the Windows version line.
void os::print_os_info(outputStream* st) {
#ifdef ASSERT
  char buffer[1024];
  st->print("HostName: ");
  if (get_host_name(buffer, sizeof(buffer))) {
    st->print("%s ", buffer);
  } else {
    st->print("N/A ");
  }
#endif
  st->print("OS:");
  os::win32::print_windows_version(st);
}

// Determine and print the marketing name ("Windows 10", "Server 2012 R2",
// ...), bitness and build number of the running OS. The version is taken
// from kernel32.dll's file-version resource because GetVersionEx lies on
// Windows 8.1+; GetVersionEx is still used for the workstation/server
// distinction (wProductType).
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  VS_FIXEDFILEINFO *file_info;
  TCHAR kernel32_path[MAX_PATH];
  UINT len, ret;

  // Use the GetVersionEx information to see if we're on a server or
  // workstation edition of Windows. Starting with Windows 8.1 we can't
  // trust the OS version information returned by this API.
  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("Call to GetVersionEx failed");
    return;
  }
  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);

  // Get the full path to \Windows\System32\kernel32.dll and use that for
  // determining what version of Windows we're running on.
  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
  ret = GetSystemDirectory(kernel32_path, len);
  if (ret == 0 || ret > len) {
    st->print_cr("Call to GetSystemDirectory failed");
    return;
  }
  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);

  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
  if (version_size == 0) {
    st->print_cr("Call to GetFileVersionInfoSize failed");
    return;
  }

  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
  if (version_info == NULL) {
    st->print_cr("Failed to allocate version_info");
    return;
  }

  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
    os::free(version_info);
    st->print_cr("Call to GetFileVersionInfo failed");
    return;
  }

  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
    os::free(version_info);
    st->print_cr("Call to VerQueryValue failed");
    return;
  }

  int major_version = HIWORD(file_info->dwProductVersionMS);
  int minor_version = LOWORD(file_info->dwProductVersionMS);
  int build_number = HIWORD(file_info->dwProductVersionLS);
  int build_minor = LOWORD(file_info->dwProductVersionLS);
  // Encode major.minor as a single comparable integer (e.g. 6.3 -> 6003).
  int os_vers = major_version * 1000 + minor_version;
  os::free(version_info);

  st->print(" Windows ");
  switch (os_vers) {

  case 6000:
    if (is_workstation) {
      st->print("Vista");
    } else {
      st->print("Server 2008");
    }
    break;

  case 6001:
    if (is_workstation) {
      st->print("7");
    } else {
      st->print("Server 2008 R2");
    }
    break;

  case 6002:
    if (is_workstation) {
      st->print("8");
    } else {
      st->print("Server 2012");
    }
    break;

  case 6003:
    if (is_workstation) {
      st->print("8.1");
    } else {
      st->print("Server 2012 R2");
    }
    break;

  case 10000:
    if (is_workstation) {
      st->print("10");
    } else {
      st->print("Server 2016");
    }
    break;

  default:
    // Unrecognized windows, print out its major and minor versions
    st->print("%d.%d", major_version, minor_version);
    break;
  }

  // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
  // find out whether we are running on 64 bit processor or not
  SYSTEM_INFO si;
  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  GetNativeSystemInfo(&si);
  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
    st->print(" , 64 bit");
  }

  st->print(" Build %d", build_number);
  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
  st->cr();
}

void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  // Nothing to do for now.
}

// CPU brand string from the registry; fall back to a generic placeholder
// if the key/value is unavailable.
void os::get_summary_cpu_info(char* buf, size_t buflen) {
  HKEY key;
  DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
                 "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
  if (status == ERROR_SUCCESS) {
    DWORD size = (DWORD)buflen;
    status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
    if (status != ERROR_SUCCESS) {
      strncpy(buf, "## __CPU__", buflen);
    }
    RegCloseKey(key);
  } else {
    // Put generic cpu info to return
    strncpy(buf, "## __CPU__", buflen);
  }
}

// Print page size plus physical/swap totals and free amounts (in KB).
void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);

  st->print(", physical %uk", os::physical_memory() >> 10);
  st->print("(%uk free)", os::available_memory() >> 10);

  // NOTE(review): ullTotalPageFile/ullAvailPageFile are 64-bit (DWORDLONG)
  // but are formatted with %u here (as are the julong values above), which
  // truncates on large-memory machines -- should use a 64-bit format.
  st->print(", swap %uk", ms.ullTotalPageFile >> 10);
  st->print("(%uk free)", ms.ullAvailPageFile >> 10);
  st->cr();
}

// Print a decoded EXCEPTION_RECORD: exception name/code, and for access
// violations / in-page errors the access kind and faulting address.
void os::print_siginfo(outputStream *st, const void* siginfo) {
  const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
  st->print("siginfo:");

  char tmp[64];
  if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
    strcpy(tmp, "EXCEPTION_??");
  }
  st->print(" %s (0x%x)", tmp, er->ExceptionCode);

  if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
       er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
       er->NumberParameters >= 2) {
    // ExceptionInformation[0] encodes the access type, [1] the address.
    switch (er->ExceptionInformation[0]) {
    case 0: st->print(", reading address"); break;
    case 1: st->print(", writing address"); break;
    case 8: st->print(", data execution prevention violation at address"); break;
    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                       er->ExceptionInformation[0]);
    }
    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else {
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}

// Cached result of os::jvm_path(); filled on first call.
static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like jvm.dll is installed there (append a fake suffix
    // hotspot/jvm.dll).
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {
      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      if (0 != _access(buf, 0)) {
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  // Not an altjvm (or JAVA_HOME unusable): ask the OS for jvm.dll's path.
  if (buf[0] == '\0') {
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  strncpy(saved_jvm_path, buf, MAX_PATH);
  saved_jvm_path[MAX_PATH - 1] = '\0';
}


// 32-bit stdcall name decoration: leading underscore...
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}


// ...and trailing @<bytes-of-arguments>. Nothing on 64-bit Windows.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("@%d", args_size * sizeof(int));
#endif
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c
// Fill buf with the message for the last Win32 error (or, failing that,
// the current errno). Returns the message length, 0 if there was no error.
size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
                                     FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
                                     NULL,
                                     errval,
                                     0,
                                     buf,
                                     (DWORD)len,
                                     NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = os::strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}

// Last Win32 error code, falling back to errno when GetLastError() is 0.
int os::get_last_error() {
  DWORD error = GetLastError();
  if (error == 0) {
    error = errno;
  }
  return (int)error;
}

// Counting semaphore backed by a Win32 semaphore object.
WindowsSemaphore::WindowsSemaphore(uint value) {
  _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL);

  guarantee(_semaphore != NULL, "CreateSemaphore failed with error code: %lu", GetLastError());
}

WindowsSemaphore::~WindowsSemaphore() {
  ::CloseHandle(_semaphore);
}

// Release the semaphore 'count' times (no-op for count == 0).
void WindowsSemaphore::signal(uint count) {
  if (count > 0) {
    BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL);

    assert(ret != 0, "ReleaseSemaphore failed with error code: %lu", GetLastError());
  }
}

// Block until the semaphore is signaled.
void WindowsSemaphore::wait() {
  DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE);
  assert(ret != WAIT_FAILED,   "WaitForSingleObject failed with error code: %lu", GetLastError());
  assert(ret == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", ret);
}

// Non-blocking acquire; true if the semaphore was taken.
bool WindowsSemaphore::trywait() {
  DWORD ret = ::WaitForSingleObject(_semaphore, 0);
  assert(ret != WAIT_FAILED,   "WaitForSingleObject failed with error code: %lu", GetLastError());
  return ret == WAIT_OBJECT_0;
}

// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;

// CRT signal handler installed for user signals: records the signal for
// the signal thread, then re-registers itself (CRT handlers are one-shot).
static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}

void* os::user_handler() {
  return (void*) UserHandler;
}

// Install 'handler' for signal_number, returning the previous handler.
// SIGBREAK (unless -Xrs) is tracked in sigbreakHandler and dispatched from
// consoleHandler() instead of through the CRT -- see the note above.
void* os::signal(int signal_number, void* handler) {
  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
    void (*oldHandler)(int) = sigbreakHandler;
    sigbreakHandler = (void (*)(int)) handler;
    return (void*) oldHandler;
  } else {
    return (void*)::signal(signal_number, (void (*)(int))handler);
  }
}

void os::signal_raise(int signal_number) {
  raise(signal_number);
}

// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events. We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
  switch (event) {
  case CTRL_C_EVENT:
    if (VMError::is_error_reported()) {
      // Ctrl-C is pressed during error reporting, likely because the error
      // handler fails to abort. Let VM die immediately.
      os::die();
    }

    os::signal_raise(SIGINT);
    return TRUE;
    break;
  case CTRL_BREAK_EVENT:
    if (sigbreakHandler != NULL) {
      (*sigbreakHandler)(SIGBREAK);
    }
    return TRUE;
    break;
  case CTRL_LOGOFF_EVENT: {
    // Don't terminate JVM if it is running in a non-interactive session,
    // such as a service process.
    USEROBJECTFLAGS flags;
    HANDLE handle = GetProcessWindowStation();
    if (handle != NULL &&
        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
        sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let next handler to deal
      // with it.
      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
        return FALSE;
      }
    }
  }
  // Interactive logoff deliberately falls through to the SIGTERM path.
  case CTRL_CLOSE_EVENT:
  case CTRL_SHUTDOWN_EVENT:
    os::signal_raise(SIGTERM);
    return TRUE;
    break;
  default:
    break;
  }
  return FALSE;
}

// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Semaphore counting pending signals; posted by signal_notify(), waited on
// by the signal thread in check_pending_signals().
static HANDLE sig_sem = NULL;

void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.  For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified.  This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case.  See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}

// Record a pending signal and wake the signal thread.
void os::signal_notify(int signal_number) {
  BOOL ret;
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[signal_number]);
    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
    assert(ret != 0, "ReleaseSemaphore() failed");
  }
}

// Scan pending_signals for a raised signal and consume it (the cmpxchg
// atomically decrements the counter). If none is pending: return -1 when
// wait_for_signal is false, otherwise block on sig_sem -- cooperating with
// the external-suspension protocol -- and rescan when woken.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        // Re-post the token we consumed so no notification is lost.
        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
        assert(ret != 0, "ReleaseSemaphore() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

int os::signal_lookup() {
  return check_pending_signals(false);
}

int os::signal_wait() {
  return check_pending_signals(true);
}

// Implicit OS exception handling

// Redirect the faulting thread to 'handler': save the original pc in the
// JavaThread (if any), overwrite the context's instruction pointer
// (Rip/Eip) with the handler address, and resume execution there.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
                      address handler) {
  JavaThread* thread = (JavaThread*) Thread::current_or_null();
  // Save pc in thread
#ifdef _M_AMD64
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
#else
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
#endif

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}


// Used for PostMortemDump
extern "C" void safepoints();
extern "C" void find(int x);
extern "C" void events();

// According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real world experience shows that occasionally
// the execution of an illegal instruction can generate the exception code 0xC000001E. This
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2085 2086 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E 2087 2088 // From "Execution Protection in the Windows Operating System" draft 0.35 2089 // Once a system header becomes available, the "real" define should be 2090 // included or copied here. 2091 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08 2092 2093 // Windows Vista/2008 heap corruption check 2094 #define EXCEPTION_HEAP_CORRUPTION 0xC0000374 2095 2096 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual 2097 // C++ compiler contain this error code. Because this is a compiler-generated 2098 // error, the code is not listed in the Win32 API header files. 2099 // The code is actually a cryptic mnemonic device, with the initial "E" 2100 // standing for "exception" and the final 3 bytes (0x6D7363) representing the 2101 // ASCII values of "msc". 2102 2103 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363 2104 2105 #define def_excpt(val) { #val, (val) } 2106 2107 static const struct { char* name; uint number; } exceptlabels[] = { 2108 def_excpt(EXCEPTION_ACCESS_VIOLATION), 2109 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), 2110 def_excpt(EXCEPTION_BREAKPOINT), 2111 def_excpt(EXCEPTION_SINGLE_STEP), 2112 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), 2113 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND), 2114 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO), 2115 def_excpt(EXCEPTION_FLT_INEXACT_RESULT), 2116 def_excpt(EXCEPTION_FLT_INVALID_OPERATION), 2117 def_excpt(EXCEPTION_FLT_OVERFLOW), 2118 def_excpt(EXCEPTION_FLT_STACK_CHECK), 2119 def_excpt(EXCEPTION_FLT_UNDERFLOW), 2120 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO), 2121 def_excpt(EXCEPTION_INT_OVERFLOW), 2122 def_excpt(EXCEPTION_PRIV_INSTRUCTION), 2123 def_excpt(EXCEPTION_IN_PAGE_ERROR), 2124 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION), 2125 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2), 2126 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION), 2127 def_excpt(EXCEPTION_STACK_OVERFLOW), 2128 def_excpt(EXCEPTION_INVALID_DISPOSITION), 2129 
  def_excpt(EXCEPTION_GUARD_PAGE),
  def_excpt(EXCEPTION_INVALID_HANDLE),
  def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
  def_excpt(EXCEPTION_HEAP_CORRUPTION)
};

#undef def_excpt

// Return the printable name of exception_code in buf, or NULL if the code
// is not in the exceptlabels table. buf/size follow the usual snprintf
// contract; the returned pointer is buf itself on success.
const char* os::exception_name(int exception_code, char *buf, size_t size) {
  uint code = static_cast<uint>(exception_code);
  for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
    if (exceptlabels[i].number == code) {
      jio_snprintf(buf, size, "%s", exceptlabels[i].name);
      return buf;
    }
  }

  return NULL;
}

//-----------------------------------------------------------------------------
// Fix up an integer-overflow trap from idiv (min_jint/-1 or min_jlong/-1):
// set the architecturally defined result/remainder and resume after the
// instruction. Decodes the opcode bytes at pc to find the instruction length.
LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by idiv; should only happen for -MinInt/-1
  // (division by zero is handled explicitly)
#ifdef _M_AMD64
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Rip;
  // Accept either a bare 0xF7 idiv or a REX-prefixed one.
  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  if (pc[0] == 0xF7) {
    // set correct result values and continue after idiv instruction
    ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
  } else {
    ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
  }
  // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
  // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
  // idiv opcode (0xF7).
  ctx->Rdx = (DWORD)0;             // remainder
  // Continue the execution
#else
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Eip;
  assert(pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  assert(ctx->Eax == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
  ctx->Eax = (DWORD)min_jint;      // result
  ctx->Edx = (DWORD)0;             // remainder
  // Continue the execution
#endif
  return EXCEPTION_CONTINUE_EXECUTION;
}

//-----------------------------------------------------------------------------
// Handle FP traps caused by native code having changed the FP control state.
// 32-bit: restore the VM's standard x87 control word and resume; otherwise
// delegate to the previously installed unhandled-exception filter.
// 64-bit: restore MXCSR to INITIAL_MXCSR if it was changed.
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  PCONTEXT ctx = exceptionInfo->ContextRecord;
#ifndef _WIN64
  // handle exception caused by native method modifying control word
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;

  switch (exception_code) {
  case EXCEPTION_FLT_DENORMAL_OPERAND:
  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
  case EXCEPTION_FLT_INEXACT_RESULT:
  case EXCEPTION_FLT_INVALID_OPERATION:
  case EXCEPTION_FLT_OVERFLOW:
  case EXCEPTION_FLT_STACK_CHECK:
  case EXCEPTION_FLT_UNDERFLOW:
    jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
    if (fp_control_word != ctx->FloatSave.ControlWord) {
      // Restore FPCW and mask out FLT exceptions
      ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
      // Mask out pending FLT exceptions
      ctx->FloatSave.StatusWord &= 0xffffff00;
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if (prev_uef_handler != NULL) {
    // We didn't handle this exception so pass it to the previous
    // UnhandledExceptionFilter.
    return (prev_uef_handler)(exceptionInfo);
  }
#else // !_WIN64
  // On Windows, the mxcsr control bits are non-volatile across calls
  // See also CR 6192333
  //
  jint MxCsr = INITIAL_MXCSR;
  // we can't use StubRoutines::addr_mxcsr_std()
  // because in Win64 mxcsr is not saved there
  if (MxCsr != ctx->MxCsr) {
    ctx->MxCsr = MxCsr;
    return EXCEPTION_CONTINUE_EXECUTION;
  }
#endif // !_WIN64

  return EXCEPTION_CONTINUE_SEARCH;
}

// Funnel a fatal exception into VMError reporting (hs_err file etc.).
static inline void report_error(Thread* t, DWORD exception_code,
                                address addr, void* siginfo, void* context) {
  VMError::report_and_die(t, exception_code, addr, siginfo, context);

  // If UseOsErrorReporting, this will return here and save the error file
  // somewhere where we can find it in the minidump.
}

// Reconstruct the Java frame that performed the stack banging that caused
// this exception. Returns false if the pc cannot be mapped to a frame with
// a well-defined state, in which case default stack-overflow handling applies.
bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
        struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
  PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
  address addr = (address) exceptionRecord->ExceptionInformation[1];
  if (Interpreter::contains(pc)) {
    *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
    if (!fr->is_first_java_frame()) {
      // get_frame_at_stack_banging_point() is only called when we
      // have well defined stacks so java_sender() calls do not need
      // to assert safe_for_sender() first.
      *fr = fr->java_sender();
    }
  } else {
    // more complex code with compiled code
    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
    CodeBlob* cb = CodeCache::find_blob(pc);
    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
      // Not sure where the pc points to, fallback to default
      // stack overflow handling
      return false;
    } else {
      *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
      // in compiled code, the stack banging is performed just after the return pc
      // has been pushed on the stack
      *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
      if (!fr->is_java_frame()) {
        // See java_sender() comment above.
        *fr = fr->java_sender();
      }
    }
  }
  assert(fr->is_java_frame(), "Safety check");
  return true;
}

//-----------------------------------------------------------------------------
// The VM's top-level structured-exception filter. Dispatches hardware traps
// (access violations, stack overflow, integer overflow, FP traps, ...) to
// the appropriate runtime continuation, or reports a fatal error. The order
// of the checks below is significant: special cases (SafeFetch, exec
// violations, serialization page) must be recognized before the generic
// access-violation handling.
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#ifdef _M_AMD64
  address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
  Thread* t = Thread::current_or_null_safe();

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      bool instr_spans_page_boundary =
        (align_down((intptr_t) pc ^ (intptr_t) addr,
                    (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start = align_down(addr, page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          log_debug(os)("Execution protection violation "
                        "at " INTPTR_FORMAT
                        ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
                        p2i(page_start), (res ? "success" : os::strerror(errno)));

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here.  Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM.  Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop.  This condition is probably even more unlikely
          // than the first.
          //
          // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are unnecessary complication:
          // this handler is a best-effort safety net, not a complete solution.
          // It is disabled by default and should only be used as a workaround
          // in case we missed any no-execute-unsafe VM code.

          last_addr = addr;

          return EXCEPTION_CONTINUE_EXECUTION;
        }
      }

      // Last unguard failed or not unguarding
      tty->print_raw_cr("Execution protection violation");
      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    }
  }
#endif // _WIN64

  // Check to see if we caught the safepoint code in the
  // process of write protecting the memory serialization page.
  // It write enables the page immediately after protecting it
  // so just return.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    if (t != NULL && t->is_Java_thread()) {
      JavaThread* thread = (JavaThread*) t;
      PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
      address addr = (address) exceptionRecord->ExceptionInformation[1];
      if (os::is_memory_serialize_page(thread, addr)) {
        // Block current thread until the memory serialize page permission restored.
        os::block_on_serialize_page_trap();
        return EXCEPTION_CONTINUE_EXECUTION;
      }
    }
  }

  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
      VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
  }

  if (t != NULL && t->is_Java_thread()) {
    JavaThread* thread = (JavaThread*) t;
    bool in_java = thread->thread_state() == _thread_in_Java;

    // Handle potential stack overflows up front.
    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
      if (thread->stack_guards_enabled()) {
        if (in_java) {
          frame fr;
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
            assert(fr.is_java_frame(), "Must be a Java frame");
            SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
          }
        }
        // Yellow zone violation.  The o/s has unprotected the first yellow
        // zone page for us.  Note:  must call disable_stack_yellow_zone to
        // update the enabled status, even if the zone contains only one page.
        assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
        thread->disable_stack_yellow_reserved_zone();
        // If not in java code, return and hope for the best.
        return in_java
            ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
            : EXCEPTION_CONTINUE_EXECUTION;
      } else {
        // Fatal red zone violation.
        thread->disable_stack_red_zone();
        tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
        report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                     exceptionInfo->ContextRecord);
        return EXCEPTION_CONTINUE_SEARCH;
      }
    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
      // Either stack overflow or null pointer exception.
      if (in_java) {
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        address stack_end = thread->stack_end();
        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
          // Stack overflow.
          assert(!os::uses_stack_guard_pages(),
                 "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          if (os::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
        {
#ifdef _WIN64
          // If it's a legal stack address map the entire region in
          //
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
            // Round down to the page containing the fault, then commit
            // everything from there to the stack base.
            addr = (address)((uintptr_t)addr &
                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
            os::commit_memory((char *)addr, thread->stack_base() - addr,
                              !ExecMem);
            return EXCEPTION_CONTINUE_EXECUTION;
          } else
#endif
          {
            // Null pointer exception.
            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) {
              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
            }
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;
          }
        }
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
        address addr = JNI_FastGetField::find_slowcase_pc(pc);
        if (addr != (address)-1) {
          return Handle_Exception(exceptionInfo, addr);
        }
      }
#endif

      // Stack overflow or null pointer exception in native code.
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    if (in_java) {
      switch (exception_code) {
      case EXCEPTION_INT_DIVIDE_BY_ZERO:
        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

      case EXCEPTION_INT_OVERFLOW:
        return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }
    if (((thread->thread_state() == _thread_in_Java) ||
         (thread->thread_state() == _thread_in_native)) &&
        exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
  }

  if (exception_code != EXCEPTION_BREAKPOINT) {
    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                 exceptionInfo->ContextRecord);
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

#ifndef _WIN64
// Special care for fast JNI accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
// the heap gets shrunk before the field access.
// Need to install our own structured exception handler since native code may
// install its own.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Wraps a fast JNI field getter in an SEH __try block so an access violation
// during the unsafe fast path is redirected to the slow case by the filter
// above. Returns 0 if the filter did not redirect (should not normally occur).
#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
  Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
                                                     jobject obj,           \
                                                     jfieldID fieldID) {    \
    __try {                                                                 \
      return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
                                                                 obj,       \
                                                                 fieldID);  \
    } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
                                              _exception_info())) {         \
    }                                                                       \
    return 0;                                                               \
  }

DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
DEFINE_FAST_GETFIELD(jchar,    char,   Char)
DEFINE_FAST_GETFIELD(jshort,   short,  Short)
DEFINE_FAST_GETFIELD(jint,     int,    Int)
DEFINE_FAST_GETFIELD(jlong,    long,   Long)
DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
DEFINE_FAST_GETFIELD(jdouble,  double, Double)

// Return the SEH-protected wrapper for the fast JNI getter of 'type'.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
  case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
  case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
  case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
  case T_INT:     return (address)jni_fast_GetIntField_wrapper;
  case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
  case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
  case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
  default:        ShouldNotReachHere();
  }
  return (address)-1;
}
#endif

// Virtual Memory

int os::vm_page_size() { return os::win32::vm_page_size(); }
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}

// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
//   + select Control Panel -> Administrative Tools -> Local Security Policy
//   + select Local Policies -> User Rights Assignment
//   + double click "Lock pages in memory", add users and/or groups
//   + reboot
// Note the above steps are needed for administrator as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment it only uses large page if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future, if so the code below needs to be revisited.
2599 2600 #ifndef MEM_LARGE_PAGES 2601 #define MEM_LARGE_PAGES 0x20000000 2602 #endif 2603 2604 static HANDLE _hProcess; 2605 static HANDLE _hToken; 2606 2607 // Container for NUMA node list info 2608 class NUMANodeListHolder { 2609 private: 2610 int *_numa_used_node_list; // allocated below 2611 int _numa_used_node_count; 2612 2613 void free_node_list() { 2614 if (_numa_used_node_list != NULL) { 2615 FREE_C_HEAP_ARRAY(int, _numa_used_node_list); 2616 } 2617 } 2618 2619 public: 2620 NUMANodeListHolder() { 2621 _numa_used_node_count = 0; 2622 _numa_used_node_list = NULL; 2623 // do rest of initialization in build routine (after function pointers are set up) 2624 } 2625 2626 ~NUMANodeListHolder() { 2627 free_node_list(); 2628 } 2629 2630 bool build() { 2631 DWORD_PTR proc_aff_mask; 2632 DWORD_PTR sys_aff_mask; 2633 if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false; 2634 ULONG highest_node_number; 2635 if (!GetNumaHighestNodeNumber(&highest_node_number)) return false; 2636 free_node_list(); 2637 _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal); 2638 for (unsigned int i = 0; i <= highest_node_number; i++) { 2639 ULONGLONG proc_mask_numa_node; 2640 if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false; 2641 if ((proc_aff_mask & proc_mask_numa_node)!=0) { 2642 _numa_used_node_list[_numa_used_node_count++] = i; 2643 } 2644 } 2645 return (_numa_used_node_count > 1); 2646 } 2647 2648 int get_count() { return _numa_used_node_count; } 2649 int get_node_list_entry(int n) { 2650 // for indexes out of range, returns -1 2651 return (n < _numa_used_node_count ? 
_numa_used_node_list[n] : -1); 2652 } 2653 2654 } numa_node_list_holder; 2655 2656 2657 2658 static size_t _large_page_size = 0; 2659 2660 static bool request_lock_memory_privilege() { 2661 _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, 2662 os::current_process_id()); 2663 2664 LUID luid; 2665 if (_hProcess != NULL && 2666 OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) && 2667 LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) { 2668 2669 TOKEN_PRIVILEGES tp; 2670 tp.PrivilegeCount = 1; 2671 tp.Privileges[0].Luid = luid; 2672 tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; 2673 2674 // AdjustTokenPrivileges() may return TRUE even when it couldn't change the 2675 // privilege. Check GetLastError() too. See MSDN document. 2676 if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) && 2677 (GetLastError() == ERROR_SUCCESS)) { 2678 return true; 2679 } 2680 } 2681 2682 return false; 2683 } 2684 2685 static void cleanup_after_large_page_init() { 2686 if (_hProcess) CloseHandle(_hProcess); 2687 _hProcess = NULL; 2688 if (_hToken) CloseHandle(_hToken); 2689 _hToken = NULL; 2690 } 2691 2692 static bool numa_interleaving_init() { 2693 bool success = false; 2694 bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving); 2695 2696 // print a warning if UseNUMAInterleaving flag is specified on command line 2697 bool warn_on_failure = use_numa_interleaving_specified; 2698 #define WARN(msg) if (warn_on_failure) { warning(msg); } 2699 2700 // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages) 2701 size_t min_interleave_granularity = UseLargePages ? 
_large_page_size : os::vm_allocation_granularity(); 2702 NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity); 2703 2704 if (numa_node_list_holder.build()) { 2705 if (log_is_enabled(Debug, os, cpu)) { 2706 Log(os, cpu) log; 2707 log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count()); 2708 for (int i = 0; i < numa_node_list_holder.get_count(); i++) { 2709 log.debug(" %d ", numa_node_list_holder.get_node_list_entry(i)); 2710 } 2711 } 2712 success = true; 2713 } else { 2714 WARN("Process does not cover multiple NUMA nodes."); 2715 } 2716 if (!success) { 2717 if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag."); 2718 } 2719 return success; 2720 #undef WARN 2721 } 2722 2723 // this routine is used whenever we need to reserve a contiguous VA range 2724 // but we need to make separate VirtualAlloc calls for each piece of the range 2725 // Reasons for doing this: 2726 // * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise) 2727 // * UseNUMAInterleaving requires a separate node for each piece 2728 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, 2729 DWORD prot, 2730 bool should_inject_error = false) { 2731 char * p_buf; 2732 // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size 2733 size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity(); 2734 size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size; 2735 2736 // first reserve enough address space in advance since we want to be 2737 // able to break a single contiguous virtual address range into multiple 2738 // large page commits but WS2003 does not allow reserving large page space 2739 // so we just use 4K pages for reserve, this gives us a legal contiguous 2740 // address space. 
then we will deallocate that reservation, and re alloc 2741 // using large pages 2742 const size_t size_of_reserve = bytes + chunk_size; 2743 if (bytes > size_of_reserve) { 2744 // Overflowed. 2745 return NULL; 2746 } 2747 p_buf = (char *) VirtualAlloc(addr, 2748 size_of_reserve, // size of Reserve 2749 MEM_RESERVE, 2750 PAGE_READWRITE); 2751 // If reservation failed, return NULL 2752 if (p_buf == NULL) return NULL; 2753 MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC); 2754 os::release_memory(p_buf, bytes + chunk_size); 2755 2756 // we still need to round up to a page boundary (in case we are using large pages) 2757 // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size) 2758 // instead we handle this in the bytes_to_rq computation below 2759 p_buf = align_up(p_buf, page_size); 2760 2761 // now go through and allocate one chunk at a time until all bytes are 2762 // allocated 2763 size_t bytes_remaining = bytes; 2764 // An overflow of align_up() would have been caught above 2765 // in the calculation of size_of_reserve. 
2766 char * next_alloc_addr = p_buf; 2767 HANDLE hProc = GetCurrentProcess(); 2768 2769 #ifdef ASSERT 2770 // Variable for the failure injection 2771 int ran_num = os::random(); 2772 size_t fail_after = ran_num % bytes; 2773 #endif 2774 2775 int count=0; 2776 while (bytes_remaining) { 2777 // select bytes_to_rq to get to the next chunk_size boundary 2778 2779 size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size)); 2780 // Note allocate and commit 2781 char * p_new; 2782 2783 #ifdef ASSERT 2784 bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after); 2785 #else 2786 const bool inject_error_now = false; 2787 #endif 2788 2789 if (inject_error_now) { 2790 p_new = NULL; 2791 } else { 2792 if (!UseNUMAInterleaving) { 2793 p_new = (char *) VirtualAlloc(next_alloc_addr, 2794 bytes_to_rq, 2795 flags, 2796 prot); 2797 } else { 2798 // get the next node to use from the used_node_list 2799 assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected"); 2800 DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count()); 2801 p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node); 2802 } 2803 } 2804 2805 if (p_new == NULL) { 2806 // Free any allocated pages 2807 if (next_alloc_addr > p_buf) { 2808 // Some memory was committed so release it. 2809 size_t bytes_to_release = bytes - bytes_remaining; 2810 // NMT has yet to record any individual blocks, so it 2811 // need to create a dummy 'reserve' record to match 2812 // the release. 
        // Tail of allocate_pages_individually(): a chunk allocation failed
        // part-way through. NMT has not yet recorded the individual chunks,
        // so create a dummy 'reserve' record to balance the release below.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        log_develop_debug(pagesize)("Reserving pages individually failed.");
      }
#endif
      return NULL;
    }

    // Advance past the chunk just allocated.
    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}


// Probe the OS for large-page support and pick the large page size.
// Sets _large_page_size/_page_sizes[] and finally UseLargePages to
// reflect whether large pages can actually be used.
void os::large_page_init() {
  if (!UseLargePages) return;

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  bool success = false;

#define WARN(msg) if (warn_on_failure) { warning(msg); }
  if (request_lock_memory_privilege()) {
    // GetLargePageMinimum() returns 0 if large pages are not supported.
    size_t s = GetLargePageMinimum();
    if (s) {
#if defined(IA32) || defined(AMD64)
      if (s > 4*M || LargePageSizeInBytes > 4*M) {
        WARN("JVM cannot use large pages bigger than 4mb.");
      } else {
#endif
        // Honor a user-specified size only if it is a multiple of the
        // OS minimum large page size.
        if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
          _large_page_size = LargePageSizeInBytes;
        } else {
          _large_page_size = s;
        }
        success = true;
#if defined(IA32) || defined(AMD64)
      }
#endif
    } else {
      WARN("Large page is not supported by the processor.");
    }
  } else {
    WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
  }
#undef WARN

  const size_t default_page_size = (size_t) vm_page_size();
  if (success && _large_page_size > default_page_size) {
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = default_page_size;
    _page_sizes[2] = 0;
  }

  cleanup_after_large_page_init();
  UseLargePages = success;
}

// On win32, one cannot release just a part of reserved memory, it's an
// all or nothing deal.  When we split a reservation, we must break the
// reservation into two reservations.
void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
                                  bool realloc) {
  if (size > 0) {
    // Release the whole reservation, then re-reserve the two halves
    // separately so each can later be released independently.
    release_memory(base, size);
    if (realloc) {
      reserve_memory(split, base);
    }
    if (size != split) {
      reserve_memory(size - split, base + split);
    }
  }
}

// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
// Reserve 'size' bytes aligned to 'alignment' by over-reserving, releasing,
// and re-reserving at the aligned address; loops because another thread may
// grab the address between the release and the re-reserve.
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = align_up(extra_base, alignment);

    os::release_memory(extra_base, extra_size);

    // Re-reserve exactly at the aligned address; NULL means we lost the
    // race and must retry.
    aligned_base = os::reserve_memory(size, aligned_base);

  } while (aligned_base == NULL);

  return aligned_base;
}

// Platform reserve: plain VirtualAlloc, or chunk-at-a-time NUMA-interleaved
// allocation when UseNUMAInterleaving is on (and large pages are off).
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_page_size() == 0, "reserve page size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if (Verbose && PrintMiscellaneous) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if (Verbose && PrintMiscellaneous) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available.
  return reserve_memory(bytes, requested_addr);
}

size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

bool os::can_execute_large_page_memory() {
  return true;
}

// Reserve-and-commit a large-page region. Returns NULL (fall back to small
// pages) if bytes/alignment are incompatible with the large page size or the
// OS call fails.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
                                 bool exec) {
  assert(UseLargePages, "only for large pages");

  if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
    return NULL; // Fallback to small pages.
  }

  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;

  // with large pages, there are two cases where we need to use Individual Allocation
  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
    log_debug(pagesize)("Reserving large pages individually.");

    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
    if (p_buf == NULL) {
      // give an appropriate warning message
      if (UseNUMAInterleaving) {
        warning("NUMA large page allocation failed, UseLargePages flag ignored");
      }
      if (UseLargePagesIndividualAllocation) {
        warning("Individually allocated large pages failed, "
                "use -XX:-UseLargePagesIndividualAllocation to turn off");
      }
      return NULL;
    }

    return p_buf;

  } else {
    log_debug(pagesize)("Reserving large pages in a single large chunk.");

    // normal policy just allocate it all at once
    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
    if (res != NULL) {
      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
    }

    return res;
  }
}

bool os::release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  return release_memory(base, bytes);
}

void os::print_statistics() {
}

// Emit a diagnostic when a commit fails, including the OS error text.
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}

// Commit [addr, addr+bytes). With NUMA interleaving the range may span
// multiple reserves, so it is committed region-by-region via VirtualQuery.
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

// Commit or abort the VM with an OOM error ('mesg' names the consumer).
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
3138 return true; 3139 } 3140 assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries"); 3141 assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks"); 3142 return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0); 3143 } 3144 3145 bool os::pd_release_memory(char* addr, size_t bytes) { 3146 return VirtualFree(addr, 0, MEM_RELEASE) != 0; 3147 } 3148 3149 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 3150 return os::commit_memory(addr, size, !ExecMem); 3151 } 3152 3153 bool os::remove_stack_guard_pages(char* addr, size_t size) { 3154 return os::uncommit_memory(addr, size); 3155 } 3156 3157 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) { 3158 uint count = 0; 3159 bool ret = false; 3160 size_t bytes_remaining = bytes; 3161 char * next_protect_addr = addr; 3162 3163 // Use VirtualQuery() to get the chunk size. 3164 while (bytes_remaining) { 3165 MEMORY_BASIC_INFORMATION alloc_info; 3166 if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) { 3167 return false; 3168 } 3169 3170 size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3171 // We used different API at allocate_pages_individually() based on UseNUMAInterleaving, 3172 // but we don't distinguish here as both cases are protected by same API. 
3173 ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0; 3174 warning("Failed protecting pages individually for chunk #%u", count); 3175 if (!ret) { 3176 return false; 3177 } 3178 3179 bytes_remaining -= bytes_to_protect; 3180 next_protect_addr += bytes_to_protect; 3181 count++; 3182 } 3183 return ret; 3184 } 3185 3186 // Set protections specified 3187 bool os::protect_memory(char* addr, size_t bytes, ProtType prot, 3188 bool is_committed) { 3189 unsigned int p = 0; 3190 switch (prot) { 3191 case MEM_PROT_NONE: p = PAGE_NOACCESS; break; 3192 case MEM_PROT_READ: p = PAGE_READONLY; break; 3193 case MEM_PROT_RW: p = PAGE_READWRITE; break; 3194 case MEM_PROT_RWX: p = PAGE_EXECUTE_READWRITE; break; 3195 default: 3196 ShouldNotReachHere(); 3197 } 3198 3199 DWORD old_status; 3200 3201 // Strange enough, but on Win32 one can change protection only for committed 3202 // memory, not a big deal anyway, as bytes less or equal than 64K 3203 if (!is_committed) { 3204 commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX, 3205 "cannot commit protection page"); 3206 } 3207 // One cannot use os::guard_memory() here, as on Win32 guard page 3208 // have different (one-shot) semantics, from MSDN on PAGE_GUARD: 3209 // 3210 // Pages in the region become guard pages. Any attempt to access a guard page 3211 // causes the system to raise a STATUS_GUARD_PAGE exception and turn off 3212 // the guard page status. Guard pages thus act as a one-time access alarm. 3213 bool ret; 3214 if (UseNUMAInterleaving) { 3215 // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time, 3216 // so we must protect the chunks individually. 
    ret = protect_pages_individually(addr, bytes, p, &old_status);
  } else {
    ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
  }
#ifdef ASSERT
  if (!ret) {
    int err = os::get_last_error();
    char buf[256];
    size_t buf_len = os::lasterror(buf, sizeof(buf));
    warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
            ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
            buf_len != 0 ? buf : "<no_error_string>", err);
  }
#endif
  return ret;
}

// Make [addr, addr+bytes) a guard region (one-shot PAGE_GUARD semantics).
bool os::guard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}

bool os::unguard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}

// NUMA support on Windows is limited; most of these are no-ops.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
bool os::numa_topology_changed() { return false; }
size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id() { return 0; }
// Fill 'ids' with up to 'size' NUMA node ids; returns the number written.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // Provide an answer for UMA systems
    ids[0] = 0;
    return 1;
  } else {
    // check for size bigger than actual groups_num
    size = MIN2(size, numa_get_groups_num());
    for (int i = 0; i < (int)size; i++) {
      ids[i] = numa_node_list_holder.get_node_list_entry(i);
    }
    return size;
  }
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected,
                     page_info* page_found) {
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}

#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL

// Resume a thread created suspended (see thread startup elsewhere in this file).
void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}

// RAII helper: raises the multimedia timer resolution to 1ms for the
// lifetime of the object when the requested sleep is not a multiple of
// the default ~10ms tick, and restores it on destruction.
class HighResolutionInterval : public CHeapObj<mtThread> {
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
 private:
  jlong resolution;
 public:
  HighResolutionInterval(jlong ms) {
    resolution = ms % 10L;
    if (resolution != 0) {
      MMRESULT result = timeBeginPeriod(1L);
    }
  }
  ~HighResolutionInterval() {
    if (resolution != 0) {
      MMRESULT result = timeEndPeriod(1L);
    }
    resolution = 0L;
  }
};

// Sleep for 'ms' milliseconds. Interruptible sleeps wait on the thread's
// interrupt event and participate in the external-suspend protocol; sleeps
// longer than MAXDWORD are performed in chunks via recursion.
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  while (ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) {
      return res;
    }
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    HighResolutionInterval *phri=NULL;
    if (!ForceTimeHighResolution) {
      phri = new HighResolutionInterval(ms);
    }
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      // The interrupt event was signaled: consume it and report interrupt.
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}

// Short sleep, direct OS call.
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    Sleep(100000);  // ... 100 seconds at a time
  }
}

typedef BOOL (WINAPI * STTSignature)(void);

void os::naked_yield() {
  // Consider passing back the return value from SwitchToThread().
  SwitchToThread();
}

// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.

int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};

// Alternate mapping used when ThreadPriorityPolicy == 1: spreads the Java
// priorities over a wider range, up to TIME_CRITICAL.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};

static int prio_init() {
  // If ThreadPriorityPolicy is 1, switch tables
  if (ThreadPriorityPolicy == 1) {
    int i;
    for (i = 0; i < CriticalPriority + 1; i++) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  if (UseCriticalJavaThreadPriority) {
    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
  }
  return 0;
}

OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) return OS_OK;
  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
  return ret ? OS_OK : OS_ERR;
}

OSReturn os::get_native_priority(const Thread* const thread,
                                 int* priority_ptr) {
  if (!UseThreadPriorities) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

// Post an interrupt to 'thread': set the interrupted flag, signal the
// interrupt event, and unpark any parkers waiting on the thread.
void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread ||
         Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread()) {
    ((JavaThread*)thread)->parker()->unpark();
  }

  ParkEvent * ev = thread->_ParkEvent;
  if (ev != NULL) ev->unpark();
}


// Query (and optionally clear) the interrupted state of 'thread'.
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. By checking thread interrupt event to see
  // if the thread gets real interrupt thus prevent spurious wakeup.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}

// GetCurrentThreadId() returns DWORD
intx os::current_thread_id()  { return GetCurrentThreadId(); }

static int _initial_pid = 0;

int os::current_process_id() {
  return (_initial_pid ? _initial_pid : _getpid());
}

int    os::win32::_vm_page_size              = 0;
int    os::win32::_vm_allocation_granularity = 0;
int    os::win32::_processor_type            = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int    os::win32::_processor_level           = 0;
julong os::win32::_physical_memory           = 0;
size_t os::win32::_default_stack_size        = 0;

intx          os::win32::_os_thread_limit    = 0;
volatile intx os::win32::_os_thread_count    = 0;

bool   os::win32::_is_windows_server         = false;

// 6573254
// Currently, the bug is observed across all the supported Windows releases,
// including the latest one (as of this writing - Windows Server 2012 R2)
bool   os::win32::_has_exit_bug              = true;

// Query the OS for page size, allocation granularity, processor count,
// physical memory and OS flavor, and cache them in the statics above.
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size    = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type  = si.dwProcessorType;
  _processor_level = si.wProcessorLevel;
  set_processor_count(si.dwNumberOfProcessors);

  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);

  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatusEx(&ms);
  _physical_memory = ms.ullTotalPhys;

  if (FLAG_IS_DEFAULT(MaxRAM)) {
    // Adjust MaxRAM according to the maximum virtual address space available.
    FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
  }

  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch (oi.dwPlatformId) {
  case VER_PLATFORM_WIN32_NT:
    {
      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
          oi.wProductType == VER_NT_SERVER) {
        _is_windows_server = true;
      }
    }
    break;
  default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
         "stack size not a multiple of page size");

  initialize_performance_counter();
}


// Load a system DLL by bare name, searching only the System and Windows
// directories (never the CWD or PATH); on failure an explanation is written
// to 'ebuf' and NULL is returned.
HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
                                      int ebuflen) {
  char path[MAX_PATH];
  DWORD size;
  DWORD pathLen = (DWORD)sizeof(path);
  HINSTANCE result = NULL;

  // only allow library name without path component
  assert(strchr(name, '\\') == NULL, "path not allowed");
  assert(strchr(name, ':') == NULL, "path not allowed");
  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
    jio_snprintf(ebuf, ebuflen,
                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
    return NULL;
  }

  // search system directory
  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // try Windows directory
  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  jio_snprintf(ebuf, ebuflen,
               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
  return NULL;
}

#define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
#define EXIT_TIMEOUT 300000 /* 5 minutes */

// InitOnceExecuteOnce callback: lazily initializes the critical section
// used by exit_process_or_thread() below.
static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
  InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
  return TRUE;
}

int os::win32::exit_process_or_thread(Ept what, int exit_code) {
  // Basic approach:
  //  - Each exiting thread registers its intent to exit and then does so.
  //  - A thread trying to terminate the process must wait for all
  //    threads currently exiting to complete their exit.

  if (os::win32::has_exit_bug()) {
    // The array holds handles of the threads that have started exiting by calling
    // _endthreadex().
    // Should be large enough to avoid blocking the exiting thread due to lack of
    // a free slot.
    static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
    static int handle_count = 0;

    static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
    static CRITICAL_SECTION crit_sect;
    static volatile jint process_exiting = 0;
    int i, j;
    DWORD res;
    HANDLE hproc, hthr;

    // We only attempt to register threads until a process exiting
    // thread manages to set the process_exiting flag. Any threads
    // that come through here after the process_exiting flag is set
    // are unregistered and will be caught in the SuspendThread()
    // infinite loop below.
    bool registered = false;

    // The first thread that reached this point, initializes the critical section.
3675 if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) { 3676 warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__); 3677 } else if (OrderAccess::load_acquire(&process_exiting) == 0) { 3678 if (what != EPT_THREAD) { 3679 // Atomically set process_exiting before the critical section 3680 // to increase the visibility between racing threads. 3681 Atomic::cmpxchg((jint)GetCurrentThreadId(), &process_exiting, 0); 3682 } 3683 EnterCriticalSection(&crit_sect); 3684 3685 if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) { 3686 // Remove from the array those handles of the threads that have completed exiting. 3687 for (i = 0, j = 0; i < handle_count; ++i) { 3688 res = WaitForSingleObject(handles[i], 0 /* don't wait */); 3689 if (res == WAIT_TIMEOUT) { 3690 handles[j++] = handles[i]; 3691 } else { 3692 if (res == WAIT_FAILED) { 3693 warning("WaitForSingleObject failed (%u) in %s: %d\n", 3694 GetLastError(), __FILE__, __LINE__); 3695 } 3696 // Don't keep the handle, if we failed waiting for it. 3697 CloseHandle(handles[i]); 3698 } 3699 } 3700 3701 // If there's no free slot in the array of the kept handles, we'll have to 3702 // wait until at least one thread completes exiting. 3703 if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) { 3704 // Raise the priority of the oldest exiting thread to increase its chances 3705 // to complete sooner. 3706 SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL); 3707 res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT); 3708 if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) { 3709 i = (res - WAIT_OBJECT_0); 3710 handle_count = MAXIMUM_THREADS_TO_KEEP - 1; 3711 for (; i < handle_count; ++i) { 3712 handles[i] = handles[i + 1]; 3713 } 3714 } else { 3715 warning("WaitForMultipleObjects %s (%u) in %s: %d\n", 3716 (res == WAIT_FAILED ? 
"failed" : "timed out"), 3717 GetLastError(), __FILE__, __LINE__); 3718 // Don't keep handles, if we failed waiting for them. 3719 for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) { 3720 CloseHandle(handles[i]); 3721 } 3722 handle_count = 0; 3723 } 3724 } 3725 3726 // Store a duplicate of the current thread handle in the array of handles. 3727 hproc = GetCurrentProcess(); 3728 hthr = GetCurrentThread(); 3729 if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count], 3730 0, FALSE, DUPLICATE_SAME_ACCESS)) { 3731 warning("DuplicateHandle failed (%u) in %s: %d\n", 3732 GetLastError(), __FILE__, __LINE__); 3733 3734 // We can't register this thread (no more handles) so this thread 3735 // may be racing with a thread that is calling exit(). If the thread 3736 // that is calling exit() has managed to set the process_exiting 3737 // flag, then this thread will be caught in the SuspendThread() 3738 // infinite loop below which closes that race. A small timing 3739 // window remains before the process_exiting flag is set, but it 3740 // is only exposed when we are out of handles. 3741 } else { 3742 ++handle_count; 3743 registered = true; 3744 3745 // The current exiting thread has stored its handle in the array, and now 3746 // should leave the critical section before calling _endthreadex(). 3747 } 3748 3749 } else if (what != EPT_THREAD && handle_count > 0) { 3750 jlong start_time, finish_time, timeout_left; 3751 // Before ending the process, make sure all the threads that had called 3752 // _endthreadex() completed. 3753 3754 // Set the priority level of the current thread to the same value as 3755 // the priority level of exiting threads. 3756 // This is to ensure it will be given a fair chance to execute if 3757 // the timeout expires. 
3758 hthr = GetCurrentThread(); 3759 SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL); 3760 start_time = os::javaTimeNanos(); 3761 finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L); 3762 for (i = 0; ; ) { 3763 int portion_count = handle_count - i; 3764 if (portion_count > MAXIMUM_WAIT_OBJECTS) { 3765 portion_count = MAXIMUM_WAIT_OBJECTS; 3766 } 3767 for (j = 0; j < portion_count; ++j) { 3768 SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL); 3769 } 3770 timeout_left = (finish_time - start_time) / 1000000L; 3771 if (timeout_left < 0) { 3772 timeout_left = 0; 3773 } 3774 res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left); 3775 if (res == WAIT_FAILED || res == WAIT_TIMEOUT) { 3776 warning("WaitForMultipleObjects %s (%u) in %s: %d\n", 3777 (res == WAIT_FAILED ? "failed" : "timed out"), 3778 GetLastError(), __FILE__, __LINE__); 3779 // Reset portion_count so we close the remaining 3780 // handles due to this error. 3781 portion_count = handle_count - i; 3782 } 3783 for (j = 0; j < portion_count; ++j) { 3784 CloseHandle(handles[i + j]); 3785 } 3786 if ((i += portion_count) >= handle_count) { 3787 break; 3788 } 3789 start_time = os::javaTimeNanos(); 3790 } 3791 handle_count = 0; 3792 } 3793 3794 LeaveCriticalSection(&crit_sect); 3795 } 3796 3797 if (!registered && 3798 OrderAccess::load_acquire(&process_exiting) != 0 && 3799 process_exiting != (jint)GetCurrentThreadId()) { 3800 // Some other thread is about to call exit(), so we don't let 3801 // the current unregistered thread proceed to exit() or _endthreadex() 3802 while (true) { 3803 SuspendThread(GetCurrentThread()); 3804 // Avoid busy-wait loop, if SuspendThread() failed. 
        Sleep(EXIT_TIMEOUT);
      }
    }
  }

  // We are here if either
  // - there's no 'race at exit' bug on this OS release;
  // - initialization of the critical section failed (unlikely);
  // - the current thread has registered itself and left the critical section;
  // - the process-exiting thread has raised the flag and left the critical section.
  if (what == EPT_THREAD) {
    _endthreadex((unsigned)exit_code);
  } else if (what == EPT_PROCESS) {
    ::exit(exit_code);
  } else {
    _exit(exit_code);
  }

  // Should not reach here
  return exit_code;
}

#undef EXIT_TIMEOUT

// Put the standard C streams into binary mode so reads/writes are not
// subject to CRLF newline translation.
void os::win32::setmode_streams() {
  _setmode(_fileno(stdin), _O_BINARY);
  _setmode(_fileno(stdout), _O_BINARY);
  _setmode(_fileno(stderr), _O_BINARY);
}


// Returns true if a user-mode debugger is attached to this process.
bool os::is_debugger_attached() {
  return IsDebuggerPresent() ? true : false;
}


// If -XX:+PauseAtExit was given, block on stdin so a developer can inspect
// the process before it goes away.
void os::wait_for_keypress_at_exit(void) {
  if (PauseAtExit) {
    fprintf(stderr, "Press any key to continue...\n");
    fgetc(stdin);
  }
}


// Shows a modal Yes/No error dialog; returns true iff the user chose Yes.
bool os::message_box(const char* title, const char* message) {
  int result = MessageBox(NULL, message, title,
                          MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
  return result == IDYES;
}

#ifndef PRODUCT
#ifndef _WIN64
// Helpers to check whether NX protection is enabled
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
  // Only claim the exception if it is an access violation caused by an
  // attempt to *execute* (not read/write) the faulting address.
  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      pex->ExceptionRecord->NumberParameters > 0 &&
      pex->ExceptionRecord->ExceptionInformation[0] ==
      EXCEPTION_INFO_EXEC_VIOLATION) {
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Probes NX (no-execute) protection by executing a one-byte 'ret' placed on
// the stack; if NX is on, the attempt faults and is caught by the filter.
void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 }; // ret
  void *code_ptr = (void *)code;
  __try {
    __asm call code_ptr
  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
    tty->print_raw_cr("NX protection detected.");
  }
}
#endif // _WIN64
#endif // PRODUCT

// This is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = _getpid();

  init_random(1234567);

  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false);

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  // Duplicate the pseudo thread handle into a real handle usable later
  // from other threads.
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();

  // initialize fast thread access - only used for 32-bit
  win32::initialize_thread_ptr_offset();
}

// To install functions for atexit processing
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

static jint initSock();

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // Allocate a single page and mark it as readable for safepoint polling
  address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
  guarantee(polling_page != NULL, "Reserve Failed for polling page");

  address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
  guarantee(return_page != NULL, "Commit Failed for polling page");

  os::set_polling_page(polling_page);
  log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page));

  if (!UseMembar) {
    address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
    guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page");

    return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
    guarantee(return_page != NULL, "Commit Failed for memory serialize page");

    os::set_memory_serialize_page(mem_serialize_page);
    log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page));
  }

  // Setup Windows Exceptions

  // for debugging float code generation bugs
  if (ForceFloatExceptions) {
#ifndef _WIN64
    static long fp_control_word = 0;
    __asm { fstcw fp_control_word }
    // see Intel PPro Manual, Vol. 2, p 7-16
    const long precision = 0x20;
    const long underflow = 0x10;
    const long overflow = 0x08;
    const long zero_div = 0x04;
    const long denorm = 0x02;
    const long invalid = 0x01;
    fp_control_word |= invalid;
    __asm { fldcw fp_control_word }
#endif
  }

  // If stack_commit_size is 0, windows will reserve the default size,
  // but only commit a small portion of it.
  size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
  size_t default_reserve_size = os::win32::default_stack_size();
  size_t actual_reserve_size = stack_commit_size;
  if (stack_commit_size < default_reserve_size) {
    // If stack_commit_size == 0, we want this too
    actual_reserve_size = default_reserve_size;
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size.  Add two 4K pages for compiler2 recursion in main thread.
  // Add in 4*BytesPerWord 4K pages to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  size_t min_stack_allowed =
            (size_t)(JavaThread::stack_guard_zone_size() +
                     JavaThread::stack_shadow_zone_size() +
                     (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);

  min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());

  if (actual_reserve_size < min_stack_allowed) {
    // NOTE(review): "%dk" is paired with a size_t argument here; this relies
    // on matching int/size_t widths -- confirm (SIZE_FORMAT would be explicit).
    tty->print_cr("\nThe Java thread stack size specified is too small. "
                  "Specify at least %dk",
                  min_stack_allowed / K);
    return JNI_ERR;
  }

  JavaThread::set_stack_size_at_create(stack_commit_size);

  // Calculate theoretical max. size of Threads to guard gainst artifical
  // out-of-memory situations, where all available address-space has been
  // reserved by thread stacks.
  assert(actual_reserve_size != 0, "Must have a stack");

  // Calculate the thread limit when we should start doing Virtual Memory
  // banging. Currently when the threads will have used all but 200Mb of space.
  //
  // TODO: consider performing a similar calculation for commit size instead
  // as reserve size, since on a 64-bit platform we'll run into that more
  // often than running out of virtual memory space.  We can use the
  // lower value of the two calculations as the os_thread_limit.
  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);

  // at exit methods are called in the reverse order of their registration.
  // there is no limit to the number of functions registered. atexit does
  // not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

#ifndef _WIN64
  // Print something if NX is enabled (win32 on AMD64)
  NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
#endif

  // initialize thread priority policy
  prio_init();

  if (UseNUMA && !ForceNUMA) {
    UseNUMA = false; // We don't fully support this yet
  }

  if (UseNUMAInterleaving) {
    // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
    bool success = numa_interleaving_init();
    if (!success) UseNUMAInterleaving = false;
  }

  if (initSock() != JNI_OK) {
    return JNI_ERR;
  }

  return JNI_OK;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  DWORD old_status;
  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
                      PAGE_NOACCESS, &old_status)) {
    fatal("Could not disable polling page");
  }
}

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  DWORD old_status;
  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
                      PAGE_READONLY, &old_status)) {
    fatal("Could not enable polling page");
  }
}


// stat() wrapper: converts 'path' to native form first, and optionally
// normalizes st_mtime so it is independent of the current timezone setting.
int os::stat(const char *path, struct stat *sbuf) {
  char pathbuf[MAX_PATH];
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  int ret = ::stat(pathbuf, sbuf);
  if (sbuf != NULL && UseUTCFileTimestamp) {
    // Fix for 6539723.  st_mtime returned from stat() is dependent on
    // the system timezone and so can return different values for the
    // same file if/when daylight savings time changes.  This adjustment
    // makes sure the same timestamp is returned regardless of the TZ.
    //
    // See:
    // http://msdn.microsoft.com/library/
    //   default.asp?url=/library/en-us/sysinfo/base/
    //   time_zone_information_str.asp
    // and
    // http://msdn.microsoft.com/library/default.asp?url=
    //  /library/en-us/sysinfo/base/settimezoneinformation.asp
    //
    // NOTE: there is a insidious bug here:  If the timezone is changed
    // after the call to stat() but before 'GetTimeZoneInformation()', then
    // the adjustment we do here will be wrong and we'll return the wrong
    // value (which will likely end up creating an invalid class data
    // archive).  Absent a better API for this, or some time zone locking
    // mechanism, we'll have to live with this risk.
    TIME_ZONE_INFORMATION tz;
    DWORD tzid = GetTimeZoneInformation(&tz);
    // Bias values are minutes; convert the correction to seconds.
    int daylightBias =
        (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias;
    sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
  }
  return ret;
}


// Combine a FILETIME's two 32-bit halves into a single signed 64-bit value.
#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))


// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.
4113 4114 // current_thread_cpu_time() is not optimized for Windows yet 4115 jlong os::current_thread_cpu_time() { 4116 // return user + sys since the cost is the same 4117 return os::thread_cpu_time(Thread::current(), true /* user+sys */); 4118 } 4119 4120 jlong os::thread_cpu_time(Thread* thread) { 4121 // consistent with what current_thread_cpu_time() returns. 4122 return os::thread_cpu_time(thread, true /* user+sys */); 4123 } 4124 4125 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 4126 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 4127 } 4128 4129 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) { 4130 // This code is copy from clasic VM -> hpi::sysThreadCPUTime 4131 // If this function changes, os::is_thread_cpu_time_supported() should too 4132 FILETIME CreationTime; 4133 FILETIME ExitTime; 4134 FILETIME KernelTime; 4135 FILETIME UserTime; 4136 4137 if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime, 4138 &ExitTime, &KernelTime, &UserTime) == 0) { 4139 return -1; 4140 } else if (user_sys_cpu_time) { 4141 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100; 4142 } else { 4143 return FT2INT64(UserTime) * 100; 4144 } 4145 } 4146 4147 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4148 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4149 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4150 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4151 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4152 } 4153 4154 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4155 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4156 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4157 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4158 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4159 
} 4160 4161 bool os::is_thread_cpu_time_supported() { 4162 // see os::thread_cpu_time 4163 FILETIME CreationTime; 4164 FILETIME ExitTime; 4165 FILETIME KernelTime; 4166 FILETIME UserTime; 4167 4168 if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime, 4169 &KernelTime, &UserTime) == 0) { 4170 return false; 4171 } else { 4172 return true; 4173 } 4174 } 4175 4176 // Windows does't provide a loadavg primitive so this is stubbed out for now. 4177 // It does have primitives (PDH API) to get CPU usage and run queue length. 4178 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length" 4179 // If we wanted to implement loadavg on Windows, we have a few options: 4180 // 4181 // a) Query CPU usage and run queue length and "fake" an answer by 4182 // returning the CPU usage if it's under 100%, and the run queue 4183 // length otherwise. It turns out that querying is pretty slow 4184 // on Windows, on the order of 200 microseconds on a fast machine. 4185 // Note that on the Windows the CPU usage value is the % usage 4186 // since the last time the API was called (and the first call 4187 // returns 100%), so we'd have to deal with that as well. 4188 // 4189 // b) Sample the "fake" answer using a sampling thread and store 4190 // the answer in a global variable. The call to loadavg would 4191 // just return the value of the global, avoiding the slow query. 4192 // 4193 // c) Sample a better answer using exponential decay to smooth the 4194 // value. This is basically the algorithm used by UNIX kernels. 4195 // 4196 // Note that sampling thread starvation could affect both (b) and (c). 
// Load average is not implemented on Windows; always returns -1 (unsupported).
int os::loadavg(double loadavg[], int nelem) {
  return -1;
}


// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
bool os::dont_yield() {
  return DontYieldALot;
}

// This method is a slightly reworked copy of JDK's sysOpen
// from src/windows/hpi/src/sys_api_md.c

// open() wrapper: normalizes the path and forces binary, non-inherited mode.
int os::open(const char *path, int oflag, int mode) {
  char pathbuf[MAX_PATH];

  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
}

// Wrap an already-open file descriptor in a FILE* stream.
FILE* os::open(int fd, const char* mode) {
  return ::_fdopen(fd, mode);
}

// Is a (classpath) directory empty?
// NOTE(review): FindFirstFile is given 'path' verbatim; this behaves as a
// search pattern, so callers presumably pass something like "dir\\*" --
// confirm, since a bare directory path would match the directory itself.
bool os::dir_is_empty(const char* path) {
  WIN32_FIND_DATA fd;
  HANDLE f = FindFirstFile(path, &fd);
  if (f == INVALID_HANDLE_VALUE) {
    return true;
  }
  FindClose(f);
  return false;
}

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
  if (!rewrite_existing) {
    // Without rewrite, fail if the file already exists.
    oflags |= _O_EXCL;
  }
  return ::open(path, oflags, _S_IREAD | _S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}


jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::_lseeki64(fd, offset, whence);
}

// Positioned read: reads up to nBytes at 'offset' without moving the file
// pointer (overlapped I/O). Returns the number of bytes read, or 0 on failure.
size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  OVERLAPPED ov;
  DWORD nread;
  BOOL result;

  ZeroMemory(&ov, sizeof(ov));
  // Split the 64-bit offset into the OVERLAPPED structure's two halves.
  ov.Offset = (DWORD)offset;
  ov.OffsetHigh = (DWORD)(offset >> 32);

  HANDLE h = (HANDLE)::_get_osfhandle(fd);

  result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);

  return result ? nread : 0;
}


// This method is a slightly reworked copy of JDK's sysNativePath
// from src/windows/hpi/src/path_md.c

// Convert a pathname to native format.  On win32, this involves forcing all
// separators to be '\\' rather than '/' (both are legal inputs, but Win95
// sometimes rejects '/') and removing redundant separators.  The input path is
// assumed to have been converted into the character encoding used by the local
// system.  Because this might be a double-byte encoding, care is taken to
// treat double-byte lead characters correctly.
//
// This procedure modifies the given path in place, as the result is never
// longer than the original.  There is no error return; this operation always
// succeeds.
char * os::native_path(char *path) {
  char *src = path, *dst = path, *end = path;
  char *colon = NULL;  // If a drive specifier is found, this will
                       // point to the colon following the drive letter

  // Assumption: '/', '\\', ':', and drive letters are never lead bytes
  assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
          && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");

  // Check for leading separators
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    // Remove leading separators if followed by drive specifier.  This
    // hack is necessary to support file URLs containing drive
    // specifiers (e.g., "file://c:/path").  As a side effect,
    // "/c:/path" can be used as an alternative to "c:/path".
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      // UNC pathname: Retain first separator; leave src pointed at
      // second separator so that further separators will be collapsed
      // into the second separator.  The result will be a pathname
      // beginning with "\\\\" followed (most likely) by a host name.
      src = dst = path + 1;
      path[0] = '\\';  // Force first separator to '\\'
    }
  }

  end = dst;

  // Remove redundant separators from remainder of path, forcing all
  // separators to be '\\' rather than '/'. Also, single byte space
  // characters are removed from the end of the path because those
  // are not legal ending characters on this operating system.
  //
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      // Collapse any run of separators into the single '\\' just written.
      while (isfilesep(*src)) src++;
      if (*src == '\0') {
        // Check for trailing separator
        end = dst;
        if (colon == dst - 2) break;  // "z:\\"
        if (dst == path + 1) break;   // "\\"
        if (dst == path + 2 && isfilesep(path[0])) {
          // "\\\\" is not collapsed to "\\" because "\\\\" marks the
          // beginning of a UNC pathname.  Even though it is not, by
          // itself, a valid UNC pathname, we leave it as is in order
          // to be consistent with the path canonicalizer as well
          // as the win32 APIs, which treat this case as an invalid
          // UNC pathname rather than as an alias for the root
          // directory of the current drive.
          break;
        }
        end = --dst;  // Path does not denote a root directory, so
                      // remove trailing separator
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else {  // Copy a single-byte character
        char c = *src++;
        *dst++ = c;
        // Space is not a legal ending character
        if (c != ' ') end = dst;
      }
    }
  }

  *end = '\0';

  // For "z:", add "." to work around a bug in the C runtime library
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}

// This code is a copy of JDK's sysSetLength
// from src/windows/hpi/src/sys_api_md.c

// Truncate (or extend) the file underlying 'fd' to 'length' bytes.
// Returns 0 on success, -1 on failure.
int os::ftruncate(int fd, jlong length) {
  HANDLE h = (HANDLE)::_get_osfhandle(fd);
  long high = (long)(length >> 32);
  DWORD ret;

  if (h == (HANDLE)(-1)) {
    return -1;
  }

  // Move the file pointer to 'length', then cut the file off there.
  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
    return -1;
  }

  if (::SetEndOfFile(h) == FALSE) {
    return -1;
  }

  return 0;
}

int os::get_fileno(FILE* fp) {
  return _fileno(fp);
}

// This code is a copy of JDK's sysSync
// from src/windows/hpi/src/sys_api_md.c
// except for the legacy workaround for a bug in Win 98

int os::fsync(int fd) {
  HANDLE handle = (HANDLE)::_get_osfhandle(fd);

  // ERROR_ACCESS_DENIED is tolerated: FlushFileBuffers fails that way for
  // handles without write access (e.g. read-only files).
  if ((!::FlushFileBuffers(handle)) &&
      (GetLastError() != ERROR_ACCESS_DENIED)) {
    // from winerror.h
    return -1;
  }
  return 0;
}

static int nonSeekAvailable(int, long *);
static int stdinAvailable(int, long *);

#define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
#define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)

// This code is a copy of JDK's sysAvailable
// from src/windows/hpi/src/sys_api_md.c

// Stores in *bytes the number of bytes that can be read from 'fd' without
// blocking. Returns TRUE on success, FALSE on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      // Non-seekable device: delegate to the console/pipe probes.
      int ret;
      long lpbytes;
      if (fd == 0) {
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    // Seekable file: available = size from here to EOF, measured by seeking
    // to the end and back.
    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
      return FALSE;
    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
      return FALSE;
    }
    *bytes = end - cur;
    return TRUE;
  } else {
    return FALSE;
  }
}

void os::flockfile(FILE* fp) {
  _lock_file(fp);
}

void os::funlockfile(FILE* fp) {
  _unlock_file(fp);
}

// This code is a copy of JDK's nonSeekAvailable
// from src/windows/hpi/src/sys_api_md.c

static int nonSeekAvailable(int fd, long *pbytes) {
  // This is used for available on non-seekable devices
  // (like both named and anonymous pipes, such as pipes
  //  connected to an exec'd process).
  // Standard Input is a special case.
  HANDLE han;

  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
    return FALSE;
  }

  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
    // PeekNamedPipe fails when at EOF.  In that case we
    // simply make *pbytes = 0 which is consistent with the
    // behavior we get on Solaris when an fd is at EOF.
    // The only alternative is to raise an Exception,
    // which isn't really warranted.
    //
    if (::GetLastError() != ERROR_BROKEN_PIPE) {
      return FALSE;
    }
    *pbytes = 0;
  }
  return TRUE;
}

#define MAX_INPUT_EVENTS 2000

// This code is a copy of JDK's stdinAvailable
// from src/windows/hpi/src/sys_api_md.c

// Estimates the number of bytes readable from the console on stdin by
// peeking pending key-down events up to the last carriage return.
// Falls back to nonSeekAvailable() when stdin is not a console.
static int stdinAvailable(int fd, long *pbytes) {
  HANDLE han;
  DWORD numEventsRead = 0;  // Number of events read from buffer
  DWORD numEvents = 0;      // Number of events in buffer
  DWORD i = 0;              // Loop index
  DWORD curLength = 0;      // Position marker
  DWORD actualLength = 0;   // Number of bytes readable
  BOOL error = FALSE;       // Error holder
  INPUT_RECORD *lpBuffer;   // Pointer to records of input events

  if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
    return FALSE;
  }

  // Construct an array of input records in the console buffer
  error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
  if (error == 0) {
    // Not a console (e.g. redirected input) - treat it like a pipe.
    return nonSeekAvailable(fd, pbytes);
  }

  // lpBuffer must fit into 64K or else PeekConsoleInput fails
  if (numEvents > MAX_INPUT_EVENTS) {
    numEvents = MAX_INPUT_EVENTS;
  }

  lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
  if (lpBuffer == NULL) {
    return FALSE;
  }

  error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
  if (error == 0) {
    os::free(lpBuffer);
    return FALSE;
  }

  // Examine input records for the number of bytes available
  for (i=0; i<numEvents; i++) {
    if (lpBuffer[i].EventType == KEY_EVENT) {

      KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
                                    &(lpBuffer[i].Event);
      if (keyRecord->bKeyDown == TRUE) {
        CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
        curLength++;
        // Only bytes up to (and including) the last '\r' count as readable:
        // console input is line-buffered.
        if (*keyPressed == '\r') {
          actualLength = curLength;
        }
      }
    }
  }

  if (lpBuffer != NULL) {
    os::free(lpBuffer);
  }

  *pbytes = (long) actualLength;
  return TRUE;
}

// Map a block of memory.
// Maps 'bytes' bytes of 'file_name' starting at 'file_offset' at address
// 'addr'. When 'allow_exec' is set the region is read into anonymous memory
// and made executable instead of being file-mapped (see comment below).
// Returns the base address, or NULL on failure.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
  HANDLE hFile;
  char* base;

  hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
                     OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
  // NOTE(review): CreateFile reports failure with INVALID_HANDLE_VALUE, not
  // NULL -- this check looks wrong; confirm against the Win32 documentation.
  if (hFile == NULL) {
    log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
    return NULL;
  }

  if (allow_exec) {
    // CreateFileMapping/MapViewOfFileEx can't map executable memory
    // unless it comes from a PE image (which the shared archive is not.)
    // Even VirtualProtect refuses to give execute access to mapped memory
    // that was not previously executable.
    //
    // Instead, stick the executable region in anonymous memory.  Yuck.
    // Penalty is that ~4 pages will not be shareable - in the future
    // we might consider DLLizing the shared archive with a proper PE
    // header so that mapping executable + sharing is possible.

    base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
                                PAGE_READWRITE);
    if (base == NULL) {
      log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
      CloseHandle(hFile);
      return NULL;
    }

    DWORD bytes_read;
    OVERLAPPED overlapped;
    overlapped.Offset = (DWORD)file_offset;
    overlapped.OffsetHigh = 0;
    overlapped.hEvent = NULL;
    // ReadFile guarantees that if the return value is true, the requested
    // number of bytes were read before returning.
    bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
    if (!res) {
      log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
      release_memory(base, bytes);
      CloseHandle(hFile);
      return NULL;
    }
  } else {
    HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
                                    NULL /* file_name */);
    if (hMap == NULL) {
      log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
      CloseHandle(hFile);
      return NULL;
    }

    DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
    base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
                                  (DWORD)bytes, addr);
    if (base == NULL) {
      log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
      CloseHandle(hMap);
      CloseHandle(hFile);
      return NULL;
    }

    // The mapping handle can be closed as soon as the view exists; the view
    // keeps the mapping alive.
    if (CloseHandle(hMap) == 0) {
      log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
      CloseHandle(hFile);
      return base;
    }
  }

  if (allow_exec) {
    DWORD old_protect;
    DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
    bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;

    if (!res) {
      log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
      // Don't consider this a hard error, on IA32 even if the
      // VirtualProtect fails, we should still be able to execute
      CloseHandle(hFile);
      return base;
    }
  }

  if (CloseHandle(hFile) == 0) {
    log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
    return base;
  }

  return base;
}


// Remap a block of memory.
4664 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4665 char *addr, size_t bytes, bool read_only, 4666 bool allow_exec) { 4667 // This OS does not allow existing memory maps to be remapped so we 4668 // have to unmap the memory before we remap it. 4669 if (!os::unmap_memory(addr, bytes)) { 4670 return NULL; 4671 } 4672 4673 // There is a very small theoretical window between the unmap_memory() 4674 // call above and the map_memory() call below where a thread in native 4675 // code may be able to access an address that is no longer mapped. 4676 4677 return os::map_memory(fd, file_name, file_offset, addr, bytes, 4678 read_only, allow_exec); 4679 } 4680 4681 4682 // Unmap a block of memory. 4683 // Returns true=success, otherwise false. 4684 4685 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4686 MEMORY_BASIC_INFORMATION mem_info; 4687 if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) { 4688 log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError()); 4689 return false; 4690 } 4691 4692 // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx. 4693 // Instead, executable region was allocated using VirtualAlloc(). See 4694 // pd_map_memory() above. 4695 // 4696 // The following flags should match the 'exec_access' flages used for 4697 // VirtualProtect() in pd_map_memory(). 
4698 if (mem_info.Protect == PAGE_EXECUTE_READ || 4699 mem_info.Protect == PAGE_EXECUTE_READWRITE) { 4700 return pd_release_memory(addr, bytes); 4701 } 4702 4703 BOOL result = UnmapViewOfFile(addr); 4704 if (result == 0) { 4705 log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError()); 4706 return false; 4707 } 4708 return true; 4709 } 4710 4711 void os::pause() { 4712 char filename[MAX_PATH]; 4713 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4714 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4715 } else { 4716 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4717 } 4718 4719 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4720 if (fd != -1) { 4721 struct stat buf; 4722 ::close(fd); 4723 while (::stat(filename, &buf) == 0) { 4724 Sleep(100); 4725 } 4726 } else { 4727 jio_fprintf(stderr, 4728 "Could not open pause file '%s', continuing immediately.\n", filename); 4729 } 4730 } 4731 4732 Thread* os::ThreadCrashProtection::_protected_thread = NULL; 4733 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL; 4734 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0; 4735 4736 os::ThreadCrashProtection::ThreadCrashProtection() { 4737 } 4738 4739 // See the caveats for this class in os_windows.hpp 4740 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4741 // into this method and returns false. If no OS EXCEPTION was raised, returns 4742 // true. 4743 // The callback is supposed to provide the method that should be protected. 
//
bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {

  // Serialize all crash-protected sections; only one may be active at a time.
  Thread::muxAcquire(&_crash_mux, "CrashProtection");

  _protected_thread = Thread::current_or_null();
  assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");

  bool success = true;
  __try {
    // Publish the active protection object before running the callback so
    // that a fault raised inside cb.call() can be attributed to it.
    _crash_protection = this;
    cb.call();
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // only for protection, nothing to do
    success = false;
  }
  _crash_protection = NULL;
  _protected_thread = NULL;
  Thread::muxRelease(&_crash_mux);
  return success;
}

// An Event wraps a win32 "CreateEvent" kernel handle.
//
// We have a number of choices regarding "CreateEvent" win32 handle leakage:
//
// 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
//     field, and call CloseHandle() on the win32 event handle.  Unpark() would
//     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
//     In addition, an unpark() operation might fetch the handle field, but the
//     event could recycle between the fetch and the SetEvent() operation.
//     SetEvent() would either fail because the handle was invalid, or inadvertently work,
//     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
//     on a stale but recycled handle would be harmless, but in practice this might
//     confuse other non-Sun code, so it's not a viable approach.
//
// 2:  Once a win32 event handle is associated with an Event, it remains associated
//     with the Event.  The event handle is never closed.  This could be construed
//     as handle leakage, but only up to the maximum # of threads that have been extant
//     at any one time.  This shouldn't be an issue, as windows platforms typically
//     permit a process to have hundreds of thousands of open handles.
//
// 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
//     and release unused handles.
//
// 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
//     It's not clear, however, that we wouldn't be trading one type of leak for another.
//
// 5.  Use an RCU-like mechanism (Read-Copy Update).
//     Or perhaps something similar to Maged Michael's "Hazard pointers".
//
// We use (2).
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
// 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
//     to recover from (or at least detect) the dreaded Windows 841176 bug.
// 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
//     into a single win32 CreateEvent() handle.
//
// Assumption:
//    Only one parker can exist on an event, which is why we allocate
//    them per-thread. Multiple unparkers can coexist.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block; then set _Event to 0 before returning
//
// _Event transitions in unpark()
//    0 => 1 : just return
//    1 => 1 : just return
//   -1 => either 0 or 1; must signal target thread
//         That is, we can safely transition _Event from -1 to either
//         0 or 1.
//
// _Event serves as a restricted-range semaphore.
//   -1 : thread is blocked, i.e. there is a waiter
//    0 : neutral: thread is running or ready,
//        could have been signaled after a wait started
//    1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
//

int os::PlatformEvent::park(jlong Millis) {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL , "Invariant");
  guarantee(Millis > 0          , "Invariant");

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.
  // Consider: use atomic decrement instead of CAS-loop

  // Atomically decrement _Event; the pre-decrement value tells us whether
  // a permit was available (1) or we must block (0).
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return OS_OK;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
  }
  v = _Event;
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultaneous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}

void os::PlatformEvent::park() {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Consider: use atomic decrement instead of CAS-loop
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}

void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  if (Atomic::xchg(1, &_Event) >= 0) return;

  // Pre-xchg value was -1: a waiter is (or is about to be) blocked in
  // WaitForSingleObject, so signal the kernel event.
  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

// The Windows implementation of Park is very straightforward: Basic
// operations on Win32 Events turn out to have the right semantics to
// use them directly.  We opportunistically reuse the event inherited
// from Monitor.
4954 4955 void Parker::park(bool isAbsolute, jlong time) { 4956 guarantee(_ParkEvent != NULL, "invariant"); 4957 // First, demultiplex/decode time arguments 4958 if (time < 0) { // don't wait 4959 return; 4960 } else if (time == 0 && !isAbsolute) { 4961 time = INFINITE; 4962 } else if (isAbsolute) { 4963 time -= os::javaTimeMillis(); // convert to relative time 4964 if (time <= 0) { // already elapsed 4965 return; 4966 } 4967 } else { // relative 4968 time /= 1000000; // Must coarsen from nanos to millis 4969 if (time == 0) { // Wait for the minimal time unit if zero 4970 time = 1; 4971 } 4972 } 4973 4974 JavaThread* thread = JavaThread::current(); 4975 4976 // Don't wait if interrupted or already triggered 4977 if (Thread::is_interrupted(thread, false) || 4978 WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) { 4979 ResetEvent(_ParkEvent); 4980 return; 4981 } else { 4982 ThreadBlockInVM tbivm(thread); 4983 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */); 4984 thread->set_suspend_equivalent(); 4985 4986 WaitForSingleObject(_ParkEvent, time); 4987 ResetEvent(_ParkEvent); 4988 4989 // If externally suspended while waiting, re-suspend 4990 if (thread->handle_special_suspend_equivalent_condition()) { 4991 thread->java_suspend_self(); 4992 } 4993 } 4994 } 4995 4996 void Parker::unpark() { 4997 guarantee(_ParkEvent != NULL, "invariant"); 4998 SetEvent(_ParkEvent); 4999 } 5000 5001 // Run the specified command in a separate process. Return its exit value, 5002 // or -1 on failure (e.g. can't create a new process). 
5003 int os::fork_and_exec(char* cmd) { 5004 STARTUPINFO si; 5005 PROCESS_INFORMATION pi; 5006 DWORD exit_code; 5007 5008 char * cmd_string; 5009 char * cmd_prefix = "cmd /C "; 5010 size_t len = strlen(cmd) + strlen(cmd_prefix) + 1; 5011 cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal); 5012 if (cmd_string == NULL) { 5013 return -1; 5014 } 5015 cmd_string[0] = '\0'; 5016 strcat(cmd_string, cmd_prefix); 5017 strcat(cmd_string, cmd); 5018 5019 // now replace all '\n' with '&' 5020 char * substring = cmd_string; 5021 while ((substring = strchr(substring, '\n')) != NULL) { 5022 substring[0] = '&'; 5023 substring++; 5024 } 5025 memset(&si, 0, sizeof(si)); 5026 si.cb = sizeof(si); 5027 memset(&pi, 0, sizeof(pi)); 5028 BOOL rslt = CreateProcess(NULL, // executable name - use command line 5029 cmd_string, // command line 5030 NULL, // process security attribute 5031 NULL, // thread security attribute 5032 TRUE, // inherits system handles 5033 0, // no creation flags 5034 NULL, // use parent's environment block 5035 NULL, // use parent's starting directory 5036 &si, // (in) startup information 5037 &pi); // (out) process information 5038 5039 if (rslt) { 5040 // Wait until child process exits. 5041 WaitForSingleObject(pi.hProcess, INFINITE); 5042 5043 GetExitCodeProcess(pi.hProcess, &exit_code); 5044 5045 // Close process and thread handles. 
5046 CloseHandle(pi.hProcess); 5047 CloseHandle(pi.hThread); 5048 } else { 5049 exit_code = -1; 5050 } 5051 5052 FREE_C_HEAP_ARRAY(char, cmd_string); 5053 return (int)exit_code; 5054 } 5055 5056 bool os::find(address addr, outputStream* st) { 5057 int offset = -1; 5058 bool result = false; 5059 char buf[256]; 5060 if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) { 5061 st->print(PTR_FORMAT " ", addr); 5062 if (strlen(buf) < sizeof(buf) - 1) { 5063 char* p = strrchr(buf, '\\'); 5064 if (p) { 5065 st->print("%s", p + 1); 5066 } else { 5067 st->print("%s", buf); 5068 } 5069 } else { 5070 // The library name is probably truncated. Let's omit the library name. 5071 // See also JDK-8147512. 5072 } 5073 if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) { 5074 st->print("::%s + 0x%x", buf, offset); 5075 } 5076 st->cr(); 5077 result = true; 5078 } 5079 return result; 5080 } 5081 5082 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) { 5083 DWORD exception_code = e->ExceptionRecord->ExceptionCode; 5084 5085 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 5086 JavaThread* thread = JavaThread::current(); 5087 PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord; 5088 address addr = (address) exceptionRecord->ExceptionInformation[1]; 5089 5090 if (os::is_memory_serialize_page(thread, addr)) { 5091 return EXCEPTION_CONTINUE_EXECUTION; 5092 } 5093 } 5094 5095 return EXCEPTION_CONTINUE_SEARCH; 5096 } 5097 5098 // We don't build a headless jre for Windows 5099 bool os::is_headless_jre() { return false; } 5100 5101 static jint initSock() { 5102 WSADATA wsadata; 5103 5104 if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) { 5105 jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n", 5106 ::GetLastError()); 5107 return JNI_ERR; 5108 } 5109 return JNI_OK; 5110 } 5111 5112 struct hostent* os::get_host_by_name(char* name) { 5113 return (struct hostent*)gethostbyname(name); 5114 } 5115 5116 int 
os::socket_close(int fd) { 5117 return ::closesocket(fd); 5118 } 5119 5120 int os::socket(int domain, int type, int protocol) { 5121 return ::socket(domain, type, protocol); 5122 } 5123 5124 int os::connect(int fd, struct sockaddr* him, socklen_t len) { 5125 return ::connect(fd, him, len); 5126 } 5127 5128 int os::recv(int fd, char* buf, size_t nBytes, uint flags) { 5129 return ::recv(fd, buf, (int)nBytes, flags); 5130 } 5131 5132 int os::send(int fd, char* buf, size_t nBytes, uint flags) { 5133 return ::send(fd, buf, (int)nBytes, flags); 5134 } 5135 5136 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) { 5137 return ::send(fd, buf, (int)nBytes, flags); 5138 } 5139 5140 // WINDOWS CONTEXT Flags for THREAD_SAMPLING 5141 #if defined(IA32) 5142 #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS) 5143 #elif defined (AMD64) 5144 #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT) 5145 #endif 5146 5147 // returns true if thread could be suspended, 5148 // false otherwise 5149 static bool do_suspend(HANDLE* h) { 5150 if (h != NULL) { 5151 if (SuspendThread(*h) != ~0) { 5152 return true; 5153 } 5154 } 5155 return false; 5156 } 5157 5158 // resume the thread 5159 // calling resume on an active thread is a no-op 5160 static void do_resume(HANDLE* h) { 5161 if (h != NULL) { 5162 ResumeThread(*h); 5163 } 5164 } 5165 5166 // retrieve a suspend/resume context capable handle 5167 // from the tid. Caller validates handle return value. 
// Open a thread handle for 'tid' with rights sufficient for suspend/resume
// and context capture. The caller validates the returned handle.
void get_thread_handle_for_extended_context(HANDLE* h,
                                            OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

// Thread sampling implementation
//
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}

// Append a debugging prompt to 'buf' and show it in a message box; if the
// user answers Yes, trigger a breakpoint so a debugger can attach.
// Always returns false after handling the Yes case.
bool os::start_debugging(char *buf, int buflen) {
  int len = (int)strlen(buf);
  char *p = &buf[len];

  jio_snprintf(p, buflen-len,
               "\n\n"
               "Do you want to debug the problem?\n\n"
               "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
               "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
               "Otherwise, select 'No' to abort...",
               os::current_process_id(), os::current_thread_id());

  bool yes = os::message_box("Unexpected Error", buf);

  if (yes) {
    // os::breakpoint() calls DebugBreak(), which causes a breakpoint
    // exception. If VM is running inside a debugger, the debugger will
    // catch the exception. Otherwise, the breakpoint exception will reach
    // the default windows exception handler, which can spawn a debugger and
    // automatically attach to the dying VM.
    os::breakpoint();
    yes = false;
  }
  return yes;
}

void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//                             == false if only the base name of the library is passed in
//                                such as "L"
// Returns a NEW_C_HEAP_ARRAY-allocated string (caller frees), or NULL.
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // +2: one for the '_' separator, one for the terminating NUL.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}

#ifndef PRODUCT

// test the code path in reserve_memory_special() that tries to allocate memory in a single
// contiguous memory block at a particular address.
// The test first tries to find a good approximate address to allocate at by using the same
// method to allocate some memory at any address. The test then tries to allocate memory in
// the vicinity (not directly after it to avoid possible by-chance use of that location)
// This is of course only some dodgy assumption, there is no guarantee that the vicinity of
// the previously allocated memory is available for allocation. The only actual failure
// that is reported is when the test tries to allocate at a particular location but gets a
// different valid one. A NULL return value at this point is not considered an error but may
// be legitimate.
// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5313 void TestReserveMemorySpecial_test() { 5314 if (!UseLargePages) { 5315 if (VerboseInternalVMTests) { 5316 tty->print("Skipping test because large pages are disabled"); 5317 } 5318 return; 5319 } 5320 // save current value of globals 5321 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; 5322 bool old_use_numa_interleaving = UseNUMAInterleaving; 5323 5324 // set globals to make sure we hit the correct code path 5325 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; 5326 5327 // do an allocation at an address selected by the OS to get a good one. 5328 const size_t large_allocation_size = os::large_page_size() * 4; 5329 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); 5330 if (result == NULL) { 5331 if (VerboseInternalVMTests) { 5332 tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.", 5333 large_allocation_size); 5334 } 5335 } else { 5336 os::release_memory_special(result, large_allocation_size); 5337 5338 // allocate another page within the recently allocated memory area which seems to be a good location. At least 5339 // we managed to get it once. 5340 const size_t expected_allocation_size = os::large_page_size(); 5341 char* expected_location = result + os::large_page_size(); 5342 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); 5343 if (actual_location == NULL) { 5344 if (VerboseInternalVMTests) { 5345 tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.", 5346 expected_location, large_allocation_size); 5347 } 5348 } else { 5349 // release memory 5350 os::release_memory_special(actual_location, expected_allocation_size); 5351 // only now check, after releasing any memory to avoid any leaks. 
5352 assert(actual_location == expected_location, 5353 "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead", 5354 expected_location, expected_allocation_size, actual_location); 5355 } 5356 } 5357 5358 // restore globals 5359 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; 5360 UseNUMAInterleaving = old_use_numa_interleaving; 5361 } 5362 #endif // PRODUCT 5363 5364 /* 5365 All the defined signal names for Windows. 5366 5367 NOTE that not all of these names are accepted by FindSignal! 5368 5369 For various reasons some of these may be rejected at runtime. 5370 5371 Here are the names currently accepted by a user of sun.misc.Signal with 5372 1.4.1 (ignoring potential interaction with use of chaining, etc): 5373 5374 (LIST TBD) 5375 5376 */ 5377 int os::get_signal_number(const char* name) { 5378 static const struct { 5379 char* name; 5380 int number; 5381 } siglabels [] = 5382 // derived from version 6.0 VC98/include/signal.h 5383 {"ABRT", SIGABRT, // abnormal termination triggered by abort cl 5384 "FPE", SIGFPE, // floating point exception 5385 "SEGV", SIGSEGV, // segment violation 5386 "INT", SIGINT, // interrupt 5387 "TERM", SIGTERM, // software term signal from kill 5388 "BREAK", SIGBREAK, // Ctrl-Break sequence 5389 "ILL", SIGILL}; // illegal instruction 5390 for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) { 5391 if (strcmp(name, siglabels[i].name) == 0) { 5392 return siglabels[i].number; 5393 } 5394 } 5395 return -1; 5396 } 5397 5398 // Fast current thread access 5399 5400 int os::win32::_thread_ptr_offset = 0; 5401 5402 static void call_wrapper_dummy() {} 5403 5404 // We need to call the os_exception_wrapper once so that it sets 5405 // up the offset from FS of the thread pointer. 5406 void os::win32::initialize_thread_ptr_offset() { 5407 os::os_exception_wrapper((java_call_t)call_wrapper_dummy, 5408 NULL, NULL, NULL, NULL); 5409 }