/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
#define _WIN32_WINNT 0x0600

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_windows.h"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "oops/oop.inline.hpp"
#include "os_share_windows.hpp"
#include "os_windows.inline.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_version.hpp"
#include "semaphore_windows.hpp"
#include "services/attachListener.hpp"
#include "services/memTracker.hpp"
#include "services/runtimeService.hpp"
#include "utilities/align.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
#include "symbolengine.hpp"
#include "windbghelp.hpp"


#ifdef _DEBUG
#include <crtdbg.h>
#endif


#include <windows.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/timeb.h>
#include <objidl.h>
#include <shlobj.h>

#include <malloc.h>
#include <signal.h>
#include <direct.h>
#include <errno.h>
#include <fcntl.h>
#include <io.h>
#include <process.h>              // For _beginthreadex(), _endthreadex()
#include <imagehlp.h>             // For os::dll_address_to_function_name
// for enumerating dll libraries
#include <vdmdbg.h>

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(-1)

// For DLL loading/load error detection
// Values of PE COFF
#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
#define IMAGE_FILE_SIGNATURE_LENGTH 4

static HANDLE main_process;
static HANDLE main_thread;
static int    main_thread_id;

static FILETIME process_creation_time;
static FILETIME process_exit_time;
static FILETIME process_user_time;
static FILETIME process_kernel_time;

#ifdef _M_AMD64
  #define __CPU__ amd64
#else
  #define __CPU__ i486
#endif

// save DLL module handle, used by GetModuleFileName

HINSTANCE vm_lib_handle;

BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
  switch (reason) {
  case DLL_PROCESS_ATTACH:
    vm_lib_handle = hinst;
    if (ForceTimeHighResolution) {
      timeBeginPeriod(1L);
    }
    WindowsDbgHelp::pre_initialize();
    SymbolEngine::pre_initialize();
    break;
  case DLL_PROCESS_DETACH:
    if (ForceTimeHighResolution) {
      timeEndPeriod(1L);
    }
    break;
  default:
    break;
  }
  return true;
}

static inline double fileTimeAsDouble(FILETIME* time) {
  const double high  = (double) ((unsigned int) ~0);
  const double split = 10000000.0;
  double result = (time->dwLowDateTime / split) +
                   time->dwHighDateTime * (high/split);
  return result;
}

// Implementation of os

bool os::unsetenv(const char* name) {
  assert(name != NULL, "Null pointer");
  return (SetEnvironmentVariable(name, NULL) == TRUE);
}

// No setuid programs under Windows.
bool os::have_special_privileges() {
  return false;
}


// This method is a periodic task used to check for misbehaving JNI applications
// under CheckJNI; we can add any periodic checks here.
// On Windows it currently does nothing.
void os::run_periodic_checks() {
  return;
}

// previous UnhandledExceptionFilter, if there is one
static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;

LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);

void os::init_system_properties_values() {
  // sysclasspath, java_home, dll_dir
  {
    char *home_path;
    char *dll_path;
    char *pslash;
    char *bin = "\\bin";
    char home_dir[MAX_PATH + 1];
    char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");

    if (alt_home_dir != NULL) {
      strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
      home_dir[MAX_PATH] = '\0';
    } else {
      os::jvm_path(home_dir, sizeof(home_dir));
      // Found the full path to jvm.dll.
      // Now cut the path to <java_home>/jre if we can.
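      // (For illustration only: a jvm.dll path such as C:\jdk\jre\bin\server\jvm.dll
      // would be cut down to C:\jdk\jre by the three strrchr() trims below.)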
202 *(strrchr(home_dir, '\\')) = '\0'; // get rid of \jvm.dll 203 pslash = strrchr(home_dir, '\\'); 204 if (pslash != NULL) { 205 *pslash = '\0'; // get rid of \{client|server} 206 pslash = strrchr(home_dir, '\\'); 207 if (pslash != NULL) { 208 *pslash = '\0'; // get rid of \bin 209 } 210 } 211 } 212 213 home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal); 214 if (home_path == NULL) { 215 return; 216 } 217 strcpy(home_path, home_dir); 218 Arguments::set_java_home(home_path); 219 FREE_C_HEAP_ARRAY(char, home_path); 220 221 dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, 222 mtInternal); 223 if (dll_path == NULL) { 224 return; 225 } 226 strcpy(dll_path, home_dir); 227 strcat(dll_path, bin); 228 Arguments::set_dll_dir(dll_path); 229 FREE_C_HEAP_ARRAY(char, dll_path); 230 231 if (!set_boot_path('\\', ';')) { 232 return; 233 } 234 } 235 236 // library_path 237 #define EXT_DIR "\\lib\\ext" 238 #define BIN_DIR "\\bin" 239 #define PACKAGE_DIR "\\Sun\\Java" 240 { 241 // Win32 library search order (See the documentation for LoadLibrary): 242 // 243 // 1. The directory from which application is loaded. 244 // 2. The system wide Java Extensions directory (Java only) 245 // 3. System directory (GetSystemDirectory) 246 // 4. Windows directory (GetWindowsDirectory) 247 // 5. The PATH environment variable 248 // 6. The current directory 249 250 char *library_path; 251 char tmp[MAX_PATH]; 252 char *path_str = ::getenv("PATH"); 253 254 library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) + 255 sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal); 256 257 library_path[0] = '\0'; 258 259 GetModuleFileName(NULL, tmp, sizeof(tmp)); 260 *(strrchr(tmp, '\\')) = '\0'; 261 strcat(library_path, tmp); 262 263 GetWindowsDirectory(tmp, sizeof(tmp)); 264 strcat(library_path, ";"); 265 strcat(library_path, tmp); 266 strcat(library_path, PACKAGE_DIR BIN_DIR); 267 268 GetSystemDirectory(tmp, sizeof(tmp)); 269 strcat(library_path, ";"); 270 strcat(library_path, tmp); 271 272 GetWindowsDirectory(tmp, sizeof(tmp)); 273 strcat(library_path, ";"); 274 strcat(library_path, tmp); 275 276 if (path_str) { 277 strcat(library_path, ";"); 278 strcat(library_path, path_str); 279 } 280 281 strcat(library_path, ";."); 282 283 Arguments::set_library_path(library_path); 284 FREE_C_HEAP_ARRAY(char, library_path); 285 } 286 287 // Default extensions directory 288 { 289 char path[MAX_PATH]; 290 char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1]; 291 GetWindowsDirectory(path, MAX_PATH); 292 sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR, 293 path, PACKAGE_DIR, EXT_DIR); 294 Arguments::set_ext_dirs(buf); 295 } 296 #undef EXT_DIR 297 #undef BIN_DIR 298 #undef PACKAGE_DIR 299 300 #ifndef _WIN64 301 // set our UnhandledExceptionFilter and save any previous one 302 prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception); 303 #endif 304 305 // Done 306 return; 307 } 308 309 void os::breakpoint() { 310 DebugBreak(); 311 } 312 313 // Invoked from the BREAKPOINT Macro 314 extern "C" void breakpoint() { 315 os::breakpoint(); 316 } 317 318 // RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP. 319 // So far, this method is only used by Native Memory Tracking, which is 320 // only supported on Windows XP or later. 
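// Note: the 'toSkip + 1' below presumably also skips this wrapper's own frame,
// so callers count skipped frames from their own call site.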
321 // 322 int os::get_native_stack(address* stack, int frames, int toSkip) { 323 int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL); 324 for (int index = captured; index < frames; index ++) { 325 stack[index] = NULL; 326 } 327 return captured; 328 } 329 330 331 // os::current_stack_base() 332 // 333 // Returns the base of the stack, which is the stack's 334 // starting address. This function must be called 335 // while running on the stack of the thread being queried. 336 337 address os::current_stack_base() { 338 MEMORY_BASIC_INFORMATION minfo; 339 address stack_bottom; 340 size_t stack_size; 341 342 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 343 stack_bottom = (address)minfo.AllocationBase; 344 stack_size = minfo.RegionSize; 345 346 // Add up the sizes of all the regions with the same 347 // AllocationBase. 348 while (1) { 349 VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo)); 350 if (stack_bottom == (address)minfo.AllocationBase) { 351 stack_size += minfo.RegionSize; 352 } else { 353 break; 354 } 355 } 356 return stack_bottom + stack_size; 357 } 358 359 size_t os::current_stack_size() { 360 size_t sz; 361 MEMORY_BASIC_INFORMATION minfo; 362 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 363 sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase; 364 return sz; 365 } 366 367 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 368 const struct tm* time_struct_ptr = localtime(clock); 369 if (time_struct_ptr != NULL) { 370 *res = *time_struct_ptr; 371 return res; 372 } 373 return NULL; 374 } 375 376 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) { 377 const struct tm* time_struct_ptr = gmtime(clock); 378 if (time_struct_ptr != NULL) { 379 *res = *time_struct_ptr; 380 return res; 381 } 382 return NULL; 383 } 384 385 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo); 386 387 // Thread start routine for all newly created threads 388 static unsigned __stdcall thread_native_entry(Thread* thread) { 389 // Try to randomize the cache line index of hot stack frames. 390 // This helps when threads of the same stack traces evict each other's 391 // cache lines. The threads can be either from the same JVM instance, or 392 // from different JVM instances. The benefit is especially true for 393 // processors with hyperthreading technology. 394 static int counter = 0; 395 int pid = os::current_process_id(); 396 _alloca(((pid ^ counter++) & 7) * 128); 397 398 thread->initialize_thread_current(); 399 400 OSThread* osthr = thread->osthread(); 401 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 402 403 if (UseNUMA) { 404 int lgrp_id = os::numa_get_group_id(); 405 if (lgrp_id != -1) { 406 thread->set_lgrp_id(lgrp_id); 407 } 408 } 409 410 // Diagnostic code to investigate JDK-6573254 411 int res = 30115; // non-java thread 412 if (thread->is_Java_thread()) { 413 res = 20115; // java thread 414 } 415 416 log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id()); 417 418 // Install a win32 structured exception handler around every thread created 419 // by VM, so VM can generate error dump when an exception occurred in non- 420 // Java thread (e.g. VM thread). 421 __try { 422 thread->run(); 423 } __except(topLevelExceptionFilter( 424 (_EXCEPTION_POINTERS*)_exception_info())) { 425 // Nothing to do. 
  }

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::add code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count);
  }

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  // Thread must not return from exit_process_or_thread(), but if it does,
  // let it proceed to exit normally
  return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
}

static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
                                  int thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Initialize support for Java interrupts
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return NULL;
  }
  osthread->set_interrupt_event(interrupt_event);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}


bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  HANDLE thread_h;
  if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
                       &thread_h, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  OSThread* osthread = create_os_thread(thread, thread_h,
                                        (int)current_thread_id());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
                       os::current_thread_id());

  return true;
}

bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);
  return true;
}

// Helper function to trace _beginthreadex attributes,
// similar to os::Posix::describe_pthread_attr()
static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
                                               size_t stacksize, unsigned initflag) {
  stringStream ss(buf, buflen);
  if (stacksize == 0) {
    ss.print("stacksize: default, ");
  } else {
    ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
  }
  ss.print("flags: ");
  #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
  #define ALL(X) \
    X(CREATE_SUSPENDED) \
    X(STACK_SIZE_PARAM_IS_A_RESERVATION)
  ALL(PRINT_FLAG)
  #undef ALL
  #undef PRINT_FLAG
  return buf;
}

// Allocate and initialize a new OSThread
bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t stack_size) {
  unsigned thread_id;

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Initialize support for Java interrupts
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return false;
  }
  osthread->set_interrupt_event(interrupt_event);
  osthread->set_interrupted(false);

  thread->set_osthread(osthread);

  if (stack_size == 0) {
    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize, whose default value can be changed
      // with the flag -Xss.
      if (JavaThread::stack_size_at_create() > 0) {
        stack_size = JavaThread::stack_size_at_create();
      }
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }

  // Create the Win32 thread
  //
  // Contrary to what the MSDN documentation says, "stack_size" in _beginthreadex()
  // does not specify stack size. Instead, it specifies the size of
  // initially committed space. The stack size is determined by the
  // PE header in the executable. If the committed "stack_size" is larger
  // than the default value in the PE header, the stack is rounded up to the
  // nearest multiple of 1MB. For example if the launcher has a default
  // stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all, it only affects the initial
  // commitment. On the other hand, specifying 'stack_size' larger than the
  // default value may cause a significant increase in memory usage, because
  // not only will the stack space be rounded up to a multiple of 1MB, but the
  // entire space is also committed upfront.
  //
  // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
  // for CreateThread() that can treat 'stack_size' as a stack size. However we
  // are not supposed to call CreateThread() directly according to the MSDN
  // documentation, because the JVM uses the C runtime library. The good news is
  // that the flag appears to work with _beginthreadex() as well.
613 614 const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION; 615 HANDLE thread_handle = 616 (HANDLE)_beginthreadex(NULL, 617 (unsigned)stack_size, 618 (unsigned (__stdcall *)(void*)) thread_native_entry, 619 thread, 620 initflag, 621 &thread_id); 622 623 char buf[64]; 624 if (thread_handle != NULL) { 625 log_info(os, thread)("Thread started (tid: %u, attributes: %s)", 626 thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag)); 627 } else { 628 log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.", 629 os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag)); 630 } 631 632 if (thread_handle == NULL) { 633 // Need to clean up stuff we've allocated so far 634 CloseHandle(osthread->interrupt_event()); 635 thread->set_osthread(NULL); 636 delete osthread; 637 return NULL; 638 } 639 640 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count); 641 642 // Store info on the Win32 thread into the OSThread 643 osthread->set_thread_handle(thread_handle); 644 osthread->set_thread_id(thread_id); 645 646 // Initial thread state is INITIALIZED, not SUSPENDED 647 osthread->set_state(INITIALIZED); 648 649 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 650 return true; 651 } 652 653 654 // Free Win32 resources related to the OSThread 655 void os::free_thread(OSThread* osthread) { 656 assert(osthread != NULL, "osthread not set"); 657 658 // We are told to free resources of the argument thread, 659 // but we can only really operate on the current thread. 660 assert(Thread::current()->osthread() == osthread, 661 "os::free_thread but not current thread"); 662 663 CloseHandle(osthread->thread_handle()); 664 CloseHandle(osthread->interrupt_event()); 665 delete osthread; 666 } 667 668 static jlong first_filetime; 669 static jlong initial_performance_count; 670 static jlong performance_frequency; 671 672 673 jlong as_long(LARGE_INTEGER x) { 674 jlong result = 0; // initialization to avoid warning 675 set_high(&result, x.HighPart); 676 set_low(&result, x.LowPart); 677 return result; 678 } 679 680 681 jlong os::elapsed_counter() { 682 LARGE_INTEGER count; 683 QueryPerformanceCounter(&count); 684 return as_long(count) - initial_performance_count; 685 } 686 687 688 jlong os::elapsed_frequency() { 689 return performance_frequency; 690 } 691 692 693 julong os::available_memory() { 694 return win32::available_memory(); 695 } 696 697 julong os::win32::available_memory() { 698 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 699 // value if total memory is larger than 4GB 700 MEMORYSTATUSEX ms; 701 ms.dwLength = sizeof(ms); 702 GlobalMemoryStatusEx(&ms); 703 704 return (julong)ms.ullAvailPhys; 705 } 706 707 julong os::physical_memory() { 708 return win32::physical_memory(); 709 } 710 711 bool os::has_allocatable_memory_limit(julong* limit) { 712 MEMORYSTATUSEX ms; 713 ms.dwLength = sizeof(ms); 714 GlobalMemoryStatusEx(&ms); 715 #ifdef _LP64 716 *limit = (julong)ms.ullAvailVirtual; 717 return true; 718 #else 719 // Limit to 1400m because of the 2gb address space wall 720 *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual); 721 return true; 722 #endif 723 } 724 725 int os::active_processor_count() { 726 DWORD_PTR lpProcessAffinityMask = 0; 727 DWORD_PTR lpSystemAffinityMask = 0; 728 int proc_count = processor_count(); 729 if (proc_count <= sizeof(UINT_PTR) * BitsPerByte && 730 
GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) { 731 // Nof active processors is number of bits in process affinity mask 732 int bitcount = 0; 733 while (lpProcessAffinityMask != 0) { 734 lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1); 735 bitcount++; 736 } 737 return bitcount; 738 } else { 739 return proc_count; 740 } 741 } 742 743 void os::set_native_thread_name(const char *name) { 744 745 // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx 746 // 747 // Note that unfortunately this only works if the process 748 // is already attached to a debugger; debugger must observe 749 // the exception below to show the correct name. 750 751 // If there is no debugger attached skip raising the exception 752 if (!IsDebuggerPresent()) { 753 return; 754 } 755 756 const DWORD MS_VC_EXCEPTION = 0x406D1388; 757 struct { 758 DWORD dwType; // must be 0x1000 759 LPCSTR szName; // pointer to name (in user addr space) 760 DWORD dwThreadID; // thread ID (-1=caller thread) 761 DWORD dwFlags; // reserved for future use, must be zero 762 } info; 763 764 info.dwType = 0x1000; 765 info.szName = name; 766 info.dwThreadID = -1; 767 info.dwFlags = 0; 768 769 __try { 770 RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info ); 771 } __except(EXCEPTION_EXECUTE_HANDLER) {} 772 } 773 774 bool os::distribute_processes(uint length, uint* distribution) { 775 // Not yet implemented. 776 return false; 777 } 778 779 bool os::bind_to_processor(uint processor_id) { 780 // Not yet implemented. 781 return false; 782 } 783 784 void os::win32::initialize_performance_counter() { 785 LARGE_INTEGER count; 786 QueryPerformanceFrequency(&count); 787 performance_frequency = as_long(count); 788 QueryPerformanceCounter(&count); 789 initial_performance_count = as_long(count); 790 } 791 792 793 double os::elapsedTime() { 794 return (double) elapsed_counter() / (double) elapsed_frequency(); 795 } 796 797 798 // Windows format: 799 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601. 
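// (1601-01-01 precedes the Java epoch of 1970-01-01 by 11,644,473,600 seconds,
// i.e. 116444736000000000 hundred-nanosecond units - see the _offset constant below.)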
800 // Java format: 801 // Java standards require the number of milliseconds since 1/1/1970 802 803 // Constant offset - calculated using offset() 804 static jlong _offset = 116444736000000000; 805 // Fake time counter for reproducible results when debugging 806 static jlong fake_time = 0; 807 808 #ifdef ASSERT 809 // Just to be safe, recalculate the offset in debug mode 810 static jlong _calculated_offset = 0; 811 static int _has_calculated_offset = 0; 812 813 jlong offset() { 814 if (_has_calculated_offset) return _calculated_offset; 815 SYSTEMTIME java_origin; 816 java_origin.wYear = 1970; 817 java_origin.wMonth = 1; 818 java_origin.wDayOfWeek = 0; // ignored 819 java_origin.wDay = 1; 820 java_origin.wHour = 0; 821 java_origin.wMinute = 0; 822 java_origin.wSecond = 0; 823 java_origin.wMilliseconds = 0; 824 FILETIME jot; 825 if (!SystemTimeToFileTime(&java_origin, &jot)) { 826 fatal("Error = %d\nWindows error", GetLastError()); 827 } 828 _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime); 829 _has_calculated_offset = 1; 830 assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal"); 831 return _calculated_offset; 832 } 833 #else 834 jlong offset() { 835 return _offset; 836 } 837 #endif 838 839 jlong windows_to_java_time(FILETIME wt) { 840 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 841 return (a - offset()) / 10000; 842 } 843 844 // Returns time ticks in (10th of micro seconds) 845 jlong windows_to_time_ticks(FILETIME wt) { 846 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 847 return (a - offset()); 848 } 849 850 FILETIME java_to_windows_time(jlong l) { 851 jlong a = (l * 10000) + offset(); 852 FILETIME result; 853 result.dwHighDateTime = high(a); 854 result.dwLowDateTime = low(a); 855 return result; 856 } 857 858 bool os::supports_vtime() { return true; } 859 bool os::enable_vtime() { return false; } 860 bool os::vtime_enabled() { return false; } 861 862 double os::elapsedVTime() { 863 FILETIME created; 864 FILETIME exited; 865 FILETIME kernel; 866 FILETIME user; 867 if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) { 868 // the resolution of windows_to_java_time() should be sufficient (ms) 869 return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS; 870 } else { 871 return elapsedTime(); 872 } 873 } 874 875 jlong os::javaTimeMillis() { 876 if (UseFakeTimers) { 877 return fake_time++; 878 } else { 879 FILETIME wt; 880 GetSystemTimeAsFileTime(&wt); 881 return windows_to_java_time(wt); 882 } 883 } 884 885 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) { 886 FILETIME wt; 887 GetSystemTimeAsFileTime(&wt); 888 jlong ticks = windows_to_time_ticks(wt); // 10th of micros 889 jlong secs = jlong(ticks / 10000000); // 10000 * 1000 890 seconds = secs; 891 nanos = jlong(ticks - (secs*10000000)) * 100; 892 } 893 894 jlong os::javaTimeNanos() { 895 LARGE_INTEGER current_count; 896 QueryPerformanceCounter(¤t_count); 897 double current = as_long(current_count); 898 double freq = performance_frequency; 899 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC); 900 return time; 901 } 902 903 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 904 jlong freq = performance_frequency; 905 if (freq < NANOSECS_PER_SEC) { 906 // the performance counter is 64 bits and we will 907 // be multiplying it -- so no wrap in 64 bits 908 info_ptr->max_value = ALL_64_BITS; 909 } else if (freq > NANOSECS_PER_SEC) { 910 // use the max value the counter can reach to 
911 // determine the max value which could be returned 912 julong max_counter = (julong)ALL_64_BITS; 913 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC)); 914 } else { 915 // the performance counter is 64 bits and we will 916 // be using it directly -- so no wrap in 64 bits 917 info_ptr->max_value = ALL_64_BITS; 918 } 919 920 // using a counter, so no skipping 921 info_ptr->may_skip_backward = false; 922 info_ptr->may_skip_forward = false; 923 924 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time 925 } 926 927 char* os::local_time_string(char *buf, size_t buflen) { 928 SYSTEMTIME st; 929 GetLocalTime(&st); 930 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 931 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); 932 return buf; 933 } 934 935 bool os::getTimesSecs(double* process_real_time, 936 double* process_user_time, 937 double* process_system_time) { 938 HANDLE h_process = GetCurrentProcess(); 939 FILETIME create_time, exit_time, kernel_time, user_time; 940 BOOL result = GetProcessTimes(h_process, 941 &create_time, 942 &exit_time, 943 &kernel_time, 944 &user_time); 945 if (result != 0) { 946 FILETIME wt; 947 GetSystemTimeAsFileTime(&wt); 948 jlong rtc_millis = windows_to_java_time(wt); 949 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS); 950 *process_user_time = 951 (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS); 952 *process_system_time = 953 (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS); 954 return true; 955 } else { 956 return false; 957 } 958 } 959 960 void os::shutdown() { 961 // allow PerfMemory to attempt cleanup of any persistent resources 962 perfMemory_exit(); 963 964 // flush buffered output, finish log files 965 ostream_abort(); 966 967 // Check for abort hook 968 abort_hook_t abort_hook = Arguments::abort_hook(); 969 if (abort_hook != NULL) { 970 abort_hook(); 971 } 972 } 973 974 975 static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, 976 PMINIDUMP_EXCEPTION_INFORMATION, 977 PMINIDUMP_USER_STREAM_INFORMATION, 978 PMINIDUMP_CALLBACK_INFORMATION); 979 980 static HANDLE dumpFile = NULL; 981 982 // Check if dump file can be created. 
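// On success the minidump file handle is kept open in the static 'dumpFile'
// so that os::abort() can later write the dump to it.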
983 void os::check_dump_limit(char* buffer, size_t buffsz) { 984 bool status = true; 985 if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) { 986 jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line"); 987 status = false; 988 } 989 990 #ifndef ASSERT 991 if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) { 992 jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows"); 993 status = false; 994 } 995 #endif 996 997 if (status) { 998 const char* cwd = get_current_directory(NULL, 0); 999 int pid = current_process_id(); 1000 if (cwd != NULL) { 1001 jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid); 1002 } else { 1003 jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid); 1004 } 1005 1006 if (dumpFile == NULL && 1007 (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL)) 1008 == INVALID_HANDLE_VALUE) { 1009 jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError()); 1010 status = false; 1011 } 1012 } 1013 VMError::record_coredump_status(buffer, status); 1014 } 1015 1016 void os::abort(bool dump_core, void* siginfo, const void* context) { 1017 EXCEPTION_POINTERS ep; 1018 MINIDUMP_EXCEPTION_INFORMATION mei; 1019 MINIDUMP_EXCEPTION_INFORMATION* pmei; 1020 1021 HANDLE hProcess = GetCurrentProcess(); 1022 DWORD processId = GetCurrentProcessId(); 1023 MINIDUMP_TYPE dumpType; 1024 1025 shutdown(); 1026 if (!dump_core || dumpFile == NULL) { 1027 if (dumpFile != NULL) { 1028 CloseHandle(dumpFile); 1029 } 1030 win32::exit_process_or_thread(win32::EPT_PROCESS, 1); 1031 } 1032 1033 dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData | 1034 MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules); 1035 1036 if (siginfo != NULL && context != NULL) { 1037 ep.ContextRecord = (PCONTEXT) context; 1038 ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo; 1039 1040 mei.ThreadId = GetCurrentThreadId(); 1041 mei.ExceptionPointers = &ep; 1042 pmei = &mei; 1043 } else { 1044 pmei = NULL; 1045 } 1046 1047 // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all 1048 // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then. 1049 if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) && 1050 !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) { 1051 jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError()); 1052 } 1053 CloseHandle(dumpFile); 1054 win32::exit_process_or_thread(win32::EPT_PROCESS, 1); 1055 } 1056 1057 // Die immediately, no exit hook, no abort hook, no cleanup. 1058 void os::die() { 1059 win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1); 1060 } 1061 1062 // Directory routines copied from src/win32/native/java/io/dirent_md.c 1063 // * dirent_md.c 1.15 00/02/02 1064 // 1065 // The declarations for DIR and struct dirent are in jvm_win32.h. 1066 1067 // Caller must have already run dirname through JVM_NativePath, which removes 1068 // duplicate slashes and converts all instances of '/' into '\\'. 
1069 1070 DIR * os::opendir(const char *dirname) { 1071 assert(dirname != NULL, "just checking"); // hotspot change 1072 DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal); 1073 DWORD fattr; // hotspot change 1074 char alt_dirname[4] = { 0, 0, 0, 0 }; 1075 1076 if (dirp == 0) { 1077 errno = ENOMEM; 1078 return 0; 1079 } 1080 1081 // Win32 accepts "\" in its POSIX stat(), but refuses to treat it 1082 // as a directory in FindFirstFile(). We detect this case here and 1083 // prepend the current drive name. 1084 // 1085 if (dirname[1] == '\0' && dirname[0] == '\\') { 1086 alt_dirname[0] = _getdrive() + 'A' - 1; 1087 alt_dirname[1] = ':'; 1088 alt_dirname[2] = '\\'; 1089 alt_dirname[3] = '\0'; 1090 dirname = alt_dirname; 1091 } 1092 1093 dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal); 1094 if (dirp->path == 0) { 1095 free(dirp); 1096 errno = ENOMEM; 1097 return 0; 1098 } 1099 strcpy(dirp->path, dirname); 1100 1101 fattr = GetFileAttributes(dirp->path); 1102 if (fattr == 0xffffffff) { 1103 free(dirp->path); 1104 free(dirp); 1105 errno = ENOENT; 1106 return 0; 1107 } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) { 1108 free(dirp->path); 1109 free(dirp); 1110 errno = ENOTDIR; 1111 return 0; 1112 } 1113 1114 // Append "*.*", or possibly "\\*.*", to path 1115 if (dirp->path[1] == ':' && 1116 (dirp->path[2] == '\0' || 1117 (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) { 1118 // No '\\' needed for cases like "Z:" or "Z:\" 1119 strcat(dirp->path, "*.*"); 1120 } else { 1121 strcat(dirp->path, "\\*.*"); 1122 } 1123 1124 dirp->handle = FindFirstFile(dirp->path, &dirp->find_data); 1125 if (dirp->handle == INVALID_HANDLE_VALUE) { 1126 if (GetLastError() != ERROR_FILE_NOT_FOUND) { 1127 free(dirp->path); 1128 free(dirp); 1129 errno = EACCES; 1130 return 0; 1131 } 1132 } 1133 return dirp; 1134 } 1135 1136 // parameter dbuf unused on Windows 1137 struct dirent * os::readdir(DIR *dirp, dirent *dbuf) { 1138 assert(dirp != NULL, "just checking"); // hotspot change 1139 if (dirp->handle == INVALID_HANDLE_VALUE) { 1140 return 0; 1141 } 1142 1143 strcpy(dirp->dirent.d_name, dirp->find_data.cFileName); 1144 1145 if (!FindNextFile(dirp->handle, &dirp->find_data)) { 1146 if (GetLastError() == ERROR_INVALID_HANDLE) { 1147 errno = EBADF; 1148 return 0; 1149 } 1150 FindClose(dirp->handle); 1151 dirp->handle = INVALID_HANDLE_VALUE; 1152 } 1153 1154 return &dirp->dirent; 1155 } 1156 1157 int os::closedir(DIR *dirp) { 1158 assert(dirp != NULL, "just checking"); // hotspot change 1159 if (dirp->handle != INVALID_HANDLE_VALUE) { 1160 if (!FindClose(dirp->handle)) { 1161 errno = EBADF; 1162 return -1; 1163 } 1164 dirp->handle = INVALID_HANDLE_VALUE; 1165 } 1166 free(dirp->path); 1167 free(dirp); 1168 return 0; 1169 } 1170 1171 // This must be hard coded because it's the system's temporary 1172 // directory not the java application's temp directory, ala java.io.tmpdir. 
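// (Per the Win32 documentation, GetTempPath() resolves the TMP, TEMP and
// USERPROFILE environment variables in that order, falling back to the
// Windows directory.)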
1173 const char* os::get_temp_directory() { 1174 static char path_buf[MAX_PATH]; 1175 if (GetTempPath(MAX_PATH, path_buf) > 0) { 1176 return path_buf; 1177 } else { 1178 path_buf[0] = '\0'; 1179 return path_buf; 1180 } 1181 } 1182 1183 // Needs to be in os specific directory because windows requires another 1184 // header file <direct.h> 1185 const char* os::get_current_directory(char *buf, size_t buflen) { 1186 int n = static_cast<int>(buflen); 1187 if (buflen > INT_MAX) n = INT_MAX; 1188 return _getcwd(buf, n); 1189 } 1190 1191 //----------------------------------------------------------- 1192 // Helper functions for fatal error handler 1193 #ifdef _WIN64 1194 // Helper routine which returns true if address in 1195 // within the NTDLL address space. 1196 // 1197 static bool _addr_in_ntdll(address addr) { 1198 HMODULE hmod; 1199 MODULEINFO minfo; 1200 1201 hmod = GetModuleHandle("NTDLL.DLL"); 1202 if (hmod == NULL) return false; 1203 if (!GetModuleInformation(GetCurrentProcess(), hmod, 1204 &minfo, sizeof(MODULEINFO))) { 1205 return false; 1206 } 1207 1208 if ((addr >= minfo.lpBaseOfDll) && 1209 (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) { 1210 return true; 1211 } else { 1212 return false; 1213 } 1214 } 1215 #endif 1216 1217 struct _modinfo { 1218 address addr; 1219 char* full_path; // point to a char buffer 1220 int buflen; // size of the buffer 1221 address base_addr; 1222 }; 1223 1224 static int _locate_module_by_addr(const char * mod_fname, address base_addr, 1225 address top_address, void * param) { 1226 struct _modinfo *pmod = (struct _modinfo *)param; 1227 if (!pmod) return -1; 1228 1229 if (base_addr <= pmod->addr && 1230 top_address > pmod->addr) { 1231 // if a buffer is provided, copy path name to the buffer 1232 if (pmod->full_path) { 1233 jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname); 1234 } 1235 pmod->base_addr = base_addr; 1236 return 1; 1237 } 1238 return 0; 1239 } 1240 1241 bool os::dll_address_to_library_name(address addr, char* buf, 1242 int buflen, int* offset) { 1243 // buf is not optional, but offset is optional 1244 assert(buf != NULL, "sanity check"); 1245 1246 // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always 1247 // return the full path to the DLL file, sometimes it returns path 1248 // to the corresponding PDB file (debug info); sometimes it only 1249 // returns partial path, which makes life painful. 

  struct _modinfo mi;
  mi.addr      = addr;
  mi.full_path = buf;
  mi.buflen    = buflen;
  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
    return true;
  }
  if (offset != NULL) *offset = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
static int _locate_jvm_dll(const char* mod_fname, address base_addr,
                           address top_address, void * param) {
  if (!param) return -1;

  if (base_addr <= (address)_locate_jvm_dll &&
      top_address > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = top_address;
    return 1;
  }
  return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
bool os::address_is_in_vm(address addr) {
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(const char* fname, address base_address,
                         address top_address, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
  return 0;
}

// Loads a .dll/.so and,
// in case of error, checks whether the .dll/.so was built for the
// same architecture as the Hotspot that is running
void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
  void * result = LoadLibrary(name);
  if (result != NULL) {
    if (InitializeDbgHelpEarly) {
      // Recalculate pdb search path if a DLL was loaded successfully.
      SymbolEngine::recalc_search_path();
    }
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
    ebuf[ebuflen - 1] = '\0';
    return NULL;
  }

  // Parsing of the dll follows.
  // If we can read the dll-info and find that the dll was built
  // for an architecture other than the one Hotspot is running on,
  // then print "DLL was built for a different architecture" to the buffer,
  // else call os::lasterror to obtain the system error message.

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen - 1] = '\0';
  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
  if (fd < 0) {
    return NULL;
  }

  uint32_t signature_offset;
  uint16_t lib_arch = 0;
  bool failed_to_get_lib_arch =
    ( // Go to position 3c in the dll
      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
      ||
      // Read location of signature
      (sizeof(signature_offset) !=
      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
      ||
      // Go to COFF File Header in dll
      // that is located after "signature" (4 bytes long)
      (os::seek_to_file_offset(fd,
      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
      ||
      // Read field that contains code of architecture
      // that dll was built for
      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
    );

  ::close(fd);
  if (failed_to_get_lib_arch) {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  typedef struct {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[] = {
    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"}
  };
#if (defined _M_AMD64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of the following \
         is defined: _M_AMD64 or _M_IX86
#endif


  // Obtain strings for the printf operation below:
  // lib_arch_str names the platform this .dll was built for,
  // running_arch_str names the platform Hotspot was built for.
  char *running_arch_str = NULL, *lib_arch_str = NULL;
  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (lib_arch == arch_array[i].arch_code) {
      lib_arch_str = arch_array[i].arch_name;
    }
    if (running_arch == arch_array[i].arch_code) {
      running_arch_str = arch_array[i].arch_name;
    }
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...)
msg 1417 if (lib_arch == running_arch) { 1418 return NULL; 1419 } 1420 1421 if (lib_arch_str != NULL) { 1422 ::_snprintf(ebuf, ebuflen - 1, 1423 "Can't load %s-bit .dll on a %s-bit platform", 1424 lib_arch_str, running_arch_str); 1425 } else { 1426 // don't know what architecture this dll was build for 1427 ::_snprintf(ebuf, ebuflen - 1, 1428 "Can't load this .dll (machine code=0x%x) on a %s-bit platform", 1429 lib_arch, running_arch_str); 1430 } 1431 1432 return NULL; 1433 } 1434 1435 void os::print_dll_info(outputStream *st) { 1436 st->print_cr("Dynamic libraries:"); 1437 get_loaded_modules_info(_print_module, (void *)st); 1438 } 1439 1440 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) { 1441 HANDLE hProcess; 1442 1443 # define MAX_NUM_MODULES 128 1444 HMODULE modules[MAX_NUM_MODULES]; 1445 static char filename[MAX_PATH]; 1446 int result = 0; 1447 1448 int pid = os::current_process_id(); 1449 hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, 1450 FALSE, pid); 1451 if (hProcess == NULL) return 0; 1452 1453 DWORD size_needed; 1454 if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) { 1455 CloseHandle(hProcess); 1456 return 0; 1457 } 1458 1459 // number of modules that are currently loaded 1460 int num_modules = size_needed / sizeof(HMODULE); 1461 1462 for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) { 1463 // Get Full pathname: 1464 if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) { 1465 filename[0] = '\0'; 1466 } 1467 1468 MODULEINFO modinfo; 1469 if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) { 1470 modinfo.lpBaseOfDll = NULL; 1471 modinfo.SizeOfImage = 0; 1472 } 1473 1474 // Invoke callback function 1475 result = callback(filename, (address)modinfo.lpBaseOfDll, 1476 (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param); 1477 if (result) break; 1478 } 1479 1480 CloseHandle(hProcess); 1481 return result; 1482 } 1483 1484 bool os::get_host_name(char* buf, size_t buflen) { 1485 DWORD size = (DWORD)buflen; 1486 return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE); 1487 } 1488 1489 void os::get_summary_os_info(char* buf, size_t buflen) { 1490 stringStream sst(buf, buflen); 1491 os::win32::print_windows_version(&sst); 1492 // chop off newline character 1493 char* nl = strchr(buf, '\n'); 1494 if (nl != NULL) *nl = '\0'; 1495 } 1496 1497 int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) { 1498 int ret = vsnprintf(buf, len, fmt, args); 1499 // Get the correct buffer size if buf is too small 1500 if (ret < 0) { 1501 return _vscprintf(fmt, args); 1502 } 1503 return ret; 1504 } 1505 1506 static inline time_t get_mtime(const char* filename) { 1507 struct stat st; 1508 int ret = os::stat(filename, &st); 1509 assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno)); 1510 return st.st_mtime; 1511 } 1512 1513 int os::compare_file_modified_times(const char* file1, const char* file2) { 1514 time_t t1 = get_mtime(file1); 1515 time_t t2 = get_mtime(file2); 1516 return t1 - t2; 1517 } 1518 1519 void os::print_os_info_brief(outputStream* st) { 1520 os::print_os_info(st); 1521 } 1522 1523 void os::print_os_info(outputStream* st) { 1524 #ifdef ASSERT 1525 char buffer[1024]; 1526 st->print("HostName: "); 1527 if (get_host_name(buffer, sizeof(buffer))) { 1528 st->print("%s ", buffer); 1529 } else { 1530 st->print("N/A "); 1531 } 1532 #endif 1533 st->print("OS:"); 1534 
os::win32::print_windows_version(st); 1535 } 1536 1537 void os::win32::print_windows_version(outputStream* st) { 1538 OSVERSIONINFOEX osvi; 1539 VS_FIXEDFILEINFO *file_info; 1540 TCHAR kernel32_path[MAX_PATH]; 1541 UINT len, ret; 1542 1543 // Use the GetVersionEx information to see if we're on a server or 1544 // workstation edition of Windows. Starting with Windows 8.1 we can't 1545 // trust the OS version information returned by this API. 1546 ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX)); 1547 osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); 1548 if (!GetVersionEx((OSVERSIONINFO *)&osvi)) { 1549 st->print_cr("Call to GetVersionEx failed"); 1550 return; 1551 } 1552 bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION); 1553 1554 // Get the full path to \Windows\System32\kernel32.dll and use that for 1555 // determining what version of Windows we're running on. 1556 len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1; 1557 ret = GetSystemDirectory(kernel32_path, len); 1558 if (ret == 0 || ret > len) { 1559 st->print_cr("Call to GetSystemDirectory failed"); 1560 return; 1561 } 1562 strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret); 1563 1564 DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL); 1565 if (version_size == 0) { 1566 st->print_cr("Call to GetFileVersionInfoSize failed"); 1567 return; 1568 } 1569 1570 LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal); 1571 if (version_info == NULL) { 1572 st->print_cr("Failed to allocate version_info"); 1573 return; 1574 } 1575 1576 if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) { 1577 os::free(version_info); 1578 st->print_cr("Call to GetFileVersionInfo failed"); 1579 return; 1580 } 1581 1582 if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) { 1583 os::free(version_info); 1584 st->print_cr("Call to VerQueryValue failed"); 1585 return; 1586 } 1587 1588 int major_version = HIWORD(file_info->dwProductVersionMS); 1589 int minor_version = LOWORD(file_info->dwProductVersionMS); 1590 int build_number = HIWORD(file_info->dwProductVersionLS); 1591 int build_minor = LOWORD(file_info->dwProductVersionLS); 1592 int os_vers = major_version * 1000 + minor_version; 1593 os::free(version_info); 1594 1595 st->print(" Windows "); 1596 switch (os_vers) { 1597 1598 case 6000: 1599 if (is_workstation) { 1600 st->print("Vista"); 1601 } else { 1602 st->print("Server 2008"); 1603 } 1604 break; 1605 1606 case 6001: 1607 if (is_workstation) { 1608 st->print("7"); 1609 } else { 1610 st->print("Server 2008 R2"); 1611 } 1612 break; 1613 1614 case 6002: 1615 if (is_workstation) { 1616 st->print("8"); 1617 } else { 1618 st->print("Server 2012"); 1619 } 1620 break; 1621 1622 case 6003: 1623 if (is_workstation) { 1624 st->print("8.1"); 1625 } else { 1626 st->print("Server 2012 R2"); 1627 } 1628 break; 1629 1630 case 10000: 1631 if (is_workstation) { 1632 st->print("10"); 1633 } else { 1634 st->print("Server 2016"); 1635 } 1636 break; 1637 1638 default: 1639 // Unrecognized windows, print out its major and minor versions 1640 st->print("%d.%d", major_version, minor_version); 1641 break; 1642 } 1643 1644 // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could 1645 // find out whether we are running on 64 bit processor or not 1646 SYSTEM_INFO si; 1647 ZeroMemory(&si, sizeof(SYSTEM_INFO)); 1648 GetNativeSystemInfo(&si); 1649 if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) { 1650 st->print(" , 64 bit"); 1651 } 1652 1653 st->print(" Build %d", build_number); 
1654 st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor); 1655 st->cr(); 1656 } 1657 1658 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) { 1659 // Nothing to do for now. 1660 } 1661 1662 void os::get_summary_cpu_info(char* buf, size_t buflen) { 1663 HKEY key; 1664 DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE, 1665 "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key); 1666 if (status == ERROR_SUCCESS) { 1667 DWORD size = (DWORD)buflen; 1668 status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size); 1669 if (status != ERROR_SUCCESS) { 1670 strncpy(buf, "## __CPU__", buflen); 1671 } 1672 RegCloseKey(key); 1673 } else { 1674 // Put generic cpu info to return 1675 strncpy(buf, "## __CPU__", buflen); 1676 } 1677 } 1678 1679 void os::print_memory_info(outputStream* st) { 1680 st->print("Memory:"); 1681 st->print(" %dk page", os::vm_page_size()>>10); 1682 1683 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 1684 // value if total memory is larger than 4GB 1685 MEMORYSTATUSEX ms; 1686 ms.dwLength = sizeof(ms); 1687 GlobalMemoryStatusEx(&ms); 1688 1689 st->print(", physical %uk", os::physical_memory() >> 10); 1690 st->print("(%uk free)", os::available_memory() >> 10); 1691 1692 st->print(", swap %uk", ms.ullTotalPageFile >> 10); 1693 st->print("(%uk free)", ms.ullAvailPageFile >> 10); 1694 st->cr(); 1695 } 1696 1697 void os::print_siginfo(outputStream *st, const void* siginfo) { 1698 const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo; 1699 st->print("siginfo:"); 1700 1701 char tmp[64]; 1702 if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) { 1703 strcpy(tmp, "EXCEPTION_??"); 1704 } 1705 st->print(" %s (0x%x)", tmp, er->ExceptionCode); 1706 1707 if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION || 1708 er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) && 1709 er->NumberParameters >= 2) { 1710 switch (er->ExceptionInformation[0]) { 1711 case 0: st->print(", reading address"); break; 1712 case 1: st->print(", writing address"); break; 1713 case 8: st->print(", data execution prevention violation at address"); break; 1714 default: st->print(", ExceptionInformation=" INTPTR_FORMAT, 1715 er->ExceptionInformation[0]); 1716 } 1717 st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]); 1718 } else { 1719 int num = er->NumberParameters; 1720 if (num > 0) { 1721 st->print(", ExceptionInformation="); 1722 for (int i = 0; i < num; i++) { 1723 st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]); 1724 } 1725 } 1726 } 1727 st->cr(); 1728 } 1729 1730 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { 1731 // do nothing 1732 } 1733 1734 static char saved_jvm_path[MAX_PATH] = {0}; 1735 1736 // Find the full path to the current module, jvm.dll 1737 void os::jvm_path(char *buf, jint buflen) { 1738 // Error checking. 1739 if (buflen < MAX_PATH) { 1740 assert(false, "must use a large-enough buffer"); 1741 buf[0] = '\0'; 1742 return; 1743 } 1744 // Lazy resolve the path to current module. 1745 if (saved_jvm_path[0] != 0) { 1746 strcpy(buf, saved_jvm_path); 1747 return; 1748 } 1749 1750 buf[0] = '\0'; 1751 if (Arguments::sun_java_launcher_is_altjvm()) { 1752 // Support for the java launcher's '-XXaltjvm=<path>' option. Check 1753 // for a JAVA_HOME environment variable and fix up the path so it 1754 // looks like jvm.dll is installed there (append a fake suffix 1755 // hotspot/jvm.dll). 
1756 char* java_home_var = ::getenv("JAVA_HOME"); 1757 if (java_home_var != NULL && java_home_var[0] != 0 && 1758 strlen(java_home_var) < (size_t)buflen) { 1759 strncpy(buf, java_home_var, buflen); 1760 1761 // determine if this is a legacy image or modules image 1762 // modules image doesn't have "jre" subdirectory 1763 size_t len = strlen(buf); 1764 char* jrebin_p = buf + len; 1765 jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\"); 1766 if (0 != _access(buf, 0)) { 1767 jio_snprintf(jrebin_p, buflen-len, "\\bin\\"); 1768 } 1769 len = strlen(buf); 1770 jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll"); 1771 } 1772 } 1773 1774 if (buf[0] == '\0') { 1775 GetModuleFileName(vm_lib_handle, buf, buflen); 1776 } 1777 strncpy(saved_jvm_path, buf, MAX_PATH); 1778 saved_jvm_path[MAX_PATH - 1] = '\0'; 1779 } 1780 1781 1782 void os::print_jni_name_prefix_on(outputStream* st, int args_size) { 1783 #ifndef _WIN64 1784 st->print("_"); 1785 #endif 1786 } 1787 1788 1789 void os::print_jni_name_suffix_on(outputStream* st, int args_size) { 1790 #ifndef _WIN64 1791 st->print("@%d", args_size * sizeof(int)); 1792 #endif 1793 } 1794 1795 // This method is a copy of JDK's sysGetLastErrorString 1796 // from src/windows/hpi/src/system_md.c 1797 1798 size_t os::lasterror(char* buf, size_t len) { 1799 DWORD errval; 1800 1801 if ((errval = GetLastError()) != 0) { 1802 // DOS error 1803 size_t n = (size_t)FormatMessage( 1804 FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS, 1805 NULL, 1806 errval, 1807 0, 1808 buf, 1809 (DWORD)len, 1810 NULL); 1811 if (n > 3) { 1812 // Drop final '.', CR, LF 1813 if (buf[n - 1] == '\n') n--; 1814 if (buf[n - 1] == '\r') n--; 1815 if (buf[n - 1] == '.') n--; 1816 buf[n] = '\0'; 1817 } 1818 return n; 1819 } 1820 1821 if (errno != 0) { 1822 // C runtime error that has no corresponding DOS error code 1823 const char* s = os::strerror(errno); 1824 size_t n = strlen(s); 1825 if (n >= len) n = len - 1; 1826 strncpy(buf, s, n); 1827 buf[n] = '\0'; 1828 return n; 1829 } 1830 1831 return 0; 1832 } 1833 1834 int os::get_last_error() { 1835 DWORD error = GetLastError(); 1836 if (error == 0) { 1837 error = errno; 1838 } 1839 return (int)error; 1840 } 1841 1842 WindowsSemaphore::WindowsSemaphore(uint value) { 1843 _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL); 1844 1845 guarantee(_semaphore != NULL, "CreateSemaphore failed with error code: %lu", GetLastError()); 1846 } 1847 1848 WindowsSemaphore::~WindowsSemaphore() { 1849 ::CloseHandle(_semaphore); 1850 } 1851 1852 void WindowsSemaphore::signal(uint count) { 1853 if (count > 0) { 1854 BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL); 1855 1856 assert(ret != 0, "ReleaseSemaphore failed with error code: %lu", GetLastError()); 1857 } 1858 } 1859 1860 void WindowsSemaphore::wait() { 1861 DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE); 1862 assert(ret != WAIT_FAILED, "WaitForSingleObject failed with error code: %lu", GetLastError()); 1863 assert(ret == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", ret); 1864 } 1865 1866 bool WindowsSemaphore::trywait() { 1867 DWORD ret = ::WaitForSingleObject(_semaphore, 0); 1868 assert(ret != WAIT_FAILED, "WaitForSingleObject failed with error code: %lu", GetLastError()); 1869 return ret == WAIT_OBJECT_0; 1870 } 1871 1872 // sun.misc.Signal 1873 // NOTE that this is a workaround for an apparent kernel bug where if 1874 // a signal handler for SIGBREAK is installed then that signal handler 1875 // takes priority over the console control handler for 
CTRL_CLOSE_EVENT. 1876 // See bug 4416763. 1877 static void (*sigbreakHandler)(int) = NULL; 1878 1879 static void UserHandler(int sig, void *siginfo, void *context) { 1880 os::signal_notify(sig); 1881 // We need to reinstate the signal handler each time... 1882 os::signal(sig, (void*)UserHandler); 1883 } 1884 1885 void* os::user_handler() { 1886 return (void*) UserHandler; 1887 } 1888 1889 void* os::signal(int signal_number, void* handler) { 1890 if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) { 1891 void (*oldHandler)(int) = sigbreakHandler; 1892 sigbreakHandler = (void (*)(int)) handler; 1893 return (void*) oldHandler; 1894 } else { 1895 return (void*)::signal(signal_number, (void (*)(int))handler); 1896 } 1897 } 1898 1899 void os::signal_raise(int signal_number) { 1900 raise(signal_number); 1901 } 1902 1903 // The Win32 C runtime library maps all console control events other than ^C 1904 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close, 1905 // logoff, and shutdown events. We therefore install our own console handler 1906 // that raises SIGTERM for the latter cases. 1907 // 1908 static BOOL WINAPI consoleHandler(DWORD event) { 1909 switch (event) { 1910 case CTRL_C_EVENT: 1911 if (VMError::is_error_reported()) { 1912 // Ctrl-C is pressed during error reporting, likely because the error 1913 // handler fails to abort. Let VM die immediately. 1914 os::die(); 1915 } 1916 1917 os::signal_raise(SIGINT); 1918 return TRUE; 1919 break; 1920 case CTRL_BREAK_EVENT: 1921 if (sigbreakHandler != NULL) { 1922 (*sigbreakHandler)(SIGBREAK); 1923 } 1924 return TRUE; 1925 break; 1926 case CTRL_LOGOFF_EVENT: { 1927 // Don't terminate JVM if it is running in a non-interactive session, 1928 // such as a service process. 1929 USEROBJECTFLAGS flags; 1930 HANDLE handle = GetProcessWindowStation(); 1931 if (handle != NULL && 1932 GetUserObjectInformation(handle, UOI_FLAGS, &flags, 1933 sizeof(USEROBJECTFLAGS), NULL)) { 1934 // If it is a non-interactive session, let next handler to deal 1935 // with it. 1936 if ((flags.dwFlags & WSF_VISIBLE) == 0) { 1937 return FALSE; 1938 } 1939 } 1940 } 1941 case CTRL_CLOSE_EVENT: 1942 case CTRL_SHUTDOWN_EVENT: 1943 os::signal_raise(SIGTERM); 1944 return TRUE; 1945 break; 1946 default: 1947 break; 1948 } 1949 return FALSE; 1950 } 1951 1952 // The following code is moved from os.cpp for making this 1953 // code platform specific, which it is by its very nature. 1954 1955 // Return maximum OS signal used + 1 for internal use only 1956 // Used as exit signal for signal_thread 1957 int os::sigexitnum_pd() { 1958 return NSIG; 1959 } 1960 1961 // a counter for each possible signal value, including signal_thread exit signal 1962 static volatile jint pending_signals[NSIG+1] = { 0 }; 1963 static HANDLE sig_sem = NULL; 1964 1965 void os::signal_init_pd() { 1966 // Initialize signal structures 1967 memset((void*)pending_signals, 0, sizeof(pending_signals)); 1968 1969 sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL); 1970 1971 // Programs embedding the VM do not want it to attempt to receive 1972 // events like CTRL_LOGOFF_EVENT, which are used to implement the 1973 // shutdown hooks mechanism introduced in 1.3. 
For example, when 1974 // the VM is run as part of a Windows NT service (i.e., a servlet 1975 // engine in a web server), the correct behavior is for any console 1976 // control handler to return FALSE, not TRUE, because the OS's 1977 // "final" handler for such events allows the process to continue if 1978 // it is a service (while terminating it if it is not a service). 1979 // To make this behavior uniform and the mechanism simpler, we 1980 // completely disable the VM's usage of these console events if -Xrs 1981 // (=ReduceSignalUsage) is specified. This means, for example, that 1982 // the CTRL-BREAK thread dump mechanism is also disabled in this 1983 // case. See bugs 4323062, 4345157, and related bugs. 1984 1985 if (!ReduceSignalUsage) { 1986 // Add a CTRL-C handler 1987 SetConsoleCtrlHandler(consoleHandler, TRUE); 1988 } 1989 } 1990 1991 void os::signal_notify(int signal_number) { 1992 BOOL ret; 1993 if (sig_sem != NULL) { 1994 Atomic::inc(&pending_signals[signal_number]); 1995 ret = ::ReleaseSemaphore(sig_sem, 1, NULL); 1996 assert(ret != 0, "ReleaseSemaphore() failed"); 1997 } 1998 } 1999 2000 static int check_pending_signals(bool wait_for_signal) { 2001 DWORD ret; 2002 while (true) { 2003 for (int i = 0; i < NSIG + 1; i++) { 2004 jint n = pending_signals[i]; 2005 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) { 2006 return i; 2007 } 2008 } 2009 if (!wait_for_signal) { 2010 return -1; 2011 } 2012 2013 JavaThread *thread = JavaThread::current(); 2014 2015 ThreadBlockInVM tbivm(thread); 2016 2017 bool threadIsSuspended; 2018 do { 2019 thread->set_suspend_equivalent(); 2020 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 2021 ret = ::WaitForSingleObject(sig_sem, INFINITE); 2022 assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed"); 2023 2024 // were we externally suspended while we were waiting? 2025 threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); 2026 if (threadIsSuspended) { 2027 // The semaphore has been incremented, but while we were waiting 2028 // another thread suspended us. We don't want to continue running 2029 // while suspended because that would surprise the thread that 2030 // suspended us. 2031 ret = ::ReleaseSemaphore(sig_sem, 1, NULL); 2032 assert(ret != 0, "ReleaseSemaphore() failed"); 2033 2034 thread->java_suspend_self(); 2035 } 2036 } while (threadIsSuspended); 2037 } 2038 } 2039 2040 int os::signal_lookup() { 2041 return check_pending_signals(false); 2042 } 2043 2044 int os::signal_wait() { 2045 return check_pending_signals(true); 2046 } 2047 2048 // Implicit OS exception handling 2049 2050 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, 2051 address handler) { 2052 JavaThread* thread = (JavaThread*) Thread::current_or_null(); 2053 // Save pc in thread 2054 #ifdef _M_AMD64 2055 // Do not blow up if no thread info available. 2056 if (thread) { 2057 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip); 2058 } 2059 // Set pc to handler 2060 exceptionInfo->ContextRecord->Rip = (DWORD64)handler; 2061 #else 2062 // Do not blow up if no thread info available. 
2063 if (thread) { 2064 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip); 2065 } 2066 // Set pc to handler 2067 exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler; 2068 #endif 2069 2070 // Continue the execution 2071 return EXCEPTION_CONTINUE_EXECUTION; 2072 } 2073 2074 2075 // Used for PostMortemDump 2076 extern "C" void safepoints(); 2077 extern "C" void find(int x); 2078 extern "C" void events(); 2079 2080 // According to Windows API documentation, an illegal instruction sequence should generate 2081 // the 0xC000001C exception code. However, real world experience shows that occasionnaly 2082 // the execution of an illegal instruction can generate the exception code 0xC000001E. This 2083 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems). 2084 2085 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E 2086 2087 // From "Execution Protection in the Windows Operating System" draft 0.35 2088 // Once a system header becomes available, the "real" define should be 2089 // included or copied here. 2090 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08 2091 2092 // Windows Vista/2008 heap corruption check 2093 #define EXCEPTION_HEAP_CORRUPTION 0xC0000374 2094 2095 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual 2096 // C++ compiler contain this error code. Because this is a compiler-generated 2097 // error, the code is not listed in the Win32 API header files. 2098 // The code is actually a cryptic mnemonic device, with the initial "E" 2099 // standing for "exception" and the final 3 bytes (0x6D7363) representing the 2100 // ASCII values of "msc". 2101 2102 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363 2103 2104 #define def_excpt(val) { #val, (val) } 2105 2106 static const struct { char* name; uint number; } exceptlabels[] = { 2107 def_excpt(EXCEPTION_ACCESS_VIOLATION), 2108 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), 2109 def_excpt(EXCEPTION_BREAKPOINT), 2110 def_excpt(EXCEPTION_SINGLE_STEP), 2111 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), 2112 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND), 2113 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO), 2114 def_excpt(EXCEPTION_FLT_INEXACT_RESULT), 2115 def_excpt(EXCEPTION_FLT_INVALID_OPERATION), 2116 def_excpt(EXCEPTION_FLT_OVERFLOW), 2117 def_excpt(EXCEPTION_FLT_STACK_CHECK), 2118 def_excpt(EXCEPTION_FLT_UNDERFLOW), 2119 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO), 2120 def_excpt(EXCEPTION_INT_OVERFLOW), 2121 def_excpt(EXCEPTION_PRIV_INSTRUCTION), 2122 def_excpt(EXCEPTION_IN_PAGE_ERROR), 2123 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION), 2124 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2), 2125 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION), 2126 def_excpt(EXCEPTION_STACK_OVERFLOW), 2127 def_excpt(EXCEPTION_INVALID_DISPOSITION), 2128 def_excpt(EXCEPTION_GUARD_PAGE), 2129 def_excpt(EXCEPTION_INVALID_HANDLE), 2130 def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION), 2131 def_excpt(EXCEPTION_HEAP_CORRUPTION) 2132 }; 2133 2134 #undef def_excpt 2135 2136 const char* os::exception_name(int exception_code, char *buf, size_t size) { 2137 uint code = static_cast<uint>(exception_code); 2138 for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) { 2139 if (exceptlabels[i].number == code) { 2140 jio_snprintf(buf, size, "%s", exceptlabels[i].name); 2141 return buf; 2142 } 2143 } 2144 2145 return NULL; 2146 } 2147 2148 //----------------------------------------------------------------------------- 2149 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2150 // 
handle exception caused by idiv; should only happen for -MinInt/-1 2151 // (division by zero is handled explicitly) 2152 #ifdef _M_AMD64 2153 PCONTEXT ctx = exceptionInfo->ContextRecord; 2154 address pc = (address)ctx->Rip; 2155 assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode"); 2156 assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2157 if (pc[0] == 0xF7) { 2158 // set correct result values and continue after idiv instruction 2159 ctx->Rip = (DWORD64)pc + 2; // idiv reg, reg is 2 bytes 2160 } else { 2161 ctx->Rip = (DWORD64)pc + 3; // REX idiv reg, reg is 3 bytes 2162 } 2163 // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation) 2164 // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the 2165 // idiv opcode (0xF7). 2166 ctx->Rdx = (DWORD)0; // remainder 2167 // Continue the execution 2168 #else 2169 PCONTEXT ctx = exceptionInfo->ContextRecord; 2170 address pc = (address)ctx->Eip; 2171 assert(pc[0] == 0xF7, "not an idiv opcode"); 2172 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2173 assert(ctx->Eax == min_jint, "unexpected idiv exception"); 2174 // set correct result values and continue after idiv instruction 2175 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2176 ctx->Eax = (DWORD)min_jint; // result 2177 ctx->Edx = (DWORD)0; // remainder 2178 // Continue the execution 2179 #endif 2180 return EXCEPTION_CONTINUE_EXECUTION; 2181 } 2182 2183 //----------------------------------------------------------------------------- 2184 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2185 PCONTEXT ctx = exceptionInfo->ContextRecord; 2186 #ifndef _WIN64 2187 // handle exception caused by native method modifying control word 2188 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2189 2190 switch (exception_code) { 2191 case EXCEPTION_FLT_DENORMAL_OPERAND: 2192 case EXCEPTION_FLT_DIVIDE_BY_ZERO: 2193 case EXCEPTION_FLT_INEXACT_RESULT: 2194 case EXCEPTION_FLT_INVALID_OPERATION: 2195 case EXCEPTION_FLT_OVERFLOW: 2196 case EXCEPTION_FLT_STACK_CHECK: 2197 case EXCEPTION_FLT_UNDERFLOW: 2198 jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std()); 2199 if (fp_control_word != ctx->FloatSave.ControlWord) { 2200 // Restore FPCW and mask out FLT exceptions 2201 ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0; 2202 // Mask out pending FLT exceptions 2203 ctx->FloatSave.StatusWord &= 0xffffff00; 2204 return EXCEPTION_CONTINUE_EXECUTION; 2205 } 2206 } 2207 2208 if (prev_uef_handler != NULL) { 2209 // We didn't handle this exception so pass it to the previous 2210 // UnhandledExceptionFilter. 
2211 return (prev_uef_handler)(exceptionInfo); 2212 } 2213 #else // !_WIN64 2214 // On Windows, the mxcsr control bits are non-volatile across calls 2215 // See also CR 6192333 2216 // 2217 jint MxCsr = INITIAL_MXCSR; 2218 // we can't use StubRoutines::addr_mxcsr_std() 2219 // because in Win64 mxcsr is not saved there 2220 if (MxCsr != ctx->MxCsr) { 2221 ctx->MxCsr = MxCsr; 2222 return EXCEPTION_CONTINUE_EXECUTION; 2223 } 2224 #endif // !_WIN64 2225 2226 return EXCEPTION_CONTINUE_SEARCH; 2227 } 2228 2229 static inline void report_error(Thread* t, DWORD exception_code, 2230 address addr, void* siginfo, void* context) { 2231 VMError::report_and_die(t, exception_code, addr, siginfo, context); 2232 2233 // If UseOsErrorReporting, this will return here and save the error file 2234 // somewhere where we can find it in the minidump. 2235 } 2236 2237 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread, 2238 struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) { 2239 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2240 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2241 if (Interpreter::contains(pc)) { 2242 *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord); 2243 if (!fr->is_first_java_frame()) { 2244 // get_frame_at_stack_banging_point() is only called when we 2245 // have well defined stacks so java_sender() calls do not need 2246 // to assert safe_for_sender() first. 2247 *fr = fr->java_sender(); 2248 } 2249 } else { 2250 // more complex code with compiled code 2251 assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above"); 2252 CodeBlob* cb = CodeCache::find_blob(pc); 2253 if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) { 2254 // Not sure where the pc points to, fallback to default 2255 // stack overflow handling 2256 return false; 2257 } else { 2258 *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord); 2259 // in compiled code, the stack banging is performed just after the return pc 2260 // has been pushed on the stack 2261 *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp())); 2262 if (!fr->is_java_frame()) { 2263 // See java_sender() comment above. 2264 *fr = fr->java_sender(); 2265 } 2266 } 2267 } 2268 assert(fr->is_java_frame(), "Safety check"); 2269 return true; 2270 } 2271 2272 //----------------------------------------------------------------------------- 2273 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { 2274 if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH; 2275 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2276 #ifdef _M_AMD64 2277 address pc = (address) exceptionInfo->ContextRecord->Rip; 2278 #else 2279 address pc = (address) exceptionInfo->ContextRecord->Eip; 2280 #endif 2281 Thread* t = Thread::current_or_null_safe(); 2282 2283 // Handle SafeFetch32 and SafeFetchN exceptions. 2284 if (StubRoutines::is_safefetch_fault(pc)) { 2285 return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc)); 2286 } 2287 2288 #ifndef _WIN64 2289 // Execution protection violation - win32 running on AMD64 only 2290 // Handled first to avoid misdiagnosis as a "normal" access violation; 2291 // This is safe to do because we have a new/unique ExceptionInformation 2292 // code for this condition. 
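  // In outline, a condensed sketch of the checks that follow (the real code
  // adds the pc/addr sanity checks and the last_addr bookkeeping):
  //
  //   if (exceptionRecord->ExceptionInformation[0] == EXCEPTION_INFO_EXEC_VIOLATION) {
  //     address addr = (address) exceptionRecord->ExceptionInformation[1];
  //     if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
  //         (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
  //       os::protect_memory((char*) align_down(addr, page_size), page_size, os::MEM_PROT_RWX);
  //       return EXCEPTION_CONTINUE_EXECUTION;  // retry the instruction, now on an RWX page
  //     }
  //   }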
2293 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2294 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2295 int exception_subcode = (int) exceptionRecord->ExceptionInformation[0]; 2296 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2297 2298 if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) { 2299 int page_size = os::vm_page_size(); 2300 2301 // Make sure the pc and the faulting address are sane. 2302 // 2303 // If an instruction spans a page boundary, and the page containing 2304 // the beginning of the instruction is executable but the following 2305 // page is not, the pc and the faulting address might be slightly 2306 // different - we still want to unguard the 2nd page in this case. 2307 // 2308 // 15 bytes seems to be a (very) safe value for max instruction size. 2309 bool pc_is_near_addr = 2310 (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15); 2311 bool instr_spans_page_boundary = 2312 (align_down((intptr_t) pc ^ (intptr_t) addr, 2313 (intptr_t) page_size) > 0); 2314 2315 if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) { 2316 static volatile address last_addr = 2317 (address) os::non_memory_address_word(); 2318 2319 // In conservative mode, don't unguard unless the address is in the VM 2320 if (UnguardOnExecutionViolation > 0 && addr != last_addr && 2321 (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) { 2322 2323 // Set memory to RWX and retry 2324 address page_start = align_down(addr, page_size); 2325 bool res = os::protect_memory((char*) page_start, page_size, 2326 os::MEM_PROT_RWX); 2327 2328 log_debug(os)("Execution protection violation " 2329 "at " INTPTR_FORMAT 2330 ", unguarding " INTPTR_FORMAT ": %s", p2i(addr), 2331 p2i(page_start), (res ? "success" : os::strerror(errno))); 2332 2333 // Set last_addr so if we fault again at the same address, we don't 2334 // end up in an endless loop. 2335 // 2336 // There are two potential complications here. Two threads trapping 2337 // at the same address at the same time could cause one of the 2338 // threads to think it already unguarded, and abort the VM. Likely 2339 // very rare. 2340 // 2341 // The other race involves two threads alternately trapping at 2342 // different addresses and failing to unguard the page, resulting in 2343 // an endless loop. This condition is probably even more unlikely 2344 // than the first. 2345 // 2346 // Although both cases could be avoided by using locks or thread 2347 // local last_addr, these solutions are unnecessary complication: 2348 // this handler is a best-effort safety net, not a complete solution. 2349 // It is disabled by default and should only be used as a workaround 2350 // in case we missed any no-execute-unsafe VM code. 2351 2352 last_addr = addr; 2353 2354 return EXCEPTION_CONTINUE_EXECUTION; 2355 } 2356 } 2357 2358 // Last unguard failed or not unguarding 2359 tty->print_raw_cr("Execution protection violation"); 2360 report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord, 2361 exceptionInfo->ContextRecord); 2362 return EXCEPTION_CONTINUE_SEARCH; 2363 } 2364 } 2365 #endif // _WIN64 2366 2367 // Check to see if we caught the safepoint code in the 2368 // process of write protecting the memory serialization page. 2369 // It write enables the page immediately after protecting it 2370 // so just return. 
2371 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2372 if (t != NULL && t->is_Java_thread()) { 2373 JavaThread* thread = (JavaThread*) t; 2374 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2375 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2376 if (os::is_memory_serialize_page(thread, addr)) { 2377 // Block current thread until the memory serialize page permission restored. 2378 os::block_on_serialize_page_trap(); 2379 return EXCEPTION_CONTINUE_EXECUTION; 2380 } 2381 } 2382 } 2383 2384 if ((exception_code == EXCEPTION_ACCESS_VIOLATION) && 2385 VM_Version::is_cpuinfo_segv_addr(pc)) { 2386 // Verify that OS save/restore AVX registers. 2387 return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr()); 2388 } 2389 2390 if (t != NULL && t->is_Java_thread()) { 2391 JavaThread* thread = (JavaThread*) t; 2392 bool in_java = thread->thread_state() == _thread_in_Java; 2393 2394 // Handle potential stack overflows up front. 2395 if (exception_code == EXCEPTION_STACK_OVERFLOW) { 2396 if (thread->stack_guards_enabled()) { 2397 if (in_java) { 2398 frame fr; 2399 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2400 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2401 if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) { 2402 assert(fr.is_java_frame(), "Must be a Java frame"); 2403 SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr); 2404 } 2405 } 2406 // Yellow zone violation. The o/s has unprotected the first yellow 2407 // zone page for us. Note: must call disable_stack_yellow_zone to 2408 // update the enabled status, even if the zone contains only one page. 2409 assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages"); 2410 thread->disable_stack_yellow_reserved_zone(); 2411 // If not in java code, return and hope for the best. 2412 return in_java 2413 ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)) 2414 : EXCEPTION_CONTINUE_EXECUTION; 2415 } else { 2416 // Fatal red zone violation. 2417 thread->disable_stack_red_zone(); 2418 tty->print_raw_cr("An unrecoverable stack overflow has occurred."); 2419 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2420 exceptionInfo->ContextRecord); 2421 return EXCEPTION_CONTINUE_SEARCH; 2422 } 2423 } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2424 // Either stack overflow or null pointer exception. 2425 if (in_java) { 2426 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2427 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2428 address stack_end = thread->stack_end(); 2429 if (addr < stack_end && addr >= stack_end - os::vm_page_size()) { 2430 // Stack overflow. 2431 assert(!os::uses_stack_guard_pages(), 2432 "should be caught by red zone code above."); 2433 return Handle_Exception(exceptionInfo, 2434 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2435 } 2436 // Check for safepoint polling and implicit null 2437 // We only expect null pointers in the stubs (vtable) 2438 // the rest are checked explicitly now. 
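          // Background (sketch of the poll check below): JIT-compiled code polls for
          // safepoints by loading from a dedicated polling page. When the VM arms a
          // safepoint it protects that page, so the poll load faults and lands here;
          // os::is_poll_address(addr) recognizes such faults and the thread is resumed
          // at the poll stub instead of being treated as a null dereference:
          //
          //   if (os::is_poll_address(addr)) {
          //     return Handle_Exception(exceptionInfo, SharedRuntime::get_poll_stub(pc));
          //   }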
2439 CodeBlob* cb = CodeCache::find_blob(pc); 2440 if (cb != NULL) { 2441 if (os::is_poll_address(addr)) { 2442 address stub = SharedRuntime::get_poll_stub(pc); 2443 return Handle_Exception(exceptionInfo, stub); 2444 } 2445 } 2446 { 2447 #ifdef _WIN64 2448 // If it's a legal stack address map the entire region in 2449 // 2450 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2451 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2452 if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) { 2453 addr = (address)((uintptr_t)addr & 2454 (~((uintptr_t)os::vm_page_size() - (uintptr_t)1))); 2455 os::commit_memory((char *)addr, thread->stack_base() - addr, 2456 !ExecMem); 2457 return EXCEPTION_CONTINUE_EXECUTION; 2458 } else 2459 #endif 2460 { 2461 // Null pointer exception. 2462 if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) { 2463 address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); 2464 if (stub != NULL) return Handle_Exception(exceptionInfo, stub); 2465 } 2466 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2467 exceptionInfo->ContextRecord); 2468 return EXCEPTION_CONTINUE_SEARCH; 2469 } 2470 } 2471 } 2472 2473 #ifdef _WIN64 2474 // Special care for fast JNI field accessors. 2475 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks 2476 // in and the heap gets shrunk before the field access. 2477 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2478 address addr = JNI_FastGetField::find_slowcase_pc(pc); 2479 if (addr != (address)-1) { 2480 return Handle_Exception(exceptionInfo, addr); 2481 } 2482 } 2483 #endif 2484 2485 // Stack overflow or null pointer exception in native code. 2486 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2487 exceptionInfo->ContextRecord); 2488 return EXCEPTION_CONTINUE_SEARCH; 2489 } // /EXCEPTION_ACCESS_VIOLATION 2490 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 2491 2492 if (in_java) { 2493 switch (exception_code) { 2494 case EXCEPTION_INT_DIVIDE_BY_ZERO: 2495 return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO)); 2496 2497 case EXCEPTION_INT_OVERFLOW: 2498 return Handle_IDiv_Exception(exceptionInfo); 2499 2500 } // switch 2501 } 2502 if (((thread->thread_state() == _thread_in_Java) || 2503 (thread->thread_state() == _thread_in_native)) && 2504 exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) { 2505 LONG result=Handle_FLT_Exception(exceptionInfo); 2506 if (result==EXCEPTION_CONTINUE_EXECUTION) return result; 2507 } 2508 } 2509 2510 if (exception_code != EXCEPTION_BREAKPOINT) { 2511 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2512 exceptionInfo->ContextRecord); 2513 } 2514 return EXCEPTION_CONTINUE_SEARCH; 2515 } 2516 2517 #ifndef _WIN64 2518 // Special care for fast JNI accessors. 2519 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and 2520 // the heap gets shrunk before the field access. 2521 // Need to install our own structured exception handler since native code may 2522 // install its own. 
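// For reference, a sketch of what the DEFINE_FAST_GETFIELD macro further below
// produces, expanded by hand for the jint case (illustrative only):
//
//   jint JNICALL jni_fast_GetIntField_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) {
//     __try {
//       return (*JNI_FastGetField::jni_fast_GetIntField_fp)(env, obj, fieldID);
//     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) {
//     }
//     return 0;   // not normally reached; the filter resumes at the slow-case pc
//   }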
2523 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { 2524 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2525 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2526 address pc = (address) exceptionInfo->ContextRecord->Eip; 2527 address addr = JNI_FastGetField::find_slowcase_pc(pc); 2528 if (addr != (address)-1) { 2529 return Handle_Exception(exceptionInfo, addr); 2530 } 2531 } 2532 return EXCEPTION_CONTINUE_SEARCH; 2533 } 2534 2535 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result) \ 2536 Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, \ 2537 jobject obj, \ 2538 jfieldID fieldID) { \ 2539 __try { \ 2540 return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, \ 2541 obj, \ 2542 fieldID); \ 2543 } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*) \ 2544 _exception_info())) { \ 2545 } \ 2546 return 0; \ 2547 } 2548 2549 DEFINE_FAST_GETFIELD(jboolean, bool, Boolean) 2550 DEFINE_FAST_GETFIELD(jbyte, byte, Byte) 2551 DEFINE_FAST_GETFIELD(jchar, char, Char) 2552 DEFINE_FAST_GETFIELD(jshort, short, Short) 2553 DEFINE_FAST_GETFIELD(jint, int, Int) 2554 DEFINE_FAST_GETFIELD(jlong, long, Long) 2555 DEFINE_FAST_GETFIELD(jfloat, float, Float) 2556 DEFINE_FAST_GETFIELD(jdouble, double, Double) 2557 2558 address os::win32::fast_jni_accessor_wrapper(BasicType type) { 2559 switch (type) { 2560 case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper; 2561 case T_BYTE: return (address)jni_fast_GetByteField_wrapper; 2562 case T_CHAR: return (address)jni_fast_GetCharField_wrapper; 2563 case T_SHORT: return (address)jni_fast_GetShortField_wrapper; 2564 case T_INT: return (address)jni_fast_GetIntField_wrapper; 2565 case T_LONG: return (address)jni_fast_GetLongField_wrapper; 2566 case T_FLOAT: return (address)jni_fast_GetFloatField_wrapper; 2567 case T_DOUBLE: return (address)jni_fast_GetDoubleField_wrapper; 2568 default: ShouldNotReachHere(); 2569 } 2570 return (address)-1; 2571 } 2572 #endif 2573 2574 // Virtual Memory 2575 2576 int os::vm_page_size() { return os::win32::vm_page_size(); } 2577 int os::vm_allocation_granularity() { 2578 return os::win32::vm_allocation_granularity(); 2579 } 2580 2581 // Windows large page support is available on Windows 2003. In order to use 2582 // large page memory, the administrator must first assign additional privilege 2583 // to the user: 2584 // + select Control Panel -> Administrative Tools -> Local Security Policy 2585 // + select Local Policies -> User Rights Assignment 2586 // + double click "Lock pages in memory", add users and/or groups 2587 // + reboot 2588 // Note the above steps are needed for administrator as well, as administrators 2589 // by default do not have the privilege to lock pages in memory. 2590 // 2591 // Note about Windows 2003: although the API supports committing large page 2592 // memory on a page-by-page basis and VirtualAlloc() returns success under this 2593 // scenario, I found through experiment it only uses large page if the entire 2594 // memory region is reserved and committed in a single VirtualAlloc() call. 2595 // This makes Windows large page support more or less like Solaris ISM, in 2596 // that the entire heap must be committed upfront. This probably will change 2597 // in the future, if so the code below needs to be revisited. 
2598 2599 #ifndef MEM_LARGE_PAGES 2600 #define MEM_LARGE_PAGES 0x20000000 2601 #endif 2602 2603 static HANDLE _hProcess; 2604 static HANDLE _hToken; 2605 2606 // Container for NUMA node list info 2607 class NUMANodeListHolder { 2608 private: 2609 int *_numa_used_node_list; // allocated below 2610 int _numa_used_node_count; 2611 2612 void free_node_list() { 2613 if (_numa_used_node_list != NULL) { 2614 FREE_C_HEAP_ARRAY(int, _numa_used_node_list); 2615 } 2616 } 2617 2618 public: 2619 NUMANodeListHolder() { 2620 _numa_used_node_count = 0; 2621 _numa_used_node_list = NULL; 2622 // do rest of initialization in build routine (after function pointers are set up) 2623 } 2624 2625 ~NUMANodeListHolder() { 2626 free_node_list(); 2627 } 2628 2629 bool build() { 2630 DWORD_PTR proc_aff_mask; 2631 DWORD_PTR sys_aff_mask; 2632 if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false; 2633 ULONG highest_node_number; 2634 if (!GetNumaHighestNodeNumber(&highest_node_number)) return false; 2635 free_node_list(); 2636 _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal); 2637 for (unsigned int i = 0; i <= highest_node_number; i++) { 2638 ULONGLONG proc_mask_numa_node; 2639 if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false; 2640 if ((proc_aff_mask & proc_mask_numa_node)!=0) { 2641 _numa_used_node_list[_numa_used_node_count++] = i; 2642 } 2643 } 2644 return (_numa_used_node_count > 1); 2645 } 2646 2647 int get_count() { return _numa_used_node_count; } 2648 int get_node_list_entry(int n) { 2649 // for indexes out of range, returns -1 2650 return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1); 2651 } 2652 2653 } numa_node_list_holder; 2654 2655 2656 2657 static size_t _large_page_size = 0; 2658 2659 static bool request_lock_memory_privilege() { 2660 _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, 2661 os::current_process_id()); 2662 2663 LUID luid; 2664 if (_hProcess != NULL && 2665 OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) && 2666 LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) { 2667 2668 TOKEN_PRIVILEGES tp; 2669 tp.PrivilegeCount = 1; 2670 tp.Privileges[0].Luid = luid; 2671 tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; 2672 2673 // AdjustTokenPrivileges() may return TRUE even when it couldn't change the 2674 // privilege. Check GetLastError() too. See MSDN document. 2675 if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) && 2676 (GetLastError() == ERROR_SUCCESS)) { 2677 return true; 2678 } 2679 } 2680 2681 return false; 2682 } 2683 2684 static void cleanup_after_large_page_init() { 2685 if (_hProcess) CloseHandle(_hProcess); 2686 _hProcess = NULL; 2687 if (_hToken) CloseHandle(_hToken); 2688 _hToken = NULL; 2689 } 2690 2691 static bool numa_interleaving_init() { 2692 bool success = false; 2693 bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving); 2694 2695 // print a warning if UseNUMAInterleaving flag is specified on command line 2696 bool warn_on_failure = use_numa_interleaving_specified; 2697 #define WARN(msg) if (warn_on_failure) { warning(msg); } 2698 2699 // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages) 2700 size_t min_interleave_granularity = UseLargePages ? 
_large_page_size : os::vm_allocation_granularity(); 2701 NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity); 2702 2703 if (numa_node_list_holder.build()) { 2704 if (log_is_enabled(Debug, os, cpu)) { 2705 Log(os, cpu) log; 2706 log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count()); 2707 for (int i = 0; i < numa_node_list_holder.get_count(); i++) { 2708 log.debug(" %d ", numa_node_list_holder.get_node_list_entry(i)); 2709 } 2710 } 2711 success = true; 2712 } else { 2713 WARN("Process does not cover multiple NUMA nodes."); 2714 } 2715 if (!success) { 2716 if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag."); 2717 } 2718 return success; 2719 #undef WARN 2720 } 2721 2722 // this routine is used whenever we need to reserve a contiguous VA range 2723 // but we need to make separate VirtualAlloc calls for each piece of the range 2724 // Reasons for doing this: 2725 // * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise) 2726 // * UseNUMAInterleaving requires a separate node for each piece 2727 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, 2728 DWORD prot, 2729 bool should_inject_error = false) { 2730 char * p_buf; 2731 // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size 2732 size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity(); 2733 size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size; 2734 2735 // first reserve enough address space in advance since we want to be 2736 // able to break a single contiguous virtual address range into multiple 2737 // large page commits but WS2003 does not allow reserving large page space 2738 // so we just use 4K pages for reserve, this gives us a legal contiguous 2739 // address space. then we will deallocate that reservation, and re alloc 2740 // using large pages 2741 const size_t size_of_reserve = bytes + chunk_size; 2742 if (bytes > size_of_reserve) { 2743 // Overflowed. 2744 return NULL; 2745 } 2746 p_buf = (char *) VirtualAlloc(addr, 2747 size_of_reserve, // size of Reserve 2748 MEM_RESERVE, 2749 PAGE_READWRITE); 2750 // If reservation failed, return NULL 2751 if (p_buf == NULL) return NULL; 2752 MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC); 2753 os::release_memory(p_buf, bytes + chunk_size); 2754 2755 // we still need to round up to a page boundary (in case we are using large pages) 2756 // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size) 2757 // instead we handle this in the bytes_to_rq computation below 2758 p_buf = align_up(p_buf, page_size); 2759 2760 // now go through and allocate one chunk at a time until all bytes are 2761 // allocated 2762 size_t bytes_remaining = bytes; 2763 // An overflow of align_up() would have been caught above 2764 // in the calculation of size_of_reserve. 
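  // Worked example (hypothetical numbers): with UseNUMAInterleaving and a
  // chunk_size of 2M, a 5M request starting at a 2M-aligned p_buf is carved
  // into three commits of 2M, 2M and 1M; each iteration below computes
  //   bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size))
  // so every piece ends on a chunk boundary except possibly the last, and in the
  // NUMA case successive pieces go to successive nodes via VirtualAllocExNuma().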
2765 char * next_alloc_addr = p_buf; 2766 HANDLE hProc = GetCurrentProcess(); 2767 2768 #ifdef ASSERT 2769 // Variable for the failure injection 2770 int ran_num = os::random(); 2771 size_t fail_after = ran_num % bytes; 2772 #endif 2773 2774 int count=0; 2775 while (bytes_remaining) { 2776 // select bytes_to_rq to get to the next chunk_size boundary 2777 2778 size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size)); 2779 // Note allocate and commit 2780 char * p_new; 2781 2782 #ifdef ASSERT 2783 bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after); 2784 #else 2785 const bool inject_error_now = false; 2786 #endif 2787 2788 if (inject_error_now) { 2789 p_new = NULL; 2790 } else { 2791 if (!UseNUMAInterleaving) { 2792 p_new = (char *) VirtualAlloc(next_alloc_addr, 2793 bytes_to_rq, 2794 flags, 2795 prot); 2796 } else { 2797 // get the next node to use from the used_node_list 2798 assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected"); 2799 DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count()); 2800 p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node); 2801 } 2802 } 2803 2804 if (p_new == NULL) { 2805 // Free any allocated pages 2806 if (next_alloc_addr > p_buf) { 2807 // Some memory was committed so release it. 2808 size_t bytes_to_release = bytes - bytes_remaining; 2809 // NMT has yet to record any individual blocks, so it 2810 // need to create a dummy 'reserve' record to match 2811 // the release. 2812 MemTracker::record_virtual_memory_reserve((address)p_buf, 2813 bytes_to_release, CALLER_PC); 2814 os::release_memory(p_buf, bytes_to_release); 2815 } 2816 #ifdef ASSERT 2817 if (should_inject_error) { 2818 log_develop_debug(pagesize)("Reserving pages individually failed."); 2819 } 2820 #endif 2821 return NULL; 2822 } 2823 2824 bytes_remaining -= bytes_to_rq; 2825 next_alloc_addr += bytes_to_rq; 2826 count++; 2827 } 2828 // Although the memory is allocated individually, it is returned as one. 2829 // NMT records it as one block. 
2830 if ((flags & MEM_COMMIT) != 0) { 2831 MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC); 2832 } else { 2833 MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC); 2834 } 2835 2836 // made it this far, success 2837 return p_buf; 2838 } 2839 2840 2841 2842 void os::large_page_init() { 2843 if (!UseLargePages) return; 2844 2845 // print a warning if any large page related flag is specified on command line 2846 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) || 2847 !FLAG_IS_DEFAULT(LargePageSizeInBytes); 2848 bool success = false; 2849 2850 #define WARN(msg) if (warn_on_failure) { warning(msg); } 2851 if (request_lock_memory_privilege()) { 2852 size_t s = GetLargePageMinimum(); 2853 if (s) { 2854 #if defined(IA32) || defined(AMD64) 2855 if (s > 4*M || LargePageSizeInBytes > 4*M) { 2856 WARN("JVM cannot use large pages bigger than 4mb."); 2857 } else { 2858 #endif 2859 if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) { 2860 _large_page_size = LargePageSizeInBytes; 2861 } else { 2862 _large_page_size = s; 2863 } 2864 success = true; 2865 #if defined(IA32) || defined(AMD64) 2866 } 2867 #endif 2868 } else { 2869 WARN("Large page is not supported by the processor."); 2870 } 2871 } else { 2872 WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory."); 2873 } 2874 #undef WARN 2875 2876 const size_t default_page_size = (size_t) vm_page_size(); 2877 if (success && _large_page_size > default_page_size) { 2878 _page_sizes[0] = _large_page_size; 2879 _page_sizes[1] = default_page_size; 2880 _page_sizes[2] = 0; 2881 } 2882 2883 cleanup_after_large_page_init(); 2884 UseLargePages = success; 2885 } 2886 2887 // On win32, one cannot release just a part of reserved memory, it's an 2888 // all or nothing deal. When we split a reservation, we must break the 2889 // reservation into two reservations. 2890 void os::pd_split_reserved_memory(char *base, size_t size, size_t split, 2891 bool realloc) { 2892 if (size > 0) { 2893 release_memory(base, size); 2894 if (realloc) { 2895 reserve_memory(split, base); 2896 } 2897 if (size != split) { 2898 reserve_memory(size - split, base + split); 2899 } 2900 } 2901 } 2902 2903 // Multiple threads can race in this code but it's not possible to unmap small sections of 2904 // virtual space to get requested alignment, like posix-like os's. 2905 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe. 
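// Sketch of the scheme used below, with hypothetical numbers: to obtain 1M
// aligned to 1M, first reserve extra_size = 1M + 1M anywhere, compute
//   aligned_base = align_up(extra_base, alignment);
// release the whole over-sized reservation, and immediately re-reserve exactly
// 1M at aligned_base. If another thread grabs that range in the window between
// the release and the re-reserve, the re-reservation returns NULL and the loop
// simply tries again with a fresh over-sized reservation.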
2906 char* os::reserve_memory_aligned(size_t size, size_t alignment) { 2907 assert((alignment & (os::vm_allocation_granularity() - 1)) == 0, 2908 "Alignment must be a multiple of allocation granularity (page size)"); 2909 assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned"); 2910 2911 size_t extra_size = size + alignment; 2912 assert(extra_size >= size, "overflow, size is too large to allow alignment"); 2913 2914 char* aligned_base = NULL; 2915 2916 do { 2917 char* extra_base = os::reserve_memory(extra_size, NULL, alignment); 2918 if (extra_base == NULL) { 2919 return NULL; 2920 } 2921 // Do manual alignment 2922 aligned_base = align_up(extra_base, alignment); 2923 2924 os::release_memory(extra_base, extra_size); 2925 2926 aligned_base = os::reserve_memory(size, aligned_base); 2927 2928 } while (aligned_base == NULL); 2929 2930 return aligned_base; 2931 } 2932 2933 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) { 2934 assert((size_t)addr % os::vm_allocation_granularity() == 0, 2935 "reserve alignment"); 2936 assert(bytes % os::vm_page_size() == 0, "reserve page size"); 2937 char* res; 2938 // note that if UseLargePages is on, all the areas that require interleaving 2939 // will go thru reserve_memory_special rather than thru here. 2940 bool use_individual = (UseNUMAInterleaving && !UseLargePages); 2941 if (!use_individual) { 2942 res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE); 2943 } else { 2944 elapsedTimer reserveTimer; 2945 if (Verbose && PrintMiscellaneous) reserveTimer.start(); 2946 // in numa interleaving, we have to allocate pages individually 2947 // (well really chunks of NUMAInterleaveGranularity size) 2948 res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE); 2949 if (res == NULL) { 2950 warning("NUMA page allocation failed"); 2951 } 2952 if (Verbose && PrintMiscellaneous) { 2953 reserveTimer.stop(); 2954 tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes, 2955 reserveTimer.milliseconds(), reserveTimer.ticks()); 2956 } 2957 } 2958 assert(res == NULL || addr == NULL || addr == res, 2959 "Unexpected address from reserve."); 2960 2961 return res; 2962 } 2963 2964 // Reserve memory at an arbitrary address, only if that area is 2965 // available (and not reserved for something else). 2966 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) { 2967 // Windows os::reserve_memory() fails of the requested address range is 2968 // not avilable. 2969 return reserve_memory(bytes, requested_addr); 2970 } 2971 2972 size_t os::large_page_size() { 2973 return _large_page_size; 2974 } 2975 2976 bool os::can_commit_large_page_memory() { 2977 // Windows only uses large page memory when the entire region is reserved 2978 // and committed in a single VirtualAlloc() call. This may change in the 2979 // future, but with Windows 2003 it's not possible to commit on demand. 2980 return false; 2981 } 2982 2983 bool os::can_execute_large_page_memory() { 2984 return true; 2985 } 2986 2987 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, 2988 bool exec) { 2989 assert(UseLargePages, "only for large pages"); 2990 2991 if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) { 2992 return NULL; // Fallback to small pages. 2993 } 2994 2995 const DWORD prot = exec ? 
PAGE_EXECUTE_READWRITE : PAGE_READWRITE; 2996 const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 2997 2998 // with large pages, there are two cases where we need to use Individual Allocation 2999 // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003) 3000 // 2) NUMA Interleaving is enabled, in which case we use a different node for each page 3001 if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) { 3002 log_debug(pagesize)("Reserving large pages individually."); 3003 3004 char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError); 3005 if (p_buf == NULL) { 3006 // give an appropriate warning message 3007 if (UseNUMAInterleaving) { 3008 warning("NUMA large page allocation failed, UseLargePages flag ignored"); 3009 } 3010 if (UseLargePagesIndividualAllocation) { 3011 warning("Individually allocated large pages failed, " 3012 "use -XX:-UseLargePagesIndividualAllocation to turn off"); 3013 } 3014 return NULL; 3015 } 3016 3017 return p_buf; 3018 3019 } else { 3020 log_debug(pagesize)("Reserving large pages in a single large chunk."); 3021 3022 // normal policy just allocate it all at once 3023 DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 3024 char * res = (char *)VirtualAlloc(addr, bytes, flag, prot); 3025 if (res != NULL) { 3026 MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC); 3027 } 3028 3029 return res; 3030 } 3031 } 3032 3033 bool os::release_memory_special(char* base, size_t bytes) { 3034 assert(base != NULL, "Sanity check"); 3035 return release_memory(base, bytes); 3036 } 3037 3038 void os::print_statistics() { 3039 } 3040 3041 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) { 3042 int err = os::get_last_error(); 3043 char buf[256]; 3044 size_t buf_len = os::lasterror(buf, sizeof(buf)); 3045 warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT 3046 ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes, 3047 exec, buf_len != 0 ? buf : "<no_error_string>", err); 3048 } 3049 3050 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) { 3051 if (bytes == 0) { 3052 // Don't bother the OS with noops. 3053 return true; 3054 } 3055 assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries"); 3056 assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks"); 3057 // Don't attempt to print anything if the OS call fails. We're 3058 // probably low on resources, so the print itself may cause crashes. 3059 3060 // unless we have NUMAInterleaving enabled, the range of a commit 3061 // is always within a reserve covered by a single VirtualAlloc 3062 // in that case we can just do a single commit for the requested size 3063 if (!UseNUMAInterleaving) { 3064 if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) { 3065 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 3066 return false; 3067 } 3068 if (exec) { 3069 DWORD oldprot; 3070 // Windows doc says to use VirtualProtect to get execute permissions 3071 if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) { 3072 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 3073 return false; 3074 } 3075 } 3076 return true; 3077 } else { 3078 3079 // when NUMAInterleaving is enabled, the commit might cover a range that 3080 // came from multiple VirtualAlloc reserves (using allocate_pages_individually). 3081 // VirtualQuery can help us determine that. 
The RegionSize that VirtualQuery 3082 // returns represents the number of bytes that can be committed in one step. 3083 size_t bytes_remaining = bytes; 3084 char * next_alloc_addr = addr; 3085 while (bytes_remaining > 0) { 3086 MEMORY_BASIC_INFORMATION alloc_info; 3087 VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info)); 3088 size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3089 if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, 3090 PAGE_READWRITE) == NULL) { 3091 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 3092 exec);) 3093 return false; 3094 } 3095 if (exec) { 3096 DWORD oldprot; 3097 if (!VirtualProtect(next_alloc_addr, bytes_to_rq, 3098 PAGE_EXECUTE_READWRITE, &oldprot)) { 3099 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 3100 exec);) 3101 return false; 3102 } 3103 } 3104 bytes_remaining -= bytes_to_rq; 3105 next_alloc_addr += bytes_to_rq; 3106 } 3107 } 3108 // if we made it this far, return true 3109 return true; 3110 } 3111 3112 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, 3113 bool exec) { 3114 // alignment_hint is ignored on this OS 3115 return pd_commit_memory(addr, size, exec); 3116 } 3117 3118 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec, 3119 const char* mesg) { 3120 assert(mesg != NULL, "mesg must be specified"); 3121 if (!pd_commit_memory(addr, size, exec)) { 3122 warn_fail_commit_memory(addr, size, exec); 3123 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg); 3124 } 3125 } 3126 3127 void os::pd_commit_memory_or_exit(char* addr, size_t size, 3128 size_t alignment_hint, bool exec, 3129 const char* mesg) { 3130 // alignment_hint is ignored on this OS 3131 pd_commit_memory_or_exit(addr, size, exec, mesg); 3132 } 3133 3134 bool os::pd_uncommit_memory(char* addr, size_t bytes) { 3135 if (bytes == 0) { 3136 // Don't bother the OS with noops. 3137 return true; 3138 } 3139 assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries"); 3140 assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks"); 3141 return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0); 3142 } 3143 3144 bool os::pd_release_memory(char* addr, size_t bytes) { 3145 return VirtualFree(addr, 0, MEM_RELEASE) != 0; 3146 } 3147 3148 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 3149 return os::commit_memory(addr, size, !ExecMem); 3150 } 3151 3152 bool os::remove_stack_guard_pages(char* addr, size_t size) { 3153 return os::uncommit_memory(addr, size); 3154 } 3155 3156 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) { 3157 uint count = 0; 3158 bool ret = false; 3159 size_t bytes_remaining = bytes; 3160 char * next_protect_addr = addr; 3161 3162 // Use VirtualQuery() to get the chunk size. 3163 while (bytes_remaining) { 3164 MEMORY_BASIC_INFORMATION alloc_info; 3165 if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) { 3166 return false; 3167 } 3168 3169 size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3170 // We used different API at allocate_pages_individually() based on UseNUMAInterleaving, 3171 // but we don't distinguish here as both cases are protected by same API. 
3172     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3173     if (!ret) {
3174       warning("Failed protecting pages individually for chunk #%u", count);
3175       return false;
3176     }
3177
3178     bytes_remaining -= bytes_to_protect;
3179     next_protect_addr += bytes_to_protect;
3180     count++;
3181   }
3182   return ret;
3183 }
3184
3185 // Set protections specified
3186 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3187                         bool is_committed) {
3188   unsigned int p = 0;
3189   switch (prot) {
3190   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3191   case MEM_PROT_READ: p = PAGE_READONLY; break;
3192   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3193   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3194   default:
3195     ShouldNotReachHere();
3196   }
3197
3198   DWORD old_status;
3199
3200   // Strangely enough, on Win32 one can change protection only for committed
3201   // memory. Not a big deal anyway, as the regions involved are at most 64K.
3202   if (!is_committed) {
3203     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3204                           "cannot commit protection page");
3205   }
3206   // One cannot use os::guard_memory() here, as on Win32 guard pages
3207   // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3208   //
3209   // Pages in the region become guard pages. Any attempt to access a guard page
3210   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3211   // the guard page status. Guard pages thus act as a one-time access alarm.
3212   bool ret;
3213   if (UseNUMAInterleaving) {
3214     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3215     // so we must protect the chunks individually.
3216     ret = protect_pages_individually(addr, bytes, p, &old_status);
3217   } else {
3218     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3219   }
3220 #ifdef ASSERT
3221   if (!ret) {
3222     int err = os::get_last_error();
3223     char buf[256];
3224     size_t buf_len = os::lasterror(buf, sizeof(buf));
3225     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3226             ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3227             buf_len != 0 ?
buf : "<no_error_string>", err); 3228 } 3229 #endif 3230 return ret; 3231 } 3232 3233 bool os::guard_memory(char* addr, size_t bytes) { 3234 DWORD old_status; 3235 return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0; 3236 } 3237 3238 bool os::unguard_memory(char* addr, size_t bytes) { 3239 DWORD old_status; 3240 return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0; 3241 } 3242 3243 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { } 3244 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { } 3245 void os::numa_make_global(char *addr, size_t bytes) { } 3246 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { } 3247 bool os::numa_topology_changed() { return false; } 3248 size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); } 3249 int os::numa_get_group_id() { return 0; } 3250 size_t os::numa_get_leaf_groups(int *ids, size_t size) { 3251 if (numa_node_list_holder.get_count() == 0 && size > 0) { 3252 // Provide an answer for UMA systems 3253 ids[0] = 0; 3254 return 1; 3255 } else { 3256 // check for size bigger than actual groups_num 3257 size = MIN2(size, numa_get_groups_num()); 3258 for (int i = 0; i < (int)size; i++) { 3259 ids[i] = numa_node_list_holder.get_node_list_entry(i); 3260 } 3261 return size; 3262 } 3263 } 3264 3265 bool os::get_page_info(char *start, page_info* info) { 3266 return false; 3267 } 3268 3269 char *os::scan_pages(char *start, char* end, page_info* page_expected, 3270 page_info* page_found) { 3271 return end; 3272 } 3273 3274 char* os::non_memory_address_word() { 3275 // Must never look like an address returned by reserve_memory, 3276 // even in its subfields (as defined by the CPU immediate fields, 3277 // if the CPU splits constants across multiple instructions). 3278 return (char*)-1; 3279 } 3280 3281 #define MAX_ERROR_COUNT 100 3282 #define SYS_THREAD_ERROR 0xffffffffUL 3283 3284 void os::pd_start_thread(Thread* thread) { 3285 DWORD ret = ResumeThread(thread->osthread()->thread_handle()); 3286 // Returns previous suspend state: 3287 // 0: Thread was not suspended 3288 // 1: Thread is running now 3289 // >1: Thread is still suspended. 3290 assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back 3291 } 3292 3293 class HighResolutionInterval : public CHeapObj<mtThread> { 3294 // The default timer resolution seems to be 10 milliseconds. 3295 // (Where is this written down?) 3296 // If someone wants to sleep for only a fraction of the default, 3297 // then we set the timer resolution down to 1 millisecond for 3298 // the duration of their interval. 3299 // We carefully set the resolution back, since otherwise we 3300 // seem to incur an overhead (3%?) that we don't need. 3301 // CONSIDER: if ms is small, say 3, then we should run with a high resolution time. 3302 // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod(). 3303 // Alternatively, we could compute the relative error (503/500 = .6%) and only use 3304 // timeBeginPeriod() if the relative error exceeded some threshold. 3305 // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and 3306 // to decreased efficiency related to increased timer "tick" rates. We want to minimize 3307 // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high 3308 // resolution timers running. 
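  // Typical use (see os::sleep() below; shown here only as an illustration):
  // bracket a timed wait with an instance so the 1 ms resolution is raised only
  // while it is needed; the constructor calls timeBeginPeriod() only when ms is
  // not a multiple of 10.
  //
  //   HighResolutionInterval* phri = NULL;
  //   if (!ForceTimeHighResolution) phri = new HighResolutionInterval(ms);
  //   ... WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) ...
  //   delete phri;   // harmless if NULL; restores the default timer resolution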
3309 private: 3310 jlong resolution; 3311 public: 3312 HighResolutionInterval(jlong ms) { 3313 resolution = ms % 10L; 3314 if (resolution != 0) { 3315 MMRESULT result = timeBeginPeriod(1L); 3316 } 3317 } 3318 ~HighResolutionInterval() { 3319 if (resolution != 0) { 3320 MMRESULT result = timeEndPeriod(1L); 3321 } 3322 resolution = 0L; 3323 } 3324 }; 3325 3326 int os::sleep(Thread* thread, jlong ms, bool interruptable) { 3327 jlong limit = (jlong) MAXDWORD; 3328 3329 while (ms > limit) { 3330 int res; 3331 if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) { 3332 return res; 3333 } 3334 ms -= limit; 3335 } 3336 3337 assert(thread == Thread::current(), "thread consistency check"); 3338 OSThread* osthread = thread->osthread(); 3339 OSThreadWaitState osts(osthread, false /* not Object.wait() */); 3340 int result; 3341 if (interruptable) { 3342 assert(thread->is_Java_thread(), "must be java thread"); 3343 JavaThread *jt = (JavaThread *) thread; 3344 ThreadBlockInVM tbivm(jt); 3345 3346 jt->set_suspend_equivalent(); 3347 // cleared by handle_special_suspend_equivalent_condition() or 3348 // java_suspend_self() via check_and_wait_while_suspended() 3349 3350 HANDLE events[1]; 3351 events[0] = osthread->interrupt_event(); 3352 HighResolutionInterval *phri=NULL; 3353 if (!ForceTimeHighResolution) { 3354 phri = new HighResolutionInterval(ms); 3355 } 3356 if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) { 3357 result = OS_TIMEOUT; 3358 } else { 3359 ResetEvent(osthread->interrupt_event()); 3360 osthread->set_interrupted(false); 3361 result = OS_INTRPT; 3362 } 3363 delete phri; //if it is NULL, harmless 3364 3365 // were we externally suspended while we were waiting? 3366 jt->check_and_wait_while_suspended(); 3367 } else { 3368 assert(!thread->is_Java_thread(), "must not be java thread"); 3369 Sleep((long) ms); 3370 result = OS_TIMEOUT; 3371 } 3372 return result; 3373 } 3374 3375 // Short sleep, direct OS call. 3376 // 3377 // ms = 0, means allow others (if any) to run. 3378 // 3379 void os::naked_short_sleep(jlong ms) { 3380 assert(ms < 1000, "Un-interruptable sleep, short time use only"); 3381 Sleep(ms); 3382 } 3383 3384 // Sleep forever; naked call to OS-specific sleep; use with CAUTION 3385 void os::infinite_sleep() { 3386 while (true) { // sleep forever ... 3387 Sleep(100000); // ... 100 seconds at a time 3388 } 3389 } 3390 3391 typedef BOOL (WINAPI * STTSignature)(void); 3392 3393 void os::naked_yield() { 3394 // Consider passing back the return value from SwitchToThread(). 3395 SwitchToThread(); 3396 } 3397 3398 // Win32 only gives you access to seven real priorities at a time, 3399 // so we compress Java's ten down to seven. It would be better 3400 // if we dynamically adjusted relative priorities. 
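// For example, with the default table below a NormPriority (5) Java thread runs
// at THREAD_PRIORITY_NORMAL and a MaxPriority (10) thread only reaches
// THREAD_PRIORITY_HIGHEST; running with -XX:ThreadPriorityPolicy=1 makes
// prio_init() switch to prio_policy1[], which stretches the top of the range up
// to THREAD_PRIORITY_TIME_CRITICAL.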
3401 3402 int os::java_to_os_priority[CriticalPriority + 1] = { 3403 THREAD_PRIORITY_IDLE, // 0 Entry should never be used 3404 THREAD_PRIORITY_LOWEST, // 1 MinPriority 3405 THREAD_PRIORITY_LOWEST, // 2 3406 THREAD_PRIORITY_BELOW_NORMAL, // 3 3407 THREAD_PRIORITY_BELOW_NORMAL, // 4 3408 THREAD_PRIORITY_NORMAL, // 5 NormPriority 3409 THREAD_PRIORITY_NORMAL, // 6 3410 THREAD_PRIORITY_ABOVE_NORMAL, // 7 3411 THREAD_PRIORITY_ABOVE_NORMAL, // 8 3412 THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority 3413 THREAD_PRIORITY_HIGHEST, // 10 MaxPriority 3414 THREAD_PRIORITY_HIGHEST // 11 CriticalPriority 3415 }; 3416 3417 int prio_policy1[CriticalPriority + 1] = { 3418 THREAD_PRIORITY_IDLE, // 0 Entry should never be used 3419 THREAD_PRIORITY_LOWEST, // 1 MinPriority 3420 THREAD_PRIORITY_LOWEST, // 2 3421 THREAD_PRIORITY_BELOW_NORMAL, // 3 3422 THREAD_PRIORITY_BELOW_NORMAL, // 4 3423 THREAD_PRIORITY_NORMAL, // 5 NormPriority 3424 THREAD_PRIORITY_ABOVE_NORMAL, // 6 3425 THREAD_PRIORITY_ABOVE_NORMAL, // 7 3426 THREAD_PRIORITY_HIGHEST, // 8 3427 THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority 3428 THREAD_PRIORITY_TIME_CRITICAL, // 10 MaxPriority 3429 THREAD_PRIORITY_TIME_CRITICAL // 11 CriticalPriority 3430 }; 3431 3432 static int prio_init() { 3433 // If ThreadPriorityPolicy is 1, switch tables 3434 if (ThreadPriorityPolicy == 1) { 3435 int i; 3436 for (i = 0; i < CriticalPriority + 1; i++) { 3437 os::java_to_os_priority[i] = prio_policy1[i]; 3438 } 3439 } 3440 if (UseCriticalJavaThreadPriority) { 3441 os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority]; 3442 } 3443 return 0; 3444 } 3445 3446 OSReturn os::set_native_priority(Thread* thread, int priority) { 3447 if (!UseThreadPriorities) return OS_OK; 3448 bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0; 3449 return ret ? OS_OK : OS_ERR; 3450 } 3451 3452 OSReturn os::get_native_priority(const Thread* const thread, 3453 int* priority_ptr) { 3454 if (!UseThreadPriorities) { 3455 *priority_ptr = java_to_os_priority[NormPriority]; 3456 return OS_OK; 3457 } 3458 int os_prio = GetThreadPriority(thread->osthread()->thread_handle()); 3459 if (os_prio == THREAD_PRIORITY_ERROR_RETURN) { 3460 assert(false, "GetThreadPriority failed"); 3461 return OS_ERR; 3462 } 3463 *priority_ptr = os_prio; 3464 return OS_OK; 3465 } 3466 3467 3468 // Hint to the underlying OS that a task switch would not be good. 3469 // Void return because it's a hint and can fail. 3470 void os::hint_no_preempt() {} 3471 3472 void os::interrupt(Thread* thread) { 3473 assert(!thread->is_Java_thread() || Thread::current() == thread || 3474 Threads_lock->owned_by_self(), 3475 "possibility of dangling Thread pointer"); 3476 3477 OSThread* osthread = thread->osthread(); 3478 osthread->set_interrupted(true); 3479 // More than one thread can get here with the same value of osthread, 3480 // resulting in multiple notifications. We do, however, want the store 3481 // to interrupted() to be visible to other threads before we post 3482 // the interrupt event. 
3483 OrderAccess::release(); 3484 SetEvent(osthread->interrupt_event()); 3485 // For JSR166: unpark after setting status 3486 if (thread->is_Java_thread()) { 3487 ((JavaThread*)thread)->parker()->unpark(); 3488 } 3489 3490 ParkEvent * ev = thread->_ParkEvent; 3491 if (ev != NULL) ev->unpark(); 3492 } 3493 3494 3495 bool os::is_interrupted(Thread* thread, bool clear_interrupted) { 3496 assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(), 3497 "possibility of dangling Thread pointer"); 3498 3499 OSThread* osthread = thread->osthread(); 3500 // There is no synchronization between the setting of the interrupt 3501 // and it being cleared here. It is critical - see 6535709 - that 3502 // we only clear the interrupt state, and reset the interrupt event, 3503 // if we are going to report that we were indeed interrupted - else 3504 // an interrupt can be "lost", leading to spurious wakeups or lost wakeups 3505 // depending on the timing. Checking the thread's interrupt event as well 3506 // ensures we only report a genuine interrupt and thus prevents spurious wakeups. 3507 bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0); 3508 if (interrupted && clear_interrupted) { 3509 osthread->set_interrupted(false); 3510 ResetEvent(osthread->interrupt_event()); 3511 } // Otherwise leave the interrupted state alone 3512 3513 return interrupted; 3514 } 3515 3516 // GetCurrentThreadId() returns DWORD 3517 intx os::current_thread_id() { return GetCurrentThreadId(); } 3518 3519 static int _initial_pid = 0; 3520 3521 int os::current_process_id() { 3522 return (_initial_pid ? _initial_pid : _getpid()); 3523 } 3524 3525 int os::win32::_vm_page_size = 0; 3526 int os::win32::_vm_allocation_granularity = 0; 3527 int os::win32::_processor_type = 0; 3528 // Processor level is not available on non-NT systems, use vm_version instead 3529 int os::win32::_processor_level = 0; 3530 julong os::win32::_physical_memory = 0; 3531 size_t os::win32::_default_stack_size = 0; 3532 3533 intx os::win32::_os_thread_limit = 0; 3534 volatile intx os::win32::_os_thread_count = 0; 3535 3536 bool os::win32::_is_windows_server = false; 3537 3538 // 6573254 3539 // Currently, the bug is observed across all the supported Windows releases, 3540 // including the latest one (as of this writing - Windows Server 2012 R2) 3541 bool os::win32::_has_exit_bug = true; 3542 3543 void os::win32::initialize_system_info() { 3544 SYSTEM_INFO si; 3545 GetSystemInfo(&si); 3546 _vm_page_size = si.dwPageSize; 3547 _vm_allocation_granularity = si.dwAllocationGranularity; 3548 _processor_type = si.dwProcessorType; 3549 _processor_level = si.wProcessorLevel; 3550 set_processor_count(si.dwNumberOfProcessors); 3551 3552 MEMORYSTATUSEX ms; 3553 ms.dwLength = sizeof(ms); 3554 3555 // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual, 3556 // dwMemoryLoad (% of memory in use) 3557 GlobalMemoryStatusEx(&ms); 3558 _physical_memory = ms.ullTotalPhys; 3559 3560 if (FLAG_IS_DEFAULT(MaxRAM)) { 3561 // Adjust MaxRAM according to the maximum virtual address space available.
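// Note: ms.ullTotalVirtual is the size of the user-mode virtual address space of the // calling process (typically 2-4 GB for a 32-bit VM), so on 32-bit Windows this can cap // MaxRAM well below the physical memory size reported above.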
3562 FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual)); 3563 } 3564 3565 OSVERSIONINFOEX oi; 3566 oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); 3567 GetVersionEx((OSVERSIONINFO*)&oi); 3568 switch (oi.dwPlatformId) { 3569 case VER_PLATFORM_WIN32_NT: 3570 { 3571 int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion; 3572 if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER || 3573 oi.wProductType == VER_NT_SERVER) { 3574 _is_windows_server = true; 3575 } 3576 } 3577 break; 3578 default: fatal("Unknown platform"); 3579 } 3580 3581 _default_stack_size = os::current_stack_size(); 3582 assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size"); 3583 assert((_default_stack_size & (_vm_page_size - 1)) == 0, 3584 "stack size not a multiple of page size"); 3585 3586 initialize_performance_counter(); 3587 } 3588 3589 3590 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, 3591 int ebuflen) { 3592 char path[MAX_PATH]; 3593 DWORD size; 3594 DWORD pathLen = (DWORD)sizeof(path); 3595 HINSTANCE result = NULL; 3596 3597 // only allow library name without path component 3598 assert(strchr(name, '\\') == NULL, "path not allowed"); 3599 assert(strchr(name, ':') == NULL, "path not allowed"); 3600 if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) { 3601 jio_snprintf(ebuf, ebuflen, 3602 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name); 3603 return NULL; 3604 } 3605 3606 // search system directory 3607 if ((size = GetSystemDirectory(path, pathLen)) > 0) { 3608 if (size >= pathLen) { 3609 return NULL; // truncated 3610 } 3611 if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) { 3612 return NULL; // truncated 3613 } 3614 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3615 return result; 3616 } 3617 } 3618 3619 // try Windows directory 3620 if ((size = GetWindowsDirectory(path, pathLen)) > 0) { 3621 if (size >= pathLen) { 3622 return NULL; // truncated 3623 } 3624 if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) { 3625 return NULL; // truncated 3626 } 3627 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3628 return result; 3629 } 3630 } 3631 3632 jio_snprintf(ebuf, ebuflen, 3633 "os::win32::load_windows_dll() cannot load %s from system directories.", name); 3634 return NULL; 3635 } 3636 3637 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS) 3638 #define EXIT_TIMEOUT 300000 /* 5 minutes */ 3639 3640 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) { 3641 InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect); 3642 return TRUE; 3643 } 3644 3645 int os::win32::exit_process_or_thread(Ept what, int exit_code) { 3646 // Basic approach: 3647 // - Each exiting thread registers its intent to exit and then does so. 3648 // - A thread trying to terminate the process must wait for all 3649 // threads currently exiting to complete their exit. 3650 3651 if (os::win32::has_exit_bug()) { 3652 // The array holds handles of the threads that have started exiting by calling 3653 // _endthreadex(). 3654 // Should be large enough to avoid blocking the exiting thread due to lack of 3655 // a free slot. 
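// (MAXIMUM_THREADS_TO_KEEP is defined above as 16 * MAXIMUM_WAIT_OBJECTS; with the // documented Win32 value of MAXIMUM_WAIT_OBJECTS == 64 that gives 1024 slots.)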
3656 static HANDLE handles[MAXIMUM_THREADS_TO_KEEP]; 3657 static int handle_count = 0; 3658 3659 static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT; 3660 static CRITICAL_SECTION crit_sect; 3661 static volatile jint process_exiting = 0; 3662 int i, j; 3663 DWORD res; 3664 HANDLE hproc, hthr; 3665 3666 // We only attempt to register threads until a process exiting 3667 // thread manages to set the process_exiting flag. Any threads 3668 // that come through here after the process_exiting flag is set 3669 // are unregistered and will be caught in the SuspendThread() 3670 // infinite loop below. 3671 bool registered = false; 3672 3673 // The first thread that reached this point, initializes the critical section. 3674 if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) { 3675 warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__); 3676 } else if (OrderAccess::load_acquire(&process_exiting) == 0) { 3677 if (what != EPT_THREAD) { 3678 // Atomically set process_exiting before the critical section 3679 // to increase the visibility between racing threads. 3680 Atomic::cmpxchg((jint)GetCurrentThreadId(), &process_exiting, 0); 3681 } 3682 EnterCriticalSection(&crit_sect); 3683 3684 if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) { 3685 // Remove from the array those handles of the threads that have completed exiting. 3686 for (i = 0, j = 0; i < handle_count; ++i) { 3687 res = WaitForSingleObject(handles[i], 0 /* don't wait */); 3688 if (res == WAIT_TIMEOUT) { 3689 handles[j++] = handles[i]; 3690 } else { 3691 if (res == WAIT_FAILED) { 3692 warning("WaitForSingleObject failed (%u) in %s: %d\n", 3693 GetLastError(), __FILE__, __LINE__); 3694 } 3695 // Don't keep the handle, if we failed waiting for it. 3696 CloseHandle(handles[i]); 3697 } 3698 } 3699 3700 // If there's no free slot in the array of the kept handles, we'll have to 3701 // wait until at least one thread completes exiting. 3702 if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) { 3703 // Raise the priority of the oldest exiting thread to increase its chances 3704 // to complete sooner. 3705 SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL); 3706 res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT); 3707 if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) { 3708 i = (res - WAIT_OBJECT_0); 3709 handle_count = MAXIMUM_THREADS_TO_KEEP - 1; 3710 for (; i < handle_count; ++i) { 3711 handles[i] = handles[i + 1]; 3712 } 3713 } else { 3714 warning("WaitForMultipleObjects %s (%u) in %s: %d\n", 3715 (res == WAIT_FAILED ? "failed" : "timed out"), 3716 GetLastError(), __FILE__, __LINE__); 3717 // Don't keep handles, if we failed waiting for them. 3718 for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) { 3719 CloseHandle(handles[i]); 3720 } 3721 handle_count = 0; 3722 } 3723 } 3724 3725 // Store a duplicate of the current thread handle in the array of handles. 3726 hproc = GetCurrentProcess(); 3727 hthr = GetCurrentThread(); 3728 if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count], 3729 0, FALSE, DUPLICATE_SAME_ACCESS)) { 3730 warning("DuplicateHandle failed (%u) in %s: %d\n", 3731 GetLastError(), __FILE__, __LINE__); 3732 3733 // We can't register this thread (no more handles) so this thread 3734 // may be racing with a thread that is calling exit(). 
If the thread 3735 // that is calling exit() has managed to set the process_exiting 3736 // flag, then this thread will be caught in the SuspendThread() 3737 // infinite loop below which closes that race. A small timing 3738 // window remains before the process_exiting flag is set, but it 3739 // is only exposed when we are out of handles. 3740 } else { 3741 ++handle_count; 3742 registered = true; 3743 3744 // The current exiting thread has stored its handle in the array, and now 3745 // should leave the critical section before calling _endthreadex(). 3746 } 3747 3748 } else if (what != EPT_THREAD && handle_count > 0) { 3749 jlong start_time, finish_time, timeout_left; 3750 // Before ending the process, make sure all the threads that had called 3751 // _endthreadex() completed. 3752 3753 // Set the priority level of the current thread to the same value as 3754 // the priority level of exiting threads. 3755 // This is to ensure it will be given a fair chance to execute if 3756 // the timeout expires. 3757 hthr = GetCurrentThread(); 3758 SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL); 3759 start_time = os::javaTimeNanos(); 3760 finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L); 3761 for (i = 0; ; ) { 3762 int portion_count = handle_count - i; 3763 if (portion_count > MAXIMUM_WAIT_OBJECTS) { 3764 portion_count = MAXIMUM_WAIT_OBJECTS; 3765 } 3766 for (j = 0; j < portion_count; ++j) { 3767 SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL); 3768 } 3769 timeout_left = (finish_time - start_time) / 1000000L; 3770 if (timeout_left < 0) { 3771 timeout_left = 0; 3772 } 3773 res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left); 3774 if (res == WAIT_FAILED || res == WAIT_TIMEOUT) { 3775 warning("WaitForMultipleObjects %s (%u) in %s: %d\n", 3776 (res == WAIT_FAILED ? "failed" : "timed out"), 3777 GetLastError(), __FILE__, __LINE__); 3778 // Reset portion_count so we close the remaining 3779 // handles due to this error. 3780 portion_count = handle_count - i; 3781 } 3782 for (j = 0; j < portion_count; ++j) { 3783 CloseHandle(handles[i + j]); 3784 } 3785 if ((i += portion_count) >= handle_count) { 3786 break; 3787 } 3788 start_time = os::javaTimeNanos(); 3789 } 3790 handle_count = 0; 3791 } 3792 3793 LeaveCriticalSection(&crit_sect); 3794 } 3795 3796 if (!registered && 3797 OrderAccess::load_acquire(&process_exiting) != 0 && 3798 process_exiting != (jint)GetCurrentThreadId()) { 3799 // Some other thread is about to call exit(), so we don't let 3800 // the current unregistered thread proceed to exit() or _endthreadex() 3801 while (true) { 3802 SuspendThread(GetCurrentThread()); 3803 // Avoid busy-wait loop, if SuspendThread() failed. 3804 Sleep(EXIT_TIMEOUT); 3805 } 3806 } 3807 } 3808 3809 // We are here if either 3810 // - there's no 'race at exit' bug on this OS release; 3811 // - initialization of the critical section failed (unlikely); 3812 // - the current thread has registered itself and left the critical section; 3813 // - the process-exiting thread has raised the flag and left the critical section. 
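// Dispatch on the requested exit kind: EPT_THREAD terminates only the calling thread via // _endthreadex(), EPT_PROCESS runs the C runtime's normal shutdown (including atexit // handlers) via ::exit(), and any other value (the abort path) uses _exit(), which skips // that shutdown.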
3814 if (what == EPT_THREAD) { 3815 _endthreadex((unsigned)exit_code); 3816 } else if (what == EPT_PROCESS) { 3817 ::exit(exit_code); 3818 } else { 3819 _exit(exit_code); 3820 } 3821 3822 // Should not reach here 3823 return exit_code; 3824 } 3825 3826 #undef EXIT_TIMEOUT 3827 3828 void os::win32::setmode_streams() { 3829 _setmode(_fileno(stdin), _O_BINARY); 3830 _setmode(_fileno(stdout), _O_BINARY); 3831 _setmode(_fileno(stderr), _O_BINARY); 3832 } 3833 3834 3835 bool os::is_debugger_attached() { 3836 return IsDebuggerPresent() ? true : false; 3837 } 3838 3839 3840 void os::wait_for_keypress_at_exit(void) { 3841 if (PauseAtExit) { 3842 fprintf(stderr, "Press any key to continue...\n"); 3843 fgetc(stdin); 3844 } 3845 } 3846 3847 3848 bool os::message_box(const char* title, const char* message) { 3849 int result = MessageBox(NULL, message, title, 3850 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY); 3851 return result == IDYES; 3852 } 3853 3854 #ifndef PRODUCT 3855 #ifndef _WIN64 3856 // Helpers to check whether NX protection is enabled 3857 int nx_exception_filter(_EXCEPTION_POINTERS *pex) { 3858 if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 3859 pex->ExceptionRecord->NumberParameters > 0 && 3860 pex->ExceptionRecord->ExceptionInformation[0] == 3861 EXCEPTION_INFO_EXEC_VIOLATION) { 3862 return EXCEPTION_EXECUTE_HANDLER; 3863 } 3864 return EXCEPTION_CONTINUE_SEARCH; 3865 } 3866 3867 void nx_check_protection() { 3868 // If NX is enabled we'll get an exception calling into code on the stack 3869 char code[] = { (char)0xC3 }; // ret 3870 void *code_ptr = (void *)code; 3871 __try { 3872 __asm call code_ptr 3873 } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) { 3874 tty->print_raw_cr("NX protection detected."); 3875 } 3876 } 3877 #endif // _WIN64 3878 #endif // PRODUCT 3879 3880 // This is called _before_ the global arguments have been parsed 3881 void os::init(void) { 3882 _initial_pid = _getpid(); 3883 3884 init_random(1234567); 3885 3886 win32::initialize_system_info(); 3887 win32::setmode_streams(); 3888 init_page_sizes((size_t) win32::vm_page_size()); 3889 3890 // This may be overridden later when argument processing is done. 
3891 FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false); 3892 3893 // Initialize main_process and main_thread 3894 main_process = GetCurrentProcess(); // Remember main_process is a pseudo handle 3895 if (!DuplicateHandle(main_process, GetCurrentThread(), main_process, 3896 &main_thread, THREAD_ALL_ACCESS, false, 0)) { 3897 fatal("DuplicateHandle failed\n"); 3898 } 3899 main_thread_id = (int) GetCurrentThreadId(); 3900 3901 // initialize fast thread access - only used for 32-bit 3902 win32::initialize_thread_ptr_offset(); 3903 } 3904 3905 // To install functions for atexit processing 3906 extern "C" { 3907 static void perfMemory_exit_helper() { 3908 perfMemory_exit(); 3909 } 3910 } 3911 3912 static jint initSock(); 3913 3914 // this is called _after_ the global arguments have been parsed 3915 jint os::init_2(void) { 3916 // Allocate a single page and mark it as readable for safepoint polling 3917 address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY); 3918 guarantee(polling_page != NULL, "Reserve Failed for polling page"); 3919 3920 address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY); 3921 guarantee(return_page != NULL, "Commit Failed for polling page"); 3922 3923 os::set_polling_page(polling_page); 3924 log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page)); 3925 3926 if (!UseMembar) { 3927 address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE); 3928 guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page"); 3929 3930 return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE); 3931 guarantee(return_page != NULL, "Commit Failed for memory serialize page"); 3932 3933 os::set_memory_serialize_page(mem_serialize_page); 3934 log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page)); 3935 } 3936 3937 // Setup Windows Exceptions 3938 3939 // for debugging float code generation bugs 3940 if (ForceFloatExceptions) { 3941 #ifndef _WIN64 3942 static long fp_control_word = 0; 3943 __asm { fstcw fp_control_word } 3944 // see Intel PPro Manual, Vol. 2, p 7-16 3945 const long precision = 0x20; 3946 const long underflow = 0x10; 3947 const long overflow = 0x08; 3948 const long zero_div = 0x04; 3949 const long denorm = 0x02; 3950 const long invalid = 0x01; 3951 fp_control_word |= invalid; 3952 __asm { fldcw fp_control_word } 3953 #endif 3954 } 3955 3956 // If stack_commit_size is 0, windows will reserve the default size, 3957 // but only commit a small portion of it. 3958 size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size()); 3959 size_t default_reserve_size = os::win32::default_stack_size(); 3960 size_t actual_reserve_size = stack_commit_size; 3961 if (stack_commit_size < default_reserve_size) { 3962 // If stack_commit_size == 0, we want this too 3963 actual_reserve_size = default_reserve_size; 3964 } 3965 3966 // Check minimum allowable stack size for thread creation and to initialize 3967 // the java system classes, including StackOverflowError - depends on page 3968 // size. Add two 4K pages for compiler2 recursion in main thread. 3969 // Add in 4*BytesPerWord 4K pages to account for VM stack during 3970 // class initialization depending on 32 or 64 bit VM. 
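// As a worked illustration (not normative): on a 64-bit VM built with the C2 compiler, // BytesPerWord is 8, so the last term in the expression below is (4*8 + 2) * 4 * K = 136K; // that amount is added to the guard and shadow zone sizes before rounding up to a page // boundary.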
3971 size_t min_stack_allowed = 3972 (size_t)(JavaThread::stack_guard_zone_size() + 3973 JavaThread::stack_shadow_zone_size() + 3974 (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K); 3975 3976 min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size()); 3977 3978 if (actual_reserve_size < min_stack_allowed) { 3979 tty->print_cr("\nThe Java thread stack size specified is too small. " 3980 "Specify at least %dk", 3981 min_stack_allowed / K); 3982 return JNI_ERR; 3983 } 3984 3985 JavaThread::set_stack_size_at_create(stack_commit_size); 3986 3987 // Calculate theoretical max. size of Threads to guard against artificial 3988 // out-of-memory situations, where all available address-space has been 3989 // reserved by thread stacks. 3990 assert(actual_reserve_size != 0, "Must have a stack"); 3991 3992 // Calculate the thread limit when we should start doing Virtual Memory 3993 // banging. Currently this is when the threads have used all but 200Mb of address space. 3994 // 3995 // TODO: consider performing a similar calculation for commit size instead 3996 // of reserve size, since on a 64-bit platform we'll run into that more 3997 // often than running out of virtual memory space. We can use the 3998 // lower value of the two calculations as the os_thread_limit. 3999 size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K); 4000 win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size); 4001 4002 // atexit methods are called in the reverse order of their registration. 4003 // There is no limit to the number of functions registered. atexit does 4004 // not set errno. 4005 4006 if (PerfAllowAtExitRegistration) { 4007 // only register atexit functions if PerfAllowAtExitRegistration is set. 4008 // atexit functions can be delayed until process exit time, which 4009 // can be problematic for embedded VM situations. Embedded VMs should 4010 // call DestroyJavaVM() to ensure that VM resources are released. 4011 4012 // note: perfMemory_exit_helper atexit function may be removed in 4013 // the future if the appropriate cleanup code can be added to the 4014 // VM_Exit VMOperation's doit method.
4015 if (atexit(perfMemory_exit_helper) != 0) { 4016 warning("os::init_2 atexit(perfMemory_exit_helper) failed"); 4017 } 4018 } 4019 4020 #ifndef _WIN64 4021 // Print something if NX is enabled (win32 on AMD64) 4022 NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection()); 4023 #endif 4024 4025 // initialize thread priority policy 4026 prio_init(); 4027 4028 if (UseNUMA && !ForceNUMA) { 4029 UseNUMA = false; // We don't fully support this yet 4030 } 4031 4032 if (UseNUMAInterleaving) { 4033 // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag 4034 bool success = numa_interleaving_init(); 4035 if (!success) UseNUMAInterleaving = false; 4036 } 4037 4038 if (initSock() != JNI_OK) { 4039 return JNI_ERR; 4040 } 4041 4042 if (InitializeDbgHelpEarly) { 4043 SymbolEngine::recalc_search_path(); 4044 } 4045 4046 return JNI_OK; 4047 } 4048 4049 // Mark the polling page as unreadable 4050 void os::make_polling_page_unreadable(void) { 4051 DWORD old_status; 4052 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), 4053 PAGE_NOACCESS, &old_status)) { 4054 fatal("Could not disable polling page"); 4055 } 4056 } 4057 4058 // Mark the polling page as readable 4059 void os::make_polling_page_readable(void) { 4060 DWORD old_status; 4061 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), 4062 PAGE_READONLY, &old_status)) { 4063 fatal("Could not enable polling page"); 4064 } 4065 } 4066 4067 4068 int os::stat(const char *path, struct stat *sbuf) { 4069 char pathbuf[MAX_PATH]; 4070 if (strlen(path) > MAX_PATH - 1) { 4071 errno = ENAMETOOLONG; 4072 return -1; 4073 } 4074 os::native_path(strcpy(pathbuf, path)); 4075 int ret = ::stat(pathbuf, sbuf); 4076 if (sbuf != NULL && UseUTCFileTimestamp) { 4077 // Fix for 6539723. st_mtime returned from stat() is dependent on 4078 // the system timezone and so can return different values for the 4079 // same file if/when daylight savings time changes. This adjustment 4080 // makes sure the same timestamp is returned regardless of the TZ. 4081 // 4082 // See: 4083 // http://msdn.microsoft.com/library/ 4084 // default.asp?url=/library/en-us/sysinfo/base/ 4085 // time_zone_information_str.asp 4086 // and 4087 // http://msdn.microsoft.com/library/default.asp?url= 4088 // /library/en-us/sysinfo/base/settimezoneinformation.asp 4089 // 4090 // NOTE: there is a insidious bug here: If the timezone is changed 4091 // after the call to stat() but before 'GetTimeZoneInformation()', then 4092 // the adjustment we do here will be wrong and we'll return the wrong 4093 // value (which will likely end up creating an invalid class data 4094 // archive). Absent a better API for this, or some time zone locking 4095 // mechanism, we'll have to live with this risk. 4096 TIME_ZONE_INFORMATION tz; 4097 DWORD tzid = GetTimeZoneInformation(&tz); 4098 int daylightBias = 4099 (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias; 4100 sbuf->st_mtime += (tz.Bias + daylightBias) * 60; 4101 } 4102 return ret; 4103 } 4104 4105 4106 #define FT2INT64(ft) \ 4107 ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime)) 4108 4109 4110 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) 4111 // are used by JVM M&M and JVMTI to get user+sys or user CPU time 4112 // of a thread. 4113 // 4114 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns 4115 // the fast estimate available on the platform. 
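// On Windows all of the variants below end up calling GetThreadTimes(); the FILETIME // results are in 100-nanosecond units, so thread_cpu_time() multiplies by 100 to report // nanoseconds.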
4116 4117 // current_thread_cpu_time() is not optimized for Windows yet 4118 jlong os::current_thread_cpu_time() { 4119 // return user + sys since the cost is the same 4120 return os::thread_cpu_time(Thread::current(), true /* user+sys */); 4121 } 4122 4123 jlong os::thread_cpu_time(Thread* thread) { 4124 // consistent with what current_thread_cpu_time() returns. 4125 return os::thread_cpu_time(thread, true /* user+sys */); 4126 } 4127 4128 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 4129 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 4130 } 4131 4132 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) { 4133 // This code is a copy from the classic VM -> hpi::sysThreadCPUTime 4134 // If this function changes, os::is_thread_cpu_time_supported() should too 4135 FILETIME CreationTime; 4136 FILETIME ExitTime; 4137 FILETIME KernelTime; 4138 FILETIME UserTime; 4139 4140 if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime, 4141 &ExitTime, &KernelTime, &UserTime) == 0) { 4142 return -1; 4143 } else if (user_sys_cpu_time) { 4144 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100; 4145 } else { 4146 return FT2INT64(UserTime) * 100; 4147 } 4148 } 4149 4150 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4151 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4152 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4153 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4154 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4155 } 4156 4157 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4158 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4159 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4160 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4161 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4162 } 4163 4164 bool os::is_thread_cpu_time_supported() { 4165 // see os::thread_cpu_time 4166 FILETIME CreationTime; 4167 FILETIME ExitTime; 4168 FILETIME KernelTime; 4169 FILETIME UserTime; 4170 4171 if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime, 4172 &KernelTime, &UserTime) == 0) { 4173 return false; 4174 } else { 4175 return true; 4176 } 4177 } 4178 4179 // Windows doesn't provide a loadavg primitive so this is stubbed out for now. 4180 // It does have primitives (PDH API) to get CPU usage and run queue length. 4181 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length" 4182 // If we wanted to implement loadavg on Windows, we have a few options: 4183 // 4184 // a) Query CPU usage and run queue length and "fake" an answer by 4185 // returning the CPU usage if it's under 100%, and the run queue 4186 // length otherwise. It turns out that querying is pretty slow 4187 // on Windows, on the order of 200 microseconds on a fast machine. 4188 // Note that on Windows the CPU usage value is the % usage 4189 // since the last time the API was called (and the first call 4190 // returns 100%), so we'd have to deal with that as well. 4191 // 4192 // b) Sample the "fake" answer using a sampling thread and store 4193 // the answer in a global variable. The call to loadavg would 4194 // just return the value of the global, avoiding the slow query. 4195 // 4196 // c) Sample a better answer using exponential decay to smooth the 4197 // value. This is basically the algorithm used by UNIX kernels.
4198 // 4199 // Note that sampling thread starvation could affect both (b) and (c). 4200 int os::loadavg(double loadavg[], int nelem) { 4201 return -1; 4202 } 4203 4204 4205 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield() 4206 bool os::dont_yield() { 4207 return DontYieldALot; 4208 } 4209 4210 // This method is a slightly reworked copy of JDK's sysOpen 4211 // from src/windows/hpi/src/sys_api_md.c 4212 4213 int os::open(const char *path, int oflag, int mode) { 4214 char pathbuf[MAX_PATH]; 4215 4216 if (strlen(path) > MAX_PATH - 1) { 4217 errno = ENAMETOOLONG; 4218 return -1; 4219 } 4220 os::native_path(strcpy(pathbuf, path)); 4221 return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); 4222 } 4223 4224 FILE* os::open(int fd, const char* mode) { 4225 return ::_fdopen(fd, mode); 4226 } 4227 4228 // Is a (classpath) directory empty? 4229 bool os::dir_is_empty(const char* path) { 4230 WIN32_FIND_DATA fd; 4231 HANDLE f = FindFirstFile(path, &fd); 4232 if (f == INVALID_HANDLE_VALUE) { 4233 return true; 4234 } 4235 FindClose(f); 4236 return false; 4237 } 4238 4239 // create binary file, rewriting existing file if required 4240 int os::create_binary_file(const char* path, bool rewrite_existing) { 4241 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY; 4242 if (!rewrite_existing) { 4243 oflags |= _O_EXCL; 4244 } 4245 return ::open(path, oflags, _S_IREAD | _S_IWRITE); 4246 } 4247 4248 // return current position of file pointer 4249 jlong os::current_file_offset(int fd) { 4250 return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR); 4251 } 4252 4253 // move file pointer to the specified offset 4254 jlong os::seek_to_file_offset(int fd, jlong offset) { 4255 return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET); 4256 } 4257 4258 4259 jlong os::lseek(int fd, jlong offset, int whence) { 4260 return (jlong) ::_lseeki64(fd, offset, whence); 4261 } 4262 4263 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) { 4264 OVERLAPPED ov; 4265 DWORD nread; 4266 BOOL result; 4267 4268 ZeroMemory(&ov, sizeof(ov)); 4269 ov.Offset = (DWORD)offset; 4270 ov.OffsetHigh = (DWORD)(offset >> 32); 4271 4272 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4273 4274 result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov); 4275 4276 return result ? nread : 0; 4277 } 4278 4279 4280 // This method is a slightly reworked copy of JDK's sysNativePath 4281 // from src/windows/hpi/src/path_md.c 4282 4283 // Convert a pathname to native format. On win32, this involves forcing all 4284 // separators to be '\\' rather than '/' (both are legal inputs, but Win95 4285 // sometimes rejects '/') and removing redundant separators. The input path is 4286 // assumed to have been converted into the character encoding used by the local 4287 // system. Because this might be a double-byte encoding, care is taken to 4288 // treat double-byte lead characters correctly. 4289 // 4290 // This procedure modifies the given path in place, as the result is never 4291 // longer than the original. There is no error return; this operation always 4292 // succeeds. 
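// Illustrative examples of the conversion (assuming the rules described above): // "/c:/path//to/file/" becomes "c:\path\to\file" (leading separator dropped before the // drive specifier, separators collapsed and forced to '\\', trailing separator removed); // "//server/share//dir" becomes "\\server\share\dir" (UNC prefix preserved); // "D:" becomes "D:." (the drive-only workaround applied at the end of the function).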
4293 char * os::native_path(char *path) { 4294 char *src = path, *dst = path, *end = path; 4295 char *colon = NULL; // If a drive specifier is found, this will 4296 // point to the colon following the drive letter 4297 4298 // Assumption: '/', '\\', ':', and drive letters are never lead bytes 4299 assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\')) 4300 && (!::IsDBCSLeadByte(':'))), "Illegal lead byte"); 4301 4302 // Check for leading separators 4303 #define isfilesep(c) ((c) == '/' || (c) == '\\') 4304 while (isfilesep(*src)) { 4305 src++; 4306 } 4307 4308 if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') { 4309 // Remove leading separators if followed by drive specifier. This 4310 // hack is necessary to support file URLs containing drive 4311 // specifiers (e.g., "file://c:/path"). As a side effect, 4312 // "/c:/path" can be used as an alternative to "c:/path". 4313 *dst++ = *src++; 4314 colon = dst; 4315 *dst++ = ':'; 4316 src++; 4317 } else { 4318 src = path; 4319 if (isfilesep(src[0]) && isfilesep(src[1])) { 4320 // UNC pathname: Retain first separator; leave src pointed at 4321 // second separator so that further separators will be collapsed 4322 // into the second separator. The result will be a pathname 4323 // beginning with "\\\\" followed (most likely) by a host name. 4324 src = dst = path + 1; 4325 path[0] = '\\'; // Force first separator to '\\' 4326 } 4327 } 4328 4329 end = dst; 4330 4331 // Remove redundant separators from remainder of path, forcing all 4332 // separators to be '\\' rather than '/'. Also, single byte space 4333 // characters are removed from the end of the path because those 4334 // are not legal ending characters on this operating system. 4335 // 4336 while (*src != '\0') { 4337 if (isfilesep(*src)) { 4338 *dst++ = '\\'; src++; 4339 while (isfilesep(*src)) src++; 4340 if (*src == '\0') { 4341 // Check for trailing separator 4342 end = dst; 4343 if (colon == dst - 2) break; // "z:\\" 4344 if (dst == path + 1) break; // "\\" 4345 if (dst == path + 2 && isfilesep(path[0])) { 4346 // "\\\\" is not collapsed to "\\" because "\\\\" marks the 4347 // beginning of a UNC pathname. Even though it is not, by 4348 // itself, a valid UNC pathname, we leave it as is in order 4349 // to be consistent with the path canonicalizer as well 4350 // as the win32 APIs, which treat this case as an invalid 4351 // UNC pathname rather than as an alias for the root 4352 // directory of the current drive. 4353 break; 4354 } 4355 end = --dst; // Path does not denote a root directory, so 4356 // remove trailing separator 4357 break; 4358 } 4359 end = dst; 4360 } else { 4361 if (::IsDBCSLeadByte(*src)) { // Copy a double-byte character 4362 *dst++ = *src++; 4363 if (*src) *dst++ = *src++; 4364 end = dst; 4365 } else { // Copy a single-byte character 4366 char c = *src++; 4367 *dst++ = c; 4368 // Space is not a legal ending character 4369 if (c != ' ') end = dst; 4370 } 4371 } 4372 } 4373 4374 *end = '\0'; 4375 4376 // For "z:", add "." 
to work around a bug in the C runtime library 4377 if (colon == dst - 1) { 4378 path[2] = '.'; 4379 path[3] = '\0'; 4380 } 4381 4382 return path; 4383 } 4384 4385 // This code is a copy of JDK's sysSetLength 4386 // from src/windows/hpi/src/sys_api_md.c 4387 4388 int os::ftruncate(int fd, jlong length) { 4389 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4390 long high = (long)(length >> 32); 4391 DWORD ret; 4392 4393 if (h == (HANDLE)(-1)) { 4394 return -1; 4395 } 4396 4397 ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN); 4398 if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) { 4399 return -1; 4400 } 4401 4402 if (::SetEndOfFile(h) == FALSE) { 4403 return -1; 4404 } 4405 4406 return 0; 4407 } 4408 4409 int os::get_fileno(FILE* fp) { 4410 return _fileno(fp); 4411 } 4412 4413 // This code is a copy of JDK's sysSync 4414 // from src/windows/hpi/src/sys_api_md.c 4415 // except for the legacy workaround for a bug in Win 98 4416 4417 int os::fsync(int fd) { 4418 HANDLE handle = (HANDLE)::_get_osfhandle(fd); 4419 4420 if ((!::FlushFileBuffers(handle)) && 4421 (GetLastError() != ERROR_ACCESS_DENIED)) { 4422 // from winerror.h 4423 return -1; 4424 } 4425 return 0; 4426 } 4427 4428 static int nonSeekAvailable(int, long *); 4429 static int stdinAvailable(int, long *); 4430 4431 #define S_ISCHR(mode) (((mode) & _S_IFCHR) == _S_IFCHR) 4432 #define S_ISFIFO(mode) (((mode) & _S_IFIFO) == _S_IFIFO) 4433 4434 // This code is a copy of JDK's sysAvailable 4435 // from src/windows/hpi/src/sys_api_md.c 4436 4437 int os::available(int fd, jlong *bytes) { 4438 jlong cur, end; 4439 struct _stati64 stbuf64; 4440 4441 if (::_fstati64(fd, &stbuf64) >= 0) { 4442 int mode = stbuf64.st_mode; 4443 if (S_ISCHR(mode) || S_ISFIFO(mode)) { 4444 int ret; 4445 long lpbytes; 4446 if (fd == 0) { 4447 ret = stdinAvailable(fd, &lpbytes); 4448 } else { 4449 ret = nonSeekAvailable(fd, &lpbytes); 4450 } 4451 (*bytes) = (jlong)(lpbytes); 4452 return ret; 4453 } 4454 if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) { 4455 return FALSE; 4456 } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) { 4457 return FALSE; 4458 } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) { 4459 return FALSE; 4460 } 4461 *bytes = end - cur; 4462 return TRUE; 4463 } else { 4464 return FALSE; 4465 } 4466 } 4467 4468 void os::flockfile(FILE* fp) { 4469 _lock_file(fp); 4470 } 4471 4472 void os::funlockfile(FILE* fp) { 4473 _unlock_file(fp); 4474 } 4475 4476 // This code is a copy of JDK's nonSeekAvailable 4477 // from src/windows/hpi/src/sys_api_md.c 4478 4479 static int nonSeekAvailable(int fd, long *pbytes) { 4480 // This is used for available on non-seekable devices 4481 // (like both named and anonymous pipes, such as pipes 4482 // connected to an exec'd process). 4483 // Standard Input is a special case. 4484 HANDLE han; 4485 4486 if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) { 4487 return FALSE; 4488 } 4489 4490 if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) { 4491 // PeekNamedPipe fails when at EOF. In that case we 4492 // simply make *pbytes = 0 which is consistent with the 4493 // behavior we get on Solaris when an fd is at EOF. 4494 // The only alternative is to raise an Exception, 4495 // which isn't really warranted. 
4496 // 4497 if (::GetLastError() != ERROR_BROKEN_PIPE) { 4498 return FALSE; 4499 } 4500 *pbytes = 0; 4501 } 4502 return TRUE; 4503 } 4504 4505 #define MAX_INPUT_EVENTS 2000 4506 4507 // This code is a copy of JDK's stdinAvailable 4508 // from src/windows/hpi/src/sys_api_md.c 4509 4510 static int stdinAvailable(int fd, long *pbytes) { 4511 HANDLE han; 4512 DWORD numEventsRead = 0; // Number of events read from buffer 4513 DWORD numEvents = 0; // Number of events in buffer 4514 DWORD i = 0; // Loop index 4515 DWORD curLength = 0; // Position marker 4516 DWORD actualLength = 0; // Number of bytes readable 4517 BOOL error = FALSE; // Error holder 4518 INPUT_RECORD *lpBuffer; // Pointer to records of input events 4519 4520 if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) { 4521 return FALSE; 4522 } 4523 4524 // Construct an array of input records in the console buffer 4525 error = ::GetNumberOfConsoleInputEvents(han, &numEvents); 4526 if (error == 0) { 4527 return nonSeekAvailable(fd, pbytes); 4528 } 4529 4530 // lpBuffer must fit into 64K or else PeekConsoleInput fails 4531 if (numEvents > MAX_INPUT_EVENTS) { 4532 numEvents = MAX_INPUT_EVENTS; 4533 } 4534 4535 lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal); 4536 if (lpBuffer == NULL) { 4537 return FALSE; 4538 } 4539 4540 error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead); 4541 if (error == 0) { 4542 os::free(lpBuffer); 4543 return FALSE; 4544 } 4545 4546 // Examine input records for the number of bytes available 4547 for (i=0; i<numEvents; i++) { 4548 if (lpBuffer[i].EventType == KEY_EVENT) { 4549 4550 KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *) 4551 &(lpBuffer[i].Event); 4552 if (keyRecord->bKeyDown == TRUE) { 4553 CHAR *keyPressed = (CHAR *) &(keyRecord->uChar); 4554 curLength++; 4555 if (*keyPressed == '\r') { 4556 actualLength = curLength; 4557 } 4558 } 4559 } 4560 } 4561 4562 if (lpBuffer != NULL) { 4563 os::free(lpBuffer); 4564 } 4565 4566 *pbytes = (long) actualLength; 4567 return TRUE; 4568 } 4569 4570 // Map a block of memory. 4571 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 4572 char *addr, size_t bytes, bool read_only, 4573 bool allow_exec) { 4574 HANDLE hFile; 4575 char* base; 4576 4577 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL, 4578 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); 4579 if (hFile == NULL) { 4580 log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError()); 4581 return NULL; 4582 } 4583 4584 if (allow_exec) { 4585 // CreateFileMapping/MapViewOfFileEx can't map executable memory 4586 // unless it comes from a PE image (which the shared archive is not.) 4587 // Even VirtualProtect refuses to give execute access to mapped memory 4588 // that was not previously executable. 4589 // 4590 // Instead, stick the executable region in anonymous memory. Yuck. 4591 // Penalty is that ~4 pages will not be shareable - in the future 4592 // we might consider DLLizing the shared archive with a proper PE 4593 // header so that mapping executable + sharing is possible. 
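// The anonymous-memory path below therefore reserves and commits writable pages at the // requested address, reads the file contents into them with ReadFile(), and only grants // execute permission afterwards through the VirtualProtect() call in the allow_exec block // further down.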
4594 4595 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE, 4596 PAGE_READWRITE); 4597 if (base == NULL) { 4598 log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError()); 4599 CloseHandle(hFile); 4600 return NULL; 4601 } 4602 4603 DWORD bytes_read; 4604 OVERLAPPED overlapped; 4605 overlapped.Offset = (DWORD)file_offset; 4606 overlapped.OffsetHigh = 0; 4607 overlapped.hEvent = NULL; 4608 // ReadFile guarantees that if the return value is true, the requested 4609 // number of bytes were read before returning. 4610 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; 4611 if (!res) { 4612 log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError()); 4613 release_memory(base, bytes); 4614 CloseHandle(hFile); 4615 return NULL; 4616 } 4617 } else { 4618 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, 4619 NULL /* file_name */); 4620 if (hMap == NULL) { 4621 log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError()); 4622 CloseHandle(hFile); 4623 return NULL; 4624 } 4625 4626 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; 4627 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, 4628 (DWORD)bytes, addr); 4629 if (base == NULL) { 4630 log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError()); 4631 CloseHandle(hMap); 4632 CloseHandle(hFile); 4633 return NULL; 4634 } 4635 4636 if (CloseHandle(hMap) == 0) { 4637 log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError()); 4638 CloseHandle(hFile); 4639 return base; 4640 } 4641 } 4642 4643 if (allow_exec) { 4644 DWORD old_protect; 4645 DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; 4646 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; 4647 4648 if (!res) { 4649 log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError()); 4650 // Don't consider this a hard error, on IA32 even if the 4651 // VirtualProtect fails, we should still be able to execute 4652 CloseHandle(hFile); 4653 return base; 4654 } 4655 } 4656 4657 if (CloseHandle(hFile) == 0) { 4658 log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError()); 4659 return base; 4660 } 4661 4662 return base; 4663 } 4664 4665 4666 // Remap a block of memory. 4667 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4668 char *addr, size_t bytes, bool read_only, 4669 bool allow_exec) { 4670 // This OS does not allow existing memory maps to be remapped so we 4671 // have to unmap the memory before we remap it. 4672 if (!os::unmap_memory(addr, bytes)) { 4673 return NULL; 4674 } 4675 4676 // There is a very small theoretical window between the unmap_memory() 4677 // call above and the map_memory() call below where a thread in native 4678 // code may be able to access an address that is no longer mapped. 4679 4680 return os::map_memory(fd, file_name, file_offset, addr, bytes, 4681 read_only, allow_exec); 4682 } 4683 4684 4685 // Unmap a block of memory. 4686 // Returns true=success, otherwise false. 4687 4688 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4689 MEMORY_BASIC_INFORMATION mem_info; 4690 if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) { 4691 log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError()); 4692 return false; 4693 } 4694 4695 // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx. 4696 // Instead, executable region was allocated using VirtualAlloc(). 
See 4697 // pd_map_memory() above. 4698 // 4699 // The following flags should match the 'exec_access' flages used for 4700 // VirtualProtect() in pd_map_memory(). 4701 if (mem_info.Protect == PAGE_EXECUTE_READ || 4702 mem_info.Protect == PAGE_EXECUTE_READWRITE) { 4703 return pd_release_memory(addr, bytes); 4704 } 4705 4706 BOOL result = UnmapViewOfFile(addr); 4707 if (result == 0) { 4708 log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError()); 4709 return false; 4710 } 4711 return true; 4712 } 4713 4714 void os::pause() { 4715 char filename[MAX_PATH]; 4716 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4717 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4718 } else { 4719 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4720 } 4721 4722 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4723 if (fd != -1) { 4724 struct stat buf; 4725 ::close(fd); 4726 while (::stat(filename, &buf) == 0) { 4727 Sleep(100); 4728 } 4729 } else { 4730 jio_fprintf(stderr, 4731 "Could not open pause file '%s', continuing immediately.\n", filename); 4732 } 4733 } 4734 4735 Thread* os::ThreadCrashProtection::_protected_thread = NULL; 4736 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL; 4737 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0; 4738 4739 os::ThreadCrashProtection::ThreadCrashProtection() { 4740 } 4741 4742 // See the caveats for this class in os_windows.hpp 4743 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4744 // into this method and returns false. If no OS EXCEPTION was raised, returns 4745 // true. 4746 // The callback is supposed to provide the method that should be protected. 4747 // 4748 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) { 4749 4750 Thread::muxAcquire(&_crash_mux, "CrashProtection"); 4751 4752 _protected_thread = Thread::current_or_null(); 4753 assert(_protected_thread != NULL, "Cannot crash protect a NULL thread"); 4754 4755 bool success = true; 4756 __try { 4757 _crash_protection = this; 4758 cb.call(); 4759 } __except(EXCEPTION_EXECUTE_HANDLER) { 4760 // only for protection, nothing to do 4761 success = false; 4762 } 4763 _crash_protection = NULL; 4764 _protected_thread = NULL; 4765 Thread::muxRelease(&_crash_mux); 4766 return success; 4767 } 4768 4769 // An Event wraps a win32 "CreateEvent" kernel handle. 4770 // 4771 // We have a number of choices regarding "CreateEvent" win32 handle leakage: 4772 // 4773 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle 4774 // field, and call CloseHandle() on the win32 event handle. Unpark() would 4775 // need to be modified to tolerate finding a NULL (invalid) win32 event handle. 4776 // In addition, an unpark() operation might fetch the handle field, but the 4777 // event could recycle between the fetch and the SetEvent() operation. 4778 // SetEvent() would either fail because the handle was invalid, or inadvertently work, 4779 // as the win32 handle value had been recycled. In an ideal world calling SetEvent() 4780 // on an stale but recycled handle would be harmless, but in practice this might 4781 // confuse other non-Sun code, so it's not a viable approach. 4782 // 4783 // 2: Once a win32 event handle is associated with an Event, it remains associated 4784 // with the Event. The event handle is never closed. 
This could be construed 4785 // as handle leakage, but only up to the maximum # of threads that have been extant 4786 // at any one time. This shouldn't be an issue, as windows platforms typically 4787 // permit a process to have hundreds of thousands of open handles. 4788 // 4789 // 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList 4790 // and release unused handles. 4791 // 4792 // 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle. 4793 // It's not clear, however, that we wouldn't be trading one type of leak for another. 4794 // 4795 // 5. Use an RCU-like mechanism (Read-Copy Update). 4796 // Or perhaps something similar to Maged Michael's "Hazard pointers". 4797 // 4798 // We use (2). 4799 // 4800 // TODO-FIXME: 4801 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation. 4802 // 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks 4803 // to recover from (or at least detect) the dreaded Windows 841176 bug. 4804 // 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent 4805 // into a single win32 CreateEvent() handle. 4806 // 4807 // Assumption: 4808 // Only one parker can exist on an event, which is why we allocate 4809 // them per-thread. Multiple unparkers can coexist. 4810 // 4811 // _Event transitions in park() 4812 // -1 => -1 : illegal 4813 // 1 => 0 : pass - return immediately 4814 // 0 => -1 : block; then set _Event to 0 before returning 4815 // 4816 // _Event transitions in unpark() 4817 // 0 => 1 : just return 4818 // 1 => 1 : just return 4819 // -1 => either 0 or 1; must signal target thread 4820 // That is, we can safely transition _Event from -1 to either 4821 // 0 or 1. 4822 // 4823 // _Event serves as a restricted-range semaphore. 4824 // -1 : thread is blocked, i.e. there is a waiter 4825 // 0 : neutral: thread is running or ready, 4826 // could have been signaled after a wait started 4827 // 1 : signaled - thread is running or ready 4828 // 4829 // Another possible encoding of _Event would be with 4830 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits. 4831 // 4832 4833 int os::PlatformEvent::park(jlong Millis) { 4834 // Transitions for _Event: 4835 // -1 => -1 : illegal 4836 // 1 => 0 : pass - return immediately 4837 // 0 => -1 : block; then set _Event to 0 before returning 4838 4839 guarantee(_ParkHandle != NULL , "Invariant"); 4840 guarantee(Millis > 0 , "Invariant"); 4841 4842 // CONSIDER: defer assigning a CreateEvent() handle to the Event until 4843 // the initial park() operation. 4844 // Consider: use atomic decrement instead of CAS-loop 4845 4846 int v; 4847 for (;;) { 4848 v = _Event; 4849 if (Atomic::cmpxchg(v-1, &_Event, v) == v) break; 4850 } 4851 guarantee((v == 0) || (v == 1), "invariant"); 4852 if (v != 0) return OS_OK; 4853 4854 // Do this the hard way by blocking ... 4855 // TODO: consider a brief spin here, gated on the success of recent 4856 // spin attempts by this thread. 4857 // 4858 // We decompose long timeouts into series of shorter timed waits. 4859 // Evidently large timo values passed in WaitForSingleObject() are problematic on some 4860 // versions of Windows. See EventWait() for details. This may be superstition. Or not. 4861 // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time 4862 // with os::javaTimeNanos(). 
Furthermore, we assume that spurious returns from 4863 // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend 4864 // to happen early in the wait interval. Specifically, after a spurious wakeup (rv == 4865 // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate 4866 // for the already waited time. This policy does not admit any new outcomes. 4867 // In the future, however, we might want to track the accumulated wait time and 4868 // adjust Millis accordingly if we encounter a spurious wakeup. 4869 4870 const int MAXTIMEOUT = 0x10000000; 4871 DWORD rv = WAIT_TIMEOUT; 4872 while (_Event < 0 && Millis > 0) { 4873 DWORD prd = Millis; // set prd = MAX (Millis, MAXTIMEOUT) 4874 if (Millis > MAXTIMEOUT) { 4875 prd = MAXTIMEOUT; 4876 } 4877 rv = ::WaitForSingleObject(_ParkHandle, prd); 4878 assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed"); 4879 if (rv == WAIT_TIMEOUT) { 4880 Millis -= prd; 4881 } 4882 } 4883 v = _Event; 4884 _Event = 0; 4885 // see comment at end of os::PlatformEvent::park() below: 4886 OrderAccess::fence(); 4887 // If we encounter a nearly simultanous timeout expiry and unpark() 4888 // we return OS_OK indicating we awoke via unpark(). 4889 // Implementor's license -- returning OS_TIMEOUT would be equally valid, however. 4890 return (v >= 0) ? OS_OK : OS_TIMEOUT; 4891 } 4892 4893 void os::PlatformEvent::park() { 4894 // Transitions for _Event: 4895 // -1 => -1 : illegal 4896 // 1 => 0 : pass - return immediately 4897 // 0 => -1 : block; then set _Event to 0 before returning 4898 4899 guarantee(_ParkHandle != NULL, "Invariant"); 4900 // Invariant: Only the thread associated with the Event/PlatformEvent 4901 // may call park(). 4902 // Consider: use atomic decrement instead of CAS-loop 4903 int v; 4904 for (;;) { 4905 v = _Event; 4906 if (Atomic::cmpxchg(v-1, &_Event, v) == v) break; 4907 } 4908 guarantee((v == 0) || (v == 1), "invariant"); 4909 if (v != 0) return; 4910 4911 // Do this the hard way by blocking ... 4912 // TODO: consider a brief spin here, gated on the success of recent 4913 // spin attempts by this thread. 4914 while (_Event < 0) { 4915 DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE); 4916 assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed"); 4917 } 4918 4919 // Usually we'll find _Event == 0 at this point, but as 4920 // an optional optimization we clear it, just in case can 4921 // multiple unpark() operations drove _Event up to 1. 4922 _Event = 0; 4923 OrderAccess::fence(); 4924 guarantee(_Event >= 0, "invariant"); 4925 } 4926 4927 void os::PlatformEvent::unpark() { 4928 guarantee(_ParkHandle != NULL, "Invariant"); 4929 4930 // Transitions for _Event: 4931 // 0 => 1 : just return 4932 // 1 => 1 : just return 4933 // -1 => either 0 or 1; must signal target thread 4934 // That is, we can safely transition _Event from -1 to either 4935 // 0 or 1. 4936 // See also: "Semaphores in Plan 9" by Mullender & Cox 4937 // 4938 // Note: Forcing a transition from "-1" to "1" on an unpark() means 4939 // that it will take two back-to-back park() calls for the owning 4940 // thread to block. This has the benefit of forcing a spurious return 4941 // from the first park() call after an unpark() call which will help 4942 // shake out uses of park() and unpark() without condition variables. 
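// The atomic exchange below publishes the signal: if the previous value of _Event was 0 or // 1 there is no blocked parker and nothing more to do; only a previous value of -1 means a // thread may be blocked in WaitForSingleObject(), in which case we wake it with SetEvent().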

  if (Atomic::xchg(1, &_Event) >= 0) return;

  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

// The Windows implementation of Park is very straightforward: Basic
// operations on Win32 Events turn out to have the right semantics to
// use them directly.  We opportunistically reuse the event inherited
// from Monitor.

void Parker::park(bool isAbsolute, jlong time) {
  guarantee(_ParkEvent != NULL, "invariant");
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  } else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  } else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) { // already elapsed
      return;
    }
  } else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0) {  // Wait for the minimal time unit if zero
      time = 1;
    }
  }

  JavaThread* thread = JavaThread::current();

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    ResetEvent(_ParkEvent);
    return;
  } else {
    ThreadBlockInVM tbivm(thread);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    thread->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (thread->handle_special_suspend_equivalent_condition()) {
      thread->java_suspend_self();
    }
  }
}

void Parker::unpark() {
  guarantee(_ParkEvent != NULL, "invariant");
  SetEvent(_ParkEvent);
}

// Run the specified command in a separate process.  Return its exit value,
// or -1 on failure (e.g. can't create a new process).
int os::fork_and_exec(char* cmd) {
  STARTUPINFO si;
  PROCESS_INFORMATION pi;
  DWORD exit_code;

  char * cmd_string;
  char * cmd_prefix = "cmd /C ";
  size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
  cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
  if (cmd_string == NULL) {
    return -1;
  }
  cmd_string[0] = '\0';
  strcat(cmd_string, cmd_prefix);
  strcat(cmd_string, cmd);

  // now replace all '\n' with '&'
  char * substring = cmd_string;
  while ((substring = strchr(substring, '\n')) != NULL) {
    substring[0] = '&';
    substring++;
  }
  memset(&si, 0, sizeof(si));
  si.cb = sizeof(si);
  memset(&pi, 0, sizeof(pi));
  BOOL rslt = CreateProcess(NULL,         // executable name - use command line
                            cmd_string,   // command line
                            NULL,         // process security attribute
                            NULL,         // thread security attribute
                            TRUE,         // inherits system handles
                            0,            // no creation flags
                            NULL,         // use parent's environment block
                            NULL,         // use parent's starting directory
                            &si,          // (in) startup information
                            &pi);         // (out) process information

  if (rslt) {
    // Wait until child process exits.
    WaitForSingleObject(pi.hProcess, INFINITE);

    GetExitCodeProcess(pi.hProcess, &exit_code);

    // Close process and thread handles.
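    // Both handles returned in PROCESS_INFORMATION must be closed; otherwise the
    // kernel process and thread objects are kept alive after the child has exited.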
    CloseHandle(pi.hProcess);
    CloseHandle(pi.hThread);
  } else {
    exit_code = -1;
  }

  FREE_C_HEAP_ARRAY(char, cmd_string);
  return (int)exit_code;
}

bool os::find(address addr, outputStream* st) {
  int offset = -1;
  bool result = false;
  char buf[256];
  if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
    st->print(PTR_FORMAT " ", addr);
    if (strlen(buf) < sizeof(buf) - 1) {
      char* p = strrchr(buf, '\\');
      if (p) {
        st->print("%s", p + 1);
      } else {
        st->print("%s", buf);
      }
    } else {
      // The library name is probably truncated. Let's omit the library name.
      // See also JDK-8147512.
    }
    if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
      st->print("::%s + 0x%x", buf, offset);
    }
    st->cr();
    result = true;
  }
  return result;
}

LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
  DWORD exception_code = e->ExceptionRecord->ExceptionCode;

  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    JavaThread* thread = JavaThread::current();
    PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (os::is_memory_serialize_page(thread, addr)) {
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  return EXCEPTION_CONTINUE_SEARCH;
}

// We don't build a headless jre for Windows
bool os::is_headless_jre() { return false; }

static jint initSock() {
  WSADATA wsadata;

  if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
    jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
                ::GetLastError());
    return JNI_ERR;
  }
  return JNI_OK;
}

struct hostent* os::get_host_by_name(char* name) {
  return (struct hostent*)gethostbyname(name);
}

int os::socket_close(int fd) {
  return ::closesocket(fd);
}

int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}

int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  return ::connect(fd, him, len);
}

int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  return ::recv(fd, buf, (int)nBytes, flags);
}

int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(IA32)
  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif

// returns true if thread could be suspended,
// false otherwise
static bool do_suspend(HANDLE* h) {
  if (h != NULL) {
    if (SuspendThread(*h) != ~0) {
      return true;
    }
  }
  return false;
}

// resume the thread
// calling resume on an active thread is a no-op
static void do_resume(HANDLE* h) {
  if (h != NULL) {
    ResumeThread(*h);
  }
}

// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value.
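// The handle is opened with THREAD_SUSPEND_RESUME, THREAD_GET_CONTEXT and
// THREAD_QUERY_INFORMATION access so that do_suspend(), GetThreadContext() and
// do_resume() can operate on it.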
void get_thread_handle_for_extended_context(HANDLE* h,
                                            OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

// Thread sampling implementation
//
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}

bool os::start_debugging(char *buf, int buflen) {
  int len = (int)strlen(buf);
  char *p = &buf[len];

  jio_snprintf(p, buflen-len,
               "\n\n"
               "Do you want to debug the problem?\n\n"
               "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
               "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
               "Otherwise, select 'No' to abort...",
               os::current_process_id(), os::current_thread_id());

  bool yes = os::message_box("Unexpected Error", buf);

  if (yes) {
    // os::breakpoint() calls DebugBreak(), which causes a breakpoint
    // exception. If VM is running inside a debugger, the debugger will
    // catch the exception. Otherwise, the breakpoint exception will reach
    // the default Windows exception handler, which can spawn a debugger and
    // automatically attach to the dying VM.
    os::breakpoint();
    yes = false;
  }
  return yes;
}

void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for Windows, takes into account __stdcall names.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//                              == false if only the base name of the library is passed in
//                                     such as "L"
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  len = (lib_name != NULL ?
         name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}

#ifndef PRODUCT

// test the code path in reserve_memory_special() that tries to allocate memory in a single
// contiguous memory block at a particular address.
// The test first tries to find a good approximate address to allocate at by using the same
// method to allocate some memory at any address.  The test then tries to allocate memory in
// the vicinity (not directly after it, to avoid a possible by-chance use of that location).
// This is of course only a rough assumption; there is no guarantee that the vicinity of
// the previously allocated memory is available for allocation.  The only actual failure
// that is reported is when the test tries to allocate at a particular location but gets a
// different valid one.  A NULL return value at this point is not considered an error but may
// be legitimate.
// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
void TestReserveMemorySpecial_test() {
  if (!UseLargePages) {
    if (VerboseInternalVMTests) {
      tty->print("Skipping test because large pages are disabled");
    }
    return;
  }
  // save current value of globals
  bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
  bool old_use_numa_interleaving = UseNUMAInterleaving;

  // set globals to make sure we hit the correct code path
  UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;

  // do an allocation at an address selected by the OS to get a good one.
  const size_t large_allocation_size = os::large_page_size() * 4;
  char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
  if (result == NULL) {
    if (VerboseInternalVMTests) {
      tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.",
                 large_allocation_size);
    }
  } else {
    os::release_memory_special(result, large_allocation_size);

    // allocate another page within the recently allocated memory area, which seems to be a good
    // location. At least we managed to get it once.
    const size_t expected_allocation_size = os::large_page_size();
    char* expected_location = result + os::large_page_size();
    char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
    if (actual_location == NULL) {
      if (VerboseInternalVMTests) {
        tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". "
                   "Skipping remainder of test.",
                   expected_location, expected_allocation_size);
      }
    } else {
      // release memory
      os::release_memory_special(actual_location, expected_allocation_size);
      // only now check, after releasing any memory, to avoid leaks.
      assert(actual_location == expected_location,
             "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
             expected_location, expected_allocation_size, actual_location);
    }
  }

  // restore globals
  UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
  UseNUMAInterleaving = old_use_numa_interleaving;
}
#endif // PRODUCT

/*
  All the defined signal names for Windows.

  NOTE that not all of these names are accepted by FindSignal!

  For various reasons some of these may be rejected at runtime.

  Here are the names currently accepted by a user of sun.misc.Signal with
  1.4.1 (ignoring potential interaction with use of chaining, etc):

     (LIST TBD)

*/
int os::get_signal_number(const char* name) {
  static const struct {
    char* name;
    int   number;
  } siglabels [] =
    // derived from version 6.0 VC98/include/signal.h
  {"ABRT",      SIGABRT,        // abnormal termination triggered by abort call
  "FPE",        SIGFPE,         // floating point exception
  "SEGV",       SIGSEGV,        // segment violation
  "INT",        SIGINT,         // interrupt
  "TERM",       SIGTERM,        // software term signal from kill
  "BREAK",      SIGBREAK,       // Ctrl-Break sequence
  "ILL",        SIGILL};        // illegal instruction
  for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
    if (strcmp(name, siglabels[i].name) == 0) {
      return siglabels[i].number;
    }
  }
  return -1;
}

// Fast current thread access

int os::win32::_thread_ptr_offset = 0;

static void call_wrapper_dummy() {}

// We need to call the os_exception_wrapper once so that it sets
// up the offset from FS of the thread pointer.
void os::win32::initialize_thread_ptr_offset() {
  os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
                           NULL, NULL, NULL, NULL);
}
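
// Illustrative sketch only -- not part of this file's logic, and it assumes
// _thread_ptr_offset is reachable from the caller: once the offset from FS has been
// captured, the current thread could be recovered with a single segment-relative load
// instead of a TlsGetValue() call, e.g. on 32-bit x86 using the MSVC intrinsic
// __readfsdword():
//
//   static inline Thread* illustrative_fast_current_thread() {
//     return (Thread*)__readfsdword(os::win32::_thread_ptr_offset);
//   }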