1 /* 2 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce 26 #define _WIN32_WINNT 0x0600 27 28 // no precompiled headers 29 #include "jvm.h" 30 #include "classfile/classLoader.hpp" 31 #include "classfile/systemDictionary.hpp" 32 #include "classfile/vmSymbols.hpp" 33 #include "code/icBuffer.hpp" 34 #include "code/vtableStubs.hpp" 35 #include "compiler/compileBroker.hpp" 36 #include "compiler/disassembler.hpp" 37 #include "interpreter/interpreter.hpp" 38 #include "logging/log.hpp" 39 #include "memory/allocation.inline.hpp" 40 #include "memory/filemap.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "os_share_windows.hpp" 43 #include "os_windows.inline.hpp" 44 #include "prims/jniFastGetField.hpp" 45 #include "prims/jvm_misc.hpp" 46 #include "runtime/arguments.hpp" 47 #include "runtime/atomic.hpp" 48 #include "runtime/extendedPC.hpp" 49 #include "runtime/globals.hpp" 50 #include "runtime/interfaceSupport.inline.hpp" 51 #include "runtime/java.hpp" 52 #include "runtime/javaCalls.hpp" 53 #include "runtime/mutexLocker.hpp" 54 #include "runtime/objectMonitor.hpp" 55 #include "runtime/orderAccess.inline.hpp" 56 #include "runtime/osThread.hpp" 57 #include "runtime/perfMemory.hpp" 58 #include "runtime/sharedRuntime.hpp" 59 #include "runtime/statSampler.hpp" 60 #include "runtime/stubRoutines.hpp" 61 #include "runtime/thread.inline.hpp" 62 #include "runtime/threadCritical.hpp" 63 #include "runtime/timer.hpp" 64 #include "runtime/vm_version.hpp" 65 #include "services/attachListener.hpp" 66 #include "services/memTracker.hpp" 67 #include "services/runtimeService.hpp" 68 #include "utilities/align.hpp" 69 #include "utilities/decoder.hpp" 70 #include "utilities/defaultStream.hpp" 71 #include "utilities/events.hpp" 72 #include "utilities/growableArray.hpp" 73 #include "utilities/macros.hpp" 74 #include "utilities/vmError.hpp" 75 #include "symbolengine.hpp" 76 #include "windbghelp.hpp" 77 78 79 #ifdef _DEBUG 80 #include <crtdbg.h> 81 #endif 82 83 84 
#include <windows.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/timeb.h>
#include <objidl.h>
#include <shlobj.h>

#include <malloc.h>
#include <signal.h>
#include <direct.h>
#include <errno.h>
#include <fcntl.h>
#include <io.h>
#include <process.h>              // For _beginthreadex(), _endthreadex()
#include <imagehlp.h>             // For os::dll_address_to_function_name
// for enumerating dll libraries
#include <vdmdbg.h>
#include <psapi.h>

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(-1)

// For DLL loading/load error detection
// Values of PE COFF
#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
#define IMAGE_FILE_SIGNATURE_LENGTH 4

// Handles/ids of the process and its primordial thread, captured at startup
// (set elsewhere in this file; used e.g. by create_attached_thread).
static HANDLE main_process;
static HANDLE main_thread;
static int main_thread_id;

static FILETIME process_creation_time;
static FILETIME process_exit_time;
static FILETIME process_user_time;
static FILETIME process_kernel_time;

#ifdef _M_AMD64
  #define __CPU__ amd64
#else
  #define __CPU__ i486
#endif

// save DLL module handle, used by GetModuleFileName

HINSTANCE vm_lib_handle;

// DLL entry point for jvm.dll: records the module handle, optionally raises
// the system timer resolution to 1ms (ForceTimeHighResolution), and
// pre-initializes the dbghelp/symbol-engine wrappers while loading is safe.
BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
  switch (reason) {
  case DLL_PROCESS_ATTACH:
    vm_lib_handle = hinst;
    if (ForceTimeHighResolution) {
      timeBeginPeriod(1L);
    }
    WindowsDbgHelp::pre_initialize();
    SymbolEngine::pre_initialize();
    break;
  case DLL_PROCESS_DETACH:
    // Undo the timer-resolution change requested at attach time.
    if (ForceTimeHighResolution) {
      timeEndPeriod(1L);
    }
    break;
  default:
    break;
  }
  return true;
}

// Convert a FILETIME (100ns units, split into two 32-bit halves) to a
// double value in seconds (split == 1e7 ticks per second).
static inline double fileTimeAsDouble(FILETIME* time) {
  const double high  = (double) ((unsigned int) ~0);
  const double split = 10000000.0;
  double result = (time->dwLowDateTime / split) +
                   time->dwHighDateTime * (high/split);
  return result;
}

// Implementation of os

// Remove 'name' from the process environment. Passing NULL as the value to
// SetEnvironmentVariable deletes the variable.
bool os::unsetenv(const char* name) {
  assert(name != NULL, "Null pointer");
  return (SetEnvironmentVariable(name, NULL) == TRUE);
}

// No setuid programs under Windows.
bool os::have_special_privileges() {
  return false;
}


// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here.
// For Windows at the moment does nothing
void os::run_periodic_checks() {
  return;
}

// previous UnhandledExceptionFilter, if there is one
static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;

LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);

// Compute and publish java.home, the DLL directory, the boot class path,
// java.library.path and the default extensions dirs into Arguments.
void os::init_system_properties_values() {
  // sysclasspath, java_home, dll_dir
  {
    char *home_path;
    char *dll_path;
    char *pslash;
    char *bin = "\\bin";
    char home_dir[MAX_PATH + 1];
    char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");

    if (alt_home_dir != NULL) {
      // Testing override: trust the caller-supplied home directory.
      strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
      home_dir[MAX_PATH] = '\0';
    } else {
      os::jvm_path(home_dir, sizeof(home_dir));
      // Found the full path to jvm.dll.
      // Now cut the path to <java_home>/jre if we can.
      *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
      pslash = strrchr(home_dir, '\\');
      if (pslash != NULL) {
        *pslash = '\0';                   // get rid of \{client|server}
        pslash = strrchr(home_dir, '\\');
        if (pslash != NULL) {
          *pslash = '\0';                 // get rid of \bin
        }
      }
    }

    // Arguments::set_java_home copies its argument, so the temporary
    // C-heap buffer is freed immediately after each set_* call below.
    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
    if (home_path == NULL) {
      return;
    }
    strcpy(home_path, home_dir);
    Arguments::set_java_home(home_path);
    FREE_C_HEAP_ARRAY(char, home_path);

    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
                                mtInternal);
    if (dll_path == NULL) {
      return;
    }
    strcpy(dll_path, home_dir);
    strcat(dll_path, bin);
    Arguments::set_dll_dir(dll_path);
    FREE_C_HEAP_ARRAY(char, dll_path);

    if (!set_boot_path('\\', ';')) {
      return;
    }
  }

// library_path
#define EXT_DIR "\\lib\\ext"
#define BIN_DIR "\\bin"
#define PACKAGE_DIR "\\Sun\\Java"
  {
    // Win32 library search order (See the documentation for LoadLibrary):
    //
    // 1. The directory from which application is loaded.
    // 2. The system wide Java Extensions directory (Java only)
    // 3. System directory (GetSystemDirectory)
    // 4. Windows directory (GetWindowsDirectory)
    // 5. The PATH environment variable
    // 6. The current directory

    char *library_path;
    char tmp[MAX_PATH];
    char *path_str = ::getenv("PATH");

    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
                                    sizeof(BIN_DIR) + (path_str ?
                                    strlen(path_str) : 0) + 10, mtInternal);

    library_path[0] = '\0';

    // 1. Directory containing the launcher executable.
    GetModuleFileName(NULL, tmp, sizeof(tmp));
    *(strrchr(tmp, '\\')) = '\0';
    strcat(library_path, tmp);

    // 2. System-wide Java Extensions directory (<windir>\Sun\Java\bin).
    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);
    strcat(library_path, PACKAGE_DIR BIN_DIR);

    // 3. System directory.
    GetSystemDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    // 4. Windows directory.
    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    // 5. PATH.
    if (path_str) {
      strcat(library_path, ";");
      strcat(library_path, path_str);
    }

    // 6. Current directory.
    strcat(library_path, ";.");

    Arguments::set_library_path(library_path);
    FREE_C_HEAP_ARRAY(char, library_path);
  }

  // Default extensions directory
  {
    char path[MAX_PATH];
    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
    GetWindowsDirectory(path, MAX_PATH);
    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
            path, PACKAGE_DIR, EXT_DIR);
    Arguments::set_ext_dirs(buf);
  }
#undef EXT_DIR
#undef BIN_DIR
#undef PACKAGE_DIR

#ifndef _WIN64
  // set our UnhandledExceptionFilter and save any previous one
  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif

  // Done
  return;
}

void os::breakpoint() {
  DebugBreak();
}

// Invoked from the BREAKPOINT Macro
extern "C" void breakpoint() {
  os::breakpoint();
}

// RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
// So far, this method is only used by Native Memory Tracking, which is
// only supported on Windows XP or later.
320 // 321 int os::get_native_stack(address* stack, int frames, int toSkip) { 322 int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL); 323 for (int index = captured; index < frames; index ++) { 324 stack[index] = NULL; 325 } 326 return captured; 327 } 328 329 330 // os::current_stack_base() 331 // 332 // Returns the base of the stack, which is the stack's 333 // starting address. This function must be called 334 // while running on the stack of the thread being queried. 335 336 address os::current_stack_base() { 337 MEMORY_BASIC_INFORMATION minfo; 338 address stack_bottom; 339 size_t stack_size; 340 341 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 342 stack_bottom = (address)minfo.AllocationBase; 343 stack_size = minfo.RegionSize; 344 345 // Add up the sizes of all the regions with the same 346 // AllocationBase. 347 while (1) { 348 VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo)); 349 if (stack_bottom == (address)minfo.AllocationBase) { 350 stack_size += minfo.RegionSize; 351 } else { 352 break; 353 } 354 } 355 return stack_bottom + stack_size; 356 } 357 358 size_t os::current_stack_size() { 359 size_t sz; 360 MEMORY_BASIC_INFORMATION minfo; 361 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 362 sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase; 363 return sz; 364 } 365 366 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 367 const struct tm* time_struct_ptr = localtime(clock); 368 if (time_struct_ptr != NULL) { 369 *res = *time_struct_ptr; 370 return res; 371 } 372 return NULL; 373 } 374 375 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) { 376 const struct tm* time_struct_ptr = gmtime(clock); 377 if (time_struct_ptr != NULL) { 378 *res = *time_struct_ptr; 379 return res; 380 } 381 return NULL; 382 } 383 384 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo); 385 386 // Thread start routine for all newly created threads 387 static unsigned 
__stdcall thread_native_entry(Thread* thread) { 388 // Try to randomize the cache line index of hot stack frames. 389 // This helps when threads of the same stack traces evict each other's 390 // cache lines. The threads can be either from the same JVM instance, or 391 // from different JVM instances. The benefit is especially true for 392 // processors with hyperthreading technology. 393 static int counter = 0; 394 int pid = os::current_process_id(); 395 _alloca(((pid ^ counter++) & 7) * 128); 396 397 thread->initialize_thread_current(); 398 399 OSThread* osthr = thread->osthread(); 400 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 401 402 if (UseNUMA) { 403 int lgrp_id = os::numa_get_group_id(); 404 if (lgrp_id != -1) { 405 thread->set_lgrp_id(lgrp_id); 406 } 407 } 408 409 // Diagnostic code to investigate JDK-6573254 410 int res = 30115; // non-java thread 411 if (thread->is_Java_thread()) { 412 res = 20115; // java thread 413 } 414 415 log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id()); 416 417 // Install a win32 structured exception handler around every thread created 418 // by VM, so VM can generate error dump when an exception occurred in non- 419 // Java thread (e.g. VM thread). 420 __try { 421 thread->run(); 422 } __except(topLevelExceptionFilter( 423 (_EXCEPTION_POINTERS*)_exception_info())) { 424 // Nothing to do. 
425 } 426 427 log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id()); 428 429 // One less thread is executing 430 // When the VMThread gets here, the main thread may have already exited 431 // which frees the CodeHeap containing the Atomic::add code 432 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) { 433 Atomic::dec(&os::win32::_os_thread_count); 434 } 435 436 // If a thread has not deleted itself ("delete this") as part of its 437 // termination sequence, we have to ensure thread-local-storage is 438 // cleared before we actually terminate. No threads should ever be 439 // deleted asynchronously with respect to their termination. 440 if (Thread::current_or_null_safe() != NULL) { 441 assert(Thread::current_or_null_safe() == thread, "current thread is wrong"); 442 thread->clear_thread_current(); 443 } 444 445 // Thread must not return from exit_process_or_thread(), but if it does, 446 // let it proceed to exit normally 447 return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res); 448 } 449 450 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, 451 int thread_id) { 452 // Allocate the OSThread object 453 OSThread* osthread = new OSThread(NULL, NULL); 454 if (osthread == NULL) return NULL; 455 456 // Initialize support for Java interrupts 457 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 458 if (interrupt_event == NULL) { 459 delete osthread; 460 return NULL; 461 } 462 osthread->set_interrupt_event(interrupt_event); 463 464 // Store info on the Win32 thread into the OSThread 465 osthread->set_thread_handle(thread_handle); 466 osthread->set_thread_id(thread_id); 467 468 if (UseNUMA) { 469 int lgrp_id = os::numa_get_group_id(); 470 if (lgrp_id != -1) { 471 thread->set_lgrp_id(lgrp_id); 472 } 473 } 474 475 // Initial thread state is INITIALIZED, not SUSPENDED 476 osthread->set_state(INITIALIZED); 477 478 return osthread; 479 } 480 481 482 bool 
os::create_attached_thread(JavaThread* thread) { 483 #ifdef ASSERT 484 thread->verify_not_published(); 485 #endif 486 HANDLE thread_h; 487 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(), 488 &thread_h, THREAD_ALL_ACCESS, false, 0)) { 489 fatal("DuplicateHandle failed\n"); 490 } 491 OSThread* osthread = create_os_thread(thread, thread_h, 492 (int)current_thread_id()); 493 if (osthread == NULL) { 494 return false; 495 } 496 497 // Initial thread state is RUNNABLE 498 osthread->set_state(RUNNABLE); 499 500 thread->set_osthread(osthread); 501 502 log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").", 503 os::current_thread_id()); 504 505 return true; 506 } 507 508 bool os::create_main_thread(JavaThread* thread) { 509 #ifdef ASSERT 510 thread->verify_not_published(); 511 #endif 512 if (_starting_thread == NULL) { 513 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 514 if (_starting_thread == NULL) { 515 return false; 516 } 517 } 518 519 // The primordial thread is runnable from the start) 520 _starting_thread->set_state(RUNNABLE); 521 522 thread->set_osthread(_starting_thread); 523 return true; 524 } 525 526 // Helper function to trace _beginthreadex attributes, 527 // similar to os::Posix::describe_pthread_attr() 528 static char* describe_beginthreadex_attributes(char* buf, size_t buflen, 529 size_t stacksize, unsigned initflag) { 530 stringStream ss(buf, buflen); 531 if (stacksize == 0) { 532 ss.print("stacksize: default, "); 533 } else { 534 ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024); 535 } 536 ss.print("flags: "); 537 #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " "); 538 #define ALL(X) \ 539 X(CREATE_SUSPENDED) \ 540 X(STACK_SIZE_PARAM_IS_A_RESERVATION) 541 ALL(PRINT_FLAG) 542 #undef ALL 543 #undef PRINT_FLAG 544 return buf; 545 } 546 547 // Allocate and initialize a new OSThread 548 bool os::create_thread(Thread* thread, ThreadType thr_type, 549 size_t stack_size) { 550 
unsigned thread_id; 551 552 // Allocate the OSThread object 553 OSThread* osthread = new OSThread(NULL, NULL); 554 if (osthread == NULL) { 555 return false; 556 } 557 558 // Initialize support for Java interrupts 559 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 560 if (interrupt_event == NULL) { 561 delete osthread; 562 return NULL; 563 } 564 osthread->set_interrupt_event(interrupt_event); 565 osthread->set_interrupted(false); 566 567 thread->set_osthread(osthread); 568 569 if (stack_size == 0) { 570 switch (thr_type) { 571 case os::java_thread: 572 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 573 if (JavaThread::stack_size_at_create() > 0) { 574 stack_size = JavaThread::stack_size_at_create(); 575 } 576 break; 577 case os::compiler_thread: 578 if (CompilerThreadStackSize > 0) { 579 stack_size = (size_t)(CompilerThreadStackSize * K); 580 break; 581 } // else fall through: 582 // use VMThreadStackSize if CompilerThreadStackSize is not defined 583 case os::vm_thread: 584 case os::pgc_thread: 585 case os::cgc_thread: 586 case os::watcher_thread: 587 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 588 break; 589 } 590 } 591 592 // Create the Win32 thread 593 // 594 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() 595 // does not specify stack size. Instead, it specifies the size of 596 // initially committed space. The stack size is determined by 597 // PE header in the executable. If the committed "stack_size" is larger 598 // than default value in the PE header, the stack is rounded up to the 599 // nearest multiple of 1MB. For example if the launcher has default 600 // stack size of 320k, specifying any size less than 320k does not 601 // affect the actual stack size at all, it only affects the initial 602 // commitment. 
On the other hand, specifying 'stack_size' larger than 603 // default value may cause significant increase in memory usage, because 604 // not only the stack space will be rounded up to MB, but also the 605 // entire space is committed upfront. 606 // 607 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' 608 // for CreateThread() that can treat 'stack_size' as stack size. However we 609 // are not supposed to call CreateThread() directly according to MSDN 610 // document because JVM uses C runtime library. The good news is that the 611 // flag appears to work with _beginthredex() as well. 612 613 const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION; 614 HANDLE thread_handle = 615 (HANDLE)_beginthreadex(NULL, 616 (unsigned)stack_size, 617 (unsigned (__stdcall *)(void*)) thread_native_entry, 618 thread, 619 initflag, 620 &thread_id); 621 622 char buf[64]; 623 if (thread_handle != NULL) { 624 log_info(os, thread)("Thread started (tid: %u, attributes: %s)", 625 thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag)); 626 } else { 627 log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.", 628 os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag)); 629 } 630 631 if (thread_handle == NULL) { 632 // Need to clean up stuff we've allocated so far 633 CloseHandle(osthread->interrupt_event()); 634 thread->set_osthread(NULL); 635 delete osthread; 636 return NULL; 637 } 638 639 Atomic::inc(&os::win32::_os_thread_count); 640 641 // Store info on the Win32 thread into the OSThread 642 osthread->set_thread_handle(thread_handle); 643 osthread->set_thread_id(thread_id); 644 645 // Initial thread state is INITIALIZED, not SUSPENDED 646 osthread->set_state(INITIALIZED); 647 648 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 649 return true; 650 } 651 652 653 // Free 
Win32 resources related to the OSThread 654 void os::free_thread(OSThread* osthread) { 655 assert(osthread != NULL, "osthread not set"); 656 657 // We are told to free resources of the argument thread, 658 // but we can only really operate on the current thread. 659 assert(Thread::current()->osthread() == osthread, 660 "os::free_thread but not current thread"); 661 662 CloseHandle(osthread->thread_handle()); 663 CloseHandle(osthread->interrupt_event()); 664 delete osthread; 665 } 666 667 static jlong first_filetime; 668 static jlong initial_performance_count; 669 static jlong performance_frequency; 670 671 672 jlong as_long(LARGE_INTEGER x) { 673 jlong result = 0; // initialization to avoid warning 674 set_high(&result, x.HighPart); 675 set_low(&result, x.LowPart); 676 return result; 677 } 678 679 680 jlong os::elapsed_counter() { 681 LARGE_INTEGER count; 682 QueryPerformanceCounter(&count); 683 return as_long(count) - initial_performance_count; 684 } 685 686 687 jlong os::elapsed_frequency() { 688 return performance_frequency; 689 } 690 691 692 julong os::available_memory() { 693 return win32::available_memory(); 694 } 695 696 julong os::win32::available_memory() { 697 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 698 // value if total memory is larger than 4GB 699 MEMORYSTATUSEX ms; 700 ms.dwLength = sizeof(ms); 701 GlobalMemoryStatusEx(&ms); 702 703 return (julong)ms.ullAvailPhys; 704 } 705 706 julong os::physical_memory() { 707 return win32::physical_memory(); 708 } 709 710 bool os::has_allocatable_memory_limit(julong* limit) { 711 MEMORYSTATUSEX ms; 712 ms.dwLength = sizeof(ms); 713 GlobalMemoryStatusEx(&ms); 714 #ifdef _LP64 715 *limit = (julong)ms.ullAvailVirtual; 716 return true; 717 #else 718 // Limit to 1400m because of the 2gb address space wall 719 *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual); 720 return true; 721 #endif 722 } 723 724 int os::active_processor_count() { 725 // User has overridden the number 
of active processors 726 if (ActiveProcessorCount > 0) { 727 log_trace(os)("active_processor_count: " 728 "active processor count set by user : %d", 729 ActiveProcessorCount); 730 return ActiveProcessorCount; 731 } 732 733 DWORD_PTR lpProcessAffinityMask = 0; 734 DWORD_PTR lpSystemAffinityMask = 0; 735 int proc_count = processor_count(); 736 if (proc_count <= sizeof(UINT_PTR) * BitsPerByte && 737 GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) { 738 // Nof active processors is number of bits in process affinity mask 739 int bitcount = 0; 740 while (lpProcessAffinityMask != 0) { 741 lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1); 742 bitcount++; 743 } 744 return bitcount; 745 } else { 746 return proc_count; 747 } 748 } 749 750 void os::set_native_thread_name(const char *name) { 751 752 // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx 753 // 754 // Note that unfortunately this only works if the process 755 // is already attached to a debugger; debugger must observe 756 // the exception below to show the correct name. 757 758 // If there is no debugger attached skip raising the exception 759 if (!IsDebuggerPresent()) { 760 return; 761 } 762 763 const DWORD MS_VC_EXCEPTION = 0x406D1388; 764 struct { 765 DWORD dwType; // must be 0x1000 766 LPCSTR szName; // pointer to name (in user addr space) 767 DWORD dwThreadID; // thread ID (-1=caller thread) 768 DWORD dwFlags; // reserved for future use, must be zero 769 } info; 770 771 info.dwType = 0x1000; 772 info.szName = name; 773 info.dwThreadID = -1; 774 info.dwFlags = 0; 775 776 __try { 777 RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info ); 778 } __except(EXCEPTION_EXECUTE_HANDLER) {} 779 } 780 781 bool os::distribute_processes(uint length, uint* distribution) { 782 // Not yet implemented. 783 return false; 784 } 785 786 bool os::bind_to_processor(uint processor_id) { 787 // Not yet implemented. 
788 return false; 789 } 790 791 void os::win32::initialize_performance_counter() { 792 LARGE_INTEGER count; 793 QueryPerformanceFrequency(&count); 794 performance_frequency = as_long(count); 795 QueryPerformanceCounter(&count); 796 initial_performance_count = as_long(count); 797 } 798 799 800 double os::elapsedTime() { 801 return (double) elapsed_counter() / (double) elapsed_frequency(); 802 } 803 804 805 // Windows format: 806 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601. 807 // Java format: 808 // Java standards require the number of milliseconds since 1/1/1970 809 810 // Constant offset - calculated using offset() 811 static jlong _offset = 116444736000000000; 812 // Fake time counter for reproducible results when debugging 813 static jlong fake_time = 0; 814 815 #ifdef ASSERT 816 // Just to be safe, recalculate the offset in debug mode 817 static jlong _calculated_offset = 0; 818 static int _has_calculated_offset = 0; 819 820 jlong offset() { 821 if (_has_calculated_offset) return _calculated_offset; 822 SYSTEMTIME java_origin; 823 java_origin.wYear = 1970; 824 java_origin.wMonth = 1; 825 java_origin.wDayOfWeek = 0; // ignored 826 java_origin.wDay = 1; 827 java_origin.wHour = 0; 828 java_origin.wMinute = 0; 829 java_origin.wSecond = 0; 830 java_origin.wMilliseconds = 0; 831 FILETIME jot; 832 if (!SystemTimeToFileTime(&java_origin, &jot)) { 833 fatal("Error = %d\nWindows error", GetLastError()); 834 } 835 _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime); 836 _has_calculated_offset = 1; 837 assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal"); 838 return _calculated_offset; 839 } 840 #else 841 jlong offset() { 842 return _offset; 843 } 844 #endif 845 846 jlong windows_to_java_time(FILETIME wt) { 847 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 848 return (a - offset()) / 10000; 849 } 850 851 // Returns time ticks in 
(10th of micro seconds) 852 jlong windows_to_time_ticks(FILETIME wt) { 853 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 854 return (a - offset()); 855 } 856 857 FILETIME java_to_windows_time(jlong l) { 858 jlong a = (l * 10000) + offset(); 859 FILETIME result; 860 result.dwHighDateTime = high(a); 861 result.dwLowDateTime = low(a); 862 return result; 863 } 864 865 bool os::supports_vtime() { return true; } 866 bool os::enable_vtime() { return false; } 867 bool os::vtime_enabled() { return false; } 868 869 double os::elapsedVTime() { 870 FILETIME created; 871 FILETIME exited; 872 FILETIME kernel; 873 FILETIME user; 874 if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) { 875 // the resolution of windows_to_java_time() should be sufficient (ms) 876 return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS; 877 } else { 878 return elapsedTime(); 879 } 880 } 881 882 jlong os::javaTimeMillis() { 883 if (UseFakeTimers) { 884 return fake_time++; 885 } else { 886 FILETIME wt; 887 GetSystemTimeAsFileTime(&wt); 888 return windows_to_java_time(wt); 889 } 890 } 891 892 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) { 893 FILETIME wt; 894 GetSystemTimeAsFileTime(&wt); 895 jlong ticks = windows_to_time_ticks(wt); // 10th of micros 896 jlong secs = jlong(ticks / 10000000); // 10000 * 1000 897 seconds = secs; 898 nanos = jlong(ticks - (secs*10000000)) * 100; 899 } 900 901 jlong os::javaTimeNanos() { 902 LARGE_INTEGER current_count; 903 QueryPerformanceCounter(¤t_count); 904 double current = as_long(current_count); 905 double freq = performance_frequency; 906 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC); 907 return time; 908 } 909 910 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 911 jlong freq = performance_frequency; 912 if (freq < NANOSECS_PER_SEC) { 913 // the performance counter is 64 bits and we will 914 // be multiplying it -- so no wrap in 64 bits 915 info_ptr->max_value 
= ALL_64_BITS;
  } else if (freq > NANOSECS_PER_SEC) {
    // use the max value the counter can reach to
    // determine the max value which could be returned
    julong max_counter = (julong)ALL_64_BITS;
    info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
  } else {
    // the performance counter is 64 bits and we will
    // be using it directly -- so no wrap in 64 bits
    info_ptr->max_value = ALL_64_BITS;
  }

  // using a counter, so no skipping
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;

  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}
// (The lines above are the tail of a timer-info function that fills in a
// jvmtiTimerInfo through info_ptr; its head is outside this chunk —
// presumably os::javaTimeNanos_info, TODO confirm against the full file.)

// Format the current local wall-clock time as "YYYY-MM-DD HH:MM:SS" into
// buf (truncated to buflen by jio_snprintf) and return buf.
char* os::local_time_string(char *buf, size_t buflen) {
  SYSTEMTIME st;
  GetLocalTime(&st);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
  return buf;
}

// Fill in real (wall-clock), user, and system CPU times for the current
// process, each in seconds. Returns false if GetProcessTimes() failed,
// leaving the output parameters untouched.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  HANDLE h_process = GetCurrentProcess();
  FILETIME create_time, exit_time, kernel_time, user_time;
  BOOL result = GetProcessTimes(h_process,
                                &create_time,
                                &exit_time,
                                &kernel_time,
                                &user_time);
  if (result != 0) {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    jlong rtc_millis = windows_to_java_time(wt);
    *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
    // FILETIME values are in 100ns units; 10 * MICROUNITS converts to seconds.
    *process_user_time =
      (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
    *process_system_time =
      (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
    return true;
  } else {
    return false;
  }
}

// Orderly VM shutdown: release PerfMemory resources, flush logging
// streams, then run the user-registered abort hook (if any).
void os::shutdown() {
  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}


// Handle of the minidump file opened by check_dump_limit(); consumed by
// os::abort() below.
static HANDLE dumpFile = NULL;

// Check if dump file can be created. On success, buffer receives the dump
// file path; on failure, buffer receives the reason no dump will be
// written. The result is recorded via VMError::record_coredump_status().
void os::check_dump_limit(char* buffer, size_t buffsz) {
  bool status = true;
  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
    jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
    status = false;
  }

#ifndef ASSERT
  // In product builds, minidumps are opt-in on client (workstation) Windows.
  if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
    jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
    status = false;
  }
#endif

  if (status) {
    const char* cwd = get_current_directory(NULL, 0);
    int pid = current_process_id();
    if (cwd != NULL) {
      jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
    } else {
      jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
    }

    // Open the dump file eagerly so a later crash-time abort() can write
    // into it without further allocation/open failures.
    if (dumpFile == NULL &&
        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
        == INVALID_HANDLE_VALUE) {
      jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
      status = false;
    }
  }
  VMError::record_coredump_status(buffer, status);
}

// Terminate the process after an orderly shutdown(), optionally writing a
// minidump (if dump_core and check_dump_limit() previously opened the
// file). siginfo/context, when provided, embed the faulting exception
// state into the dump. Never returns.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  MINIDUMP_TYPE dumpType;

  shutdown();
  if (!dump_core || dumpFile == NULL) {
    if (dumpFile != NULL) {
      CloseHandle(dumpFile);
    }
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
                             MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo |
                             MiniDumpWithUnloadedModules);

  if (siginfo != NULL && context != NULL) {
    ep.ContextRecord = (PCONTEXT) context;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }

  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
      !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
    jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
  }
  CloseHandle(dumpFile);
  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
// * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

// Caller must have already run dirname through JVM_NativePath, which removes
// duplicate slashes and converts all instances of '/' into '\\'.

// POSIX-style opendir() emulation on top of FindFirstFile(). Returns a
// heap-allocated DIR*, or NULL with errno set (ENOMEM/ENOENT/ENOTDIR/EACCES).
DIR * os::opendir(const char *dirname) {
  assert(dirname != NULL, "just checking");   // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
  DWORD fattr;                                // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == 0) {
    errno = ENOMEM;
    return 0;
  }

  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
  // as a directory in FindFirstFile().
  // We detect this case here and
  // prepend the current drive name.
  //
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    // _getdrive() returns 1 for A:, 2 for B:, ... — map to a drive letter.
    alt_dirname[0] = _getdrive() + 'A' - 1;
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  // +5 leaves room for the "\\*.*" suffix appended below plus the NUL.
  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dirp->path == 0) {
    free(dirp);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dirp->path, dirname);

  fattr = GetFileAttributes(dirp->path);
  if (fattr == 0xffffffff) {    // INVALID_FILE_ATTRIBUTES: path does not exist
    free(dirp->path);
    free(dirp);
    errno = ENOENT;
    return 0;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path);
    free(dirp);
    errno = ENOTDIR;
    return 0;
  }

  // Append "*.*", or possibly "\\*.*", to path
  if (dirp->path[1] == ':' &&
      (dirp->path[2] == '\0' ||
       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    // No '\\' needed for cases like "Z:" or "Z:\"
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    // ERROR_FILE_NOT_FOUND means an empty directory: still a valid DIR*
    // (readdir will simply return NULL). Anything else is a hard failure.
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path);
      free(dirp);
      errno = EACCES;
      return 0;
    }
  }
  return dirp;
}

// POSIX-style readdir() emulation: returns the entry fetched by the
// previous FindFirstFile/FindNextFile call and pre-fetches the next one.
// Returns NULL at end of directory or on error (errno = EBADF).
// parameter dbuf unused on Windows
struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
  assert(dirp != NULL, "just checking");   // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    return 0;
  }

  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return 0;
    }
    // No more entries: close the find handle now, mark the DIR exhausted.
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}

// POSIX-style closedir() emulation: closes the find handle (if still open)
// and frees the DIR and its path buffer. Returns 0, or -1 with errno=EBADF.
int os::closedir(DIR *dirp) {
  assert(dirp != NULL, "just checking");  // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path);
  free(dirp);
  return 0;
}

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
// Returns a pointer to a function-local static buffer (empty string on
// failure) — not thread-safe with respect to concurrent first calls.
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf) > 0) {
    return path_buf;
  } else {
    path_buf[0] = '\0';
    return path_buf;
  }
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
// Thin wrapper over _getcwd(); clamps buflen to INT_MAX for the CRT API.
const char* os::get_current_directory(char *buf, size_t buflen) {
  int n = static_cast<int>(buflen);
  if (buflen > INT_MAX) n = INT_MAX;
  return _getcwd(buf, n);
}

//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if address in
// within the NTDLL address space.
//
static bool _addr_in_ntdll(address addr) {
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if (hmod == NULL) return false;
  if (!GetModuleInformation(GetCurrentProcess(), hmod,
                            &minfo, sizeof(MODULEINFO))) {
    return false;
  }

  // In range [base, base + SizeOfImage) of the loaded ntdll image?
  if ((addr >= minfo.lpBaseOfDll) &&
      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
    return true;
  } else {
    return false;
  }
}
#endif

// Parameter bundle for _locate_module_by_addr (passed through the generic
// void* callback parameter of get_loaded_modules_info).
struct _modinfo {
  address addr;
  char*   full_path;  // point to a char buffer
  int     buflen;     // size of the buffer
  address base_addr;
};

// get_loaded_modules_info() callback: stop (return 1) at the first module
// whose [base_addr, top_address) range contains pmod->addr, recording its
// path and base address into the _modinfo.
static int _locate_module_by_addr(const char * mod_fname, address base_addr,
                                  address top_address, void * param) {
  struct _modinfo *pmod = (struct _modinfo *)param;
  if (!pmod) return -1;

  if (base_addr <= pmod->addr &&
      top_address > pmod->addr) {
    // if a buffer is provided, copy path name to the buffer
    if (pmod->full_path) {
      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
    }
    pmod->base_addr = base_addr;
    return 1;
  }
  return 0;
}

// Resolve addr to the full path of the containing loaded module; on
// success buf holds the path and *offset (if non-NULL) the offset from the
// module base. On failure buf is emptied and *offset set to -1.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
  //       return the full path to the DLL file, sometimes it returns path
  //       to the corresponding PDB file (debug info); sometimes it only
  //       returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr      = addr;
  mi.full_path = buf;
  mi.buflen    = buflen;
  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

// Resolve addr to a (possibly demangled) symbol name via the Decoder;
// on failure buf is emptied and *offset (if provided) set to -1.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
    return true;
  }
  if (offset != NULL) *offset = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
// Works by looking for the module that contains this function's own
// address (&_locate_jvm_dll), which by definition is jvm.dll.
static int _locate_jvm_dll(const char* mod_fname, address base_addr,
                           address top_address, void * param) {
  if (!param) return -1;

  if (base_addr <= (address)_locate_jvm_dll &&
      top_address > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = top_address;
    return 1;
  }
  return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
// Lazily populates vm_lib_location on first use.
bool os::address_is_in_vm(address addr) {
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(const char* fname, address base_address,
                         address top_address, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n",
            base_address, top_address, fname);
  return 0;    // 0 = keep enumerating all modules
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
// Always returns NULL on failure, with a human-readable reason in ebuf.
void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
  void * result = LoadLibrary(name);
  if (result != NULL) {
    // Recalculate pdb search path if a DLL was loaded successfully.
    SymbolEngine::recalc_search_path();
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
    ebuf[ebuflen - 1] = '\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen - 1] = '\0';
  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
  if (fd < 0) {
    return NULL;
  }

  // Walk the PE header by hand: offset 0x3c holds the offset of the
  // "PE\0\0" signature, and the COFF header's Machine field follows it.
  uint32_t signature_offset;
  uint16_t lib_arch = 0;
  bool failed_to_get_lib_arch =
    ( // Go to position 3c in the dll
      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
      ||
      // Read location of signature
      (sizeof(signature_offset) !=
      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
      ||
      // Go to COFF File Header in dll
      // that is located after "signature" (4 bytes long)
      (os::seek_to_file_offset(fd,
      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
      ||
      // Read field that contains code of architecture
      // that dll was built for
      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
    );

  ::close(fd);
  if (failed_to_get_lib_arch) {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  typedef struct {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[] = {
    {IMAGE_FILE_MACHINE_I386,  (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"}
  };
#if (defined _M_AMD64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
         is defined :_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str = NULL, *lib_arch_str = NULL;
  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (lib_arch == arch_array[i].arch_code) {
      lib_arch_str = arch_array[i].arch_name;
    }
    if (running_arch == arch_array[i].arch_code) {
      running_arch_str = arch_array[i].arch_name;
    }
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch) {
    return NULL;
  }

  if (lib_arch_str != NULL) {
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str, running_arch_str);
  } else {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch, running_arch_str);
  }

  return NULL;
}

// List all loaded modules of this process on the given stream.
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  get_loaded_modules_info(_print_module, (void *)st);
}

// Enumerate up to MAX_NUM_MODULES loaded modules, invoking callback with
// (path, base, top, param) for each; stops early when the callback returns
// non-zero and returns that value (0 otherwise).
// NOTE(review): 'filename' is a function-local static buffer, so concurrent
// calls would race on it — presumably callers are serialized; confirm.
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  HANDLE   hProcess;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[MAX_PATH];
  int         result = 0;

  int pid = os::current_process_id();
  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = callback(filename, (address)modinfo.lpBaseOfDll,
                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
}

// Return the DNS host name of this machine in buf; false on API failure.
bool os::get_host_name(char* buf, size_t buflen) {
  DWORD size = (DWORD)buflen;
  return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
}

// One-line OS description (Windows version string, newline stripped).
void os::get_summary_os_info(char* buf, size_t buflen) {
  stringStream sst(buf, buflen);
  os::win32::print_windows_version(&sst);
  // chop off newline character
  char* nl = strchr(buf, '\n');
  if (nl != NULL) *nl = '\0';
}

// C99-conforming vsnprintf shim: guarantees NUL termination (len > 0) and
// returns the would-be length, papering over pre-VS2015 CRT differences.
int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
#if _MSC_VER >= 1900
  // Starting with Visual Studio 2015, vsnprint is C99 compliant.
  int result = ::vsnprintf(buf, len, fmt, args);
  // If an encoding error occurred (result < 0) then it's not clear
  // whether the buffer is NUL terminated, so ensure it is.
  if ((result < 0) && (len > 0)) {
    buf[len - 1] = '\0';
  }
  return result;
#else
  // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
  // _vsnprintf, whose behavior seems to be *mostly* consistent across
  // versions.  However, when len == 0, avoid _vsnprintf too, and just
  // go straight to _vscprintf.  The output is going to be truncated in
  // that case, except in the unusual case of empty output.  More
  // importantly, the documentation for various versions of Visual Studio
  // are inconsistent about the behavior of _vsnprintf when len == 0,
  // including it possibly being an error.
  int result = -1;
  if (len > 0) {
    result = _vsnprintf(buf, len, fmt, args);
    // If output (including NUL terminator) is truncated, the buffer
    // won't be NUL terminated.  Add the trailing NUL specified by C99.
    if ((result < 0) || ((size_t)result >= len)) {
      buf[len - 1] = '\0';
    }
  }
  if (result < 0) {
    // _vscprintf computes the full formatted length without writing.
    result = _vscprintf(fmt, args);
  }
  return result;
#endif // _MSC_VER dispatch
}

// Modification time of filename; asserts (debug builds only) on stat failure.
static inline time_t get_mtime(const char* filename) {
  struct stat st;
  int ret = os::stat(filename, &st);
  assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno));
  return st.st_mtime;
}

// Compare mtimes of two files; <0, 0, >0 like strcmp.
// NOTE(review): the time_t difference is narrowed to int, so differences
// larger than INT_MAX seconds could report the wrong sign — harmless for
// the intended use but worth confirming.
int os::compare_file_modified_times(const char* file1, const char* file2) {
  time_t t1 = get_mtime(file1);
  time_t t2 = get_mtime(file2);
  return t1 - t2;
}

void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}

// Print host name (debug builds) and the Windows version line.
void os::print_os_info(outputStream* st) {
#ifdef ASSERT
  char buffer[1024];
  st->print("HostName: ");
  if (get_host_name(buffer, sizeof(buffer))) {
    st->print("%s ", buffer);
  } else {
    st->print("N/A ");
  }
#endif
  st->print("OS:");
  os::win32::print_windows_version(st);
}

// Print a human-readable Windows version, derived from kernel32.dll's
// file-version resource (reliable where GetVersionEx lies post-8.1).
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  VS_FIXEDFILEINFO *file_info;
  TCHAR kernel32_path[MAX_PATH];
  UINT len, ret;

  // Use the GetVersionEx information to see if we're on a server or
  // workstation edition of Windows.  Starting with Windows 8.1 we can't
  // trust the OS version information returned by this API.
  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("Call to GetVersionEx failed");
    return;
  }
  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);

  // Get the full path to \Windows\System32\kernel32.dll and use that for
  // determining what version of Windows we're running on.
  // Reserve room for the "\kernel32.dll" suffix and the NUL.
  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
  ret = GetSystemDirectory(kernel32_path, len);
  if (ret == 0 || ret > len) {
    st->print_cr("Call to GetSystemDirectory failed");
    return;
  }
  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);

  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
  if (version_size == 0) {
    st->print_cr("Call to GetFileVersionInfoSize failed");
    return;
  }

  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
  if (version_info == NULL) {
    st->print_cr("Failed to allocate version_info");
    return;
  }

  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
    os::free(version_info);
    st->print_cr("Call to GetFileVersionInfo failed");
    return;
  }

  // Query the root block ("\\") for the fixed file-version numbers.
  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
    os::free(version_info);
    st->print_cr("Call to VerQueryValue failed");
    return;
  }

  int major_version = HIWORD(file_info->dwProductVersionMS);
  int minor_version = LOWORD(file_info->dwProductVersionMS);
  int build_number = HIWORD(file_info->dwProductVersionLS);
  int build_minor = LOWORD(file_info->dwProductVersionLS);
  int os_vers = major_version * 1000 + minor_version;
  os::free(version_info);

  st->print(" Windows ");
  switch (os_vers) {

  case 6000:
    if (is_workstation) {
      st->print("Vista");
    } else {
      st->print("Server 2008");
    }
    break;

  case 6001:
    if (is_workstation) {
      st->print("7");
    } else {
      st->print("Server 2008 R2");
    }
    break;

  case 6002:
    if (is_workstation) {
      st->print("8");
    } else {
      st->print("Server 2012");
    }
    break;

  case 6003:
    if (is_workstation) {
      st->print("8.1");
    } else {
      st->print("Server 2012 R2");
    }
    break;

  case 10000:
    if (is_workstation) {
      st->print("10");
    } else {
      st->print("Server 2016");
    }
    break;

  default:
    // Unrecognized windows, print out its major and minor versions
    st->print("%d.%d", major_version, minor_version);
    break;
  }

  // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
  // find out whether we are running on 64 bit processor or not
  SYSTEM_INFO si;
  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  GetNativeSystemInfo(&si);
  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
    st->print(" , 64 bit");
  }

  st->print(" Build %d", build_number);
  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
  st->cr();
}

void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  // Nothing to do for now.
}

// One-line CPU description read from the registry; falls back to a
// generic "## __CPU__" placeholder on any registry failure.
void os::get_summary_cpu_info(char* buf, size_t buflen) {
  HKEY key;
  DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
                 "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
  if (status == ERROR_SUCCESS) {
    DWORD size = (DWORD)buflen;
    status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
    if (status != ERROR_SUCCESS) {
      strncpy(buf, "## __CPU__", buflen);
    }
    RegCloseKey(key);
  } else {
    // Put generic cpu info to return
    strncpy(buf, "## __CPU__", buflen);
  }
}

// Print page size, physical/available memory and swap usage.
void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);

  st->print(", physical %uk", os::physical_memory() >> 10);
  st->print("(%uk free)", os::available_memory() >> 10);

  // NOTE(review): ullTotalPageFile/ullAvailPageFile are 64-bit (DWORDLONG)
  // but are printed with "%u"; values above 4T kilobytes would be
  // misreported. Consider a 64-bit format specifier.
  st->print(", swap %uk", ms.ullTotalPageFile >> 10);
  st->print("(%uk free)", ms.ullAvailPageFile >> 10);
  st->cr();
}

// Pretty-print a Windows EXCEPTION_RECORD (used by the error reporter).
void os::print_siginfo(outputStream *st, const void* siginfo) {
  const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
  st->print("siginfo:");

  char tmp[64];
  if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
    strcpy(tmp, "EXCEPTION_??");
  }
  st->print(" %s (0x%x)", tmp, er->ExceptionCode);

  // For access violations / in-page errors, ExceptionInformation[0] encodes
  // the access type and [1] the faulting address.
  if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
       er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
       er->NumberParameters >= 2) {
    switch (er->ExceptionInformation[0]) {
    case 0: st->print(", reading address"); break;
    case 1: st->print(", writing address"); break;
    case 8: st->print(", data execution prevention violation at address"); break;
    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                       er->ExceptionInformation[0]);
    }
    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else {
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}

// Cached result of os::jvm_path(); computed once, then copied out.
static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like jvm.dll is installed there (append a fake suffix
    // hotspot/jvm.dll).
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {
      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      if (0 != _access(buf, 0)) {
        // "<JAVA_HOME>\jre\bin\" does not exist: assume a modules image.
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  if (buf[0] == '\0') {
    // Default: ask the OS for the path of the already-loaded jvm.dll.
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  strncpy(saved_jvm_path, buf, MAX_PATH);
  saved_jvm_path[MAX_PATH - 1] = '\0';
}


// 32-bit stdcall name decoration: leading underscore prefix.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}


// 32-bit stdcall name decoration: "@<bytes of arguments>" suffix.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("@%d", args_size * sizeof(int));
#endif
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c
//
// Copies the message for GetLastError() (or, failing that, errno) into
// buf and returns its length; returns 0 when there is no pending error.

size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
                                     FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
                                     NULL,
                                     errval,
                                     0,
                                     buf,
                                     (DWORD)len,
                                     NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = os::strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}

// Raw numeric error code: GetLastError() if set, otherwise errno.
int os::get_last_error() {
  DWORD error = GetLastError();
  if (error == 0) {
    error = errno;
  }
  return (int)error;
}

// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;

// Generic user-signal trampoline: record the signal for the signal thread
// and re-arm itself (Windows CRT resets handlers after delivery).
static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}

void* os::user_handler() {
  return (void*) UserHandler;
}

// Install handler for signal_number, returning the previous handler.
// SIGBREAK is kept out of the CRT and routed via consoleHandler instead
// (see the workaround note above), unless -Xrs disabled that machinery.
void* os::signal(int signal_number, void* handler) {
  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
    void (*oldHandler)(int) = sigbreakHandler;
    sigbreakHandler = (void (*)(int)) handler;
    return (void*) oldHandler;
  } else {
    return (void*)::signal(signal_number, (void (*)(int))handler);
  }
}

void os::signal_raise(int signal_number) {
  raise(signal_number);
}

// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events.  We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
  switch (event) {
  case CTRL_C_EVENT:
    if (VMError::is_error_reported()) {
      // Ctrl-C is pressed during error reporting, likely because the error
      // handler fails to abort. Let VM die immediately.
      os::die();
    }

    os::signal_raise(SIGINT);
    return TRUE;
    break;
  case CTRL_BREAK_EVENT:
    // Dispatch directly to the registered SIGBREAK handler (see the
    // SIGBREAK workaround note above os::signal()).
    if (sigbreakHandler != NULL) {
      (*sigbreakHandler)(SIGBREAK);
    }
    return TRUE;
    break;
  case CTRL_LOGOFF_EVENT: {
    // Don't terminate JVM if it is running in a non-interactive session,
    // such as a service process.
    USEROBJECTFLAGS flags;
    HANDLE handle = GetProcessWindowStation();
    if (handle != NULL &&
        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
        sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let next handler to deal
      // with it.
      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
        return FALSE;
      }
    }
  }
  // Interactive logoff deliberately falls through to the SIGTERM cases.
  case CTRL_CLOSE_EVENT:
  case CTRL_SHUTDOWN_EVENT:
    os::signal_raise(SIGTERM);
    return TRUE;
    break;
  default:
    break;
  }
  return FALSE;
}

// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
static Semaphore* sig_sem = NULL;

// Set up the pending-signal counters, the wakeup semaphore, and (unless
// -Xrs) the console control handler.
void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  sig_sem = new Semaphore();

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.  For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified.  This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case.  See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}

// Record a pending occurrence of sig and wake the signal thread.
void os::signal_notify(int sig) {
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[sig]);
    sig_sem->signal();
  } else {
    // Signal thread is not created with ReduceSignalUsage and signal_init_pd
    // initialization isn't called.
    assert(ReduceSignalUsage, "signal semaphore should be created");
  }
}

// Block until some signal has been raised at least once, consume one
// occurrence (atomically, via cmpxchg decrement) and return its number.
// Runs on the signal thread; cooperates with external thread suspension.
static int check_pending_signals() {
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      sig_sem->wait();

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us.  We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        sig_sem->signal();

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

int os::signal_wait() {
  return check_pending_signals();
}

// Implicit OS exception handling

// Redirect a faulting thread to 'handler': save the faulting pc in the
// current JavaThread (if any), overwrite the context's pc with handler,
// and resume execution there.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
                      address handler) {
  JavaThread* thread = (JavaThread*) Thread::current_or_null();
  // Save pc in thread
#ifdef _M_AMD64
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
#else
  // Do not blow up if no thread info available.
2051 if (thread) { 2052 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip); 2053 } 2054 // Set pc to handler 2055 exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler; 2056 #endif 2057 2058 // Continue the execution 2059 return EXCEPTION_CONTINUE_EXECUTION; 2060 } 2061 2062 2063 // Used for PostMortemDump 2064 extern "C" void safepoints(); 2065 extern "C" void find(int x); 2066 extern "C" void events(); 2067 2068 // According to Windows API documentation, an illegal instruction sequence should generate 2069 // the 0xC000001C exception code. However, real world experience shows that occasionnaly 2070 // the execution of an illegal instruction can generate the exception code 0xC000001E. This 2071 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems). 2072 2073 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E 2074 2075 // From "Execution Protection in the Windows Operating System" draft 0.35 2076 // Once a system header becomes available, the "real" define should be 2077 // included or copied here. 2078 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08 2079 2080 // Windows Vista/2008 heap corruption check 2081 #define EXCEPTION_HEAP_CORRUPTION 0xC0000374 2082 2083 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual 2084 // C++ compiler contain this error code. Because this is a compiler-generated 2085 // error, the code is not listed in the Win32 API header files. 2086 // The code is actually a cryptic mnemonic device, with the initial "E" 2087 // standing for "exception" and the final 3 bytes (0x6D7363) representing the 2088 // ASCII values of "msc". 

#define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363

#define def_excpt(val) { #val, (val) }

// Table mapping NTSTATUS exception codes to their symbolic names, used by
// os::exception_name() for error reporting.
// NOTE(review): 'name' points at string literals and should be 'const char*'.
static const struct { char* name; uint number; } exceptlabels[] = {
  def_excpt(EXCEPTION_ACCESS_VIOLATION),
  def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
  def_excpt(EXCEPTION_BREAKPOINT),
  def_excpt(EXCEPTION_SINGLE_STEP),
  def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
  def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
  def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
  def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
  def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
  def_excpt(EXCEPTION_FLT_OVERFLOW),
  def_excpt(EXCEPTION_FLT_STACK_CHECK),
  def_excpt(EXCEPTION_FLT_UNDERFLOW),
  def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
  def_excpt(EXCEPTION_INT_OVERFLOW),
  def_excpt(EXCEPTION_PRIV_INSTRUCTION),
  def_excpt(EXCEPTION_IN_PAGE_ERROR),
  def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
  def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
  def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
  def_excpt(EXCEPTION_STACK_OVERFLOW),
  def_excpt(EXCEPTION_INVALID_DISPOSITION),
  def_excpt(EXCEPTION_GUARD_PAGE),
  def_excpt(EXCEPTION_INVALID_HANDLE),
  def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
  def_excpt(EXCEPTION_HEAP_CORRUPTION)
};

#undef def_excpt

// Formats the symbolic name of 'exception_code' into 'buf' and returns 'buf',
// or returns NULL (buf untouched) for codes not in exceptlabels[].
const char* os::exception_name(int exception_code, char *buf, size_t size) {
  uint code = static_cast<uint>(exception_code);
  for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
    if (exceptlabels[i].number == code) {
      jio_snprintf(buf, size, "%s", exceptlabels[i].name);
      return buf;
    }
  }

  return NULL;
}

//-----------------------------------------------------------------------------
LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by idiv; should only happen for -MinInt/-1
  // (division by zero is handled explicitly)
#ifdef _M_AMD64
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Rip;
  // The faulting instruction must be idiv (opcode 0xF7), optionally
  // preceded by a REX prefix, operating on a register operand.
  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  if (pc[0] == 0xF7) {
    // set correct result values and continue after idiv instruction
    ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
  } else {
    ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
  }
  // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
  // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
  // idiv opcode (0xF7).
  ctx->Rdx = (DWORD)0;                 // remainder
  // Continue the execution
#else
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Eip;
  assert(pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  assert(ctx->Eax == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Eip = (DWORD)pc + 2;            // idiv reg, reg  is 2 bytes
  ctx->Eax = (DWORD)min_jint;          // result
  ctx->Edx = (DWORD)0;                 // remainder
  // Continue the execution
#endif
  return EXCEPTION_CONTINUE_EXECUTION;
}

//-----------------------------------------------------------------------------
// Handles FP exceptions caused by native code leaving a non-standard control
// word behind: on 32-bit, restores the VM's x87 FPCW (masking FP exceptions);
// on 64-bit, restores the standard MXCSR. Returns EXCEPTION_CONTINUE_EXECUTION
// when the control state was repaired, EXCEPTION_CONTINUE_SEARCH otherwise.
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  PCONTEXT ctx = exceptionInfo->ContextRecord;
#ifndef _WIN64
  // handle exception caused by native method modifying control word
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;

  switch (exception_code) {
  // All FLT cases deliberately share the body below.
  case EXCEPTION_FLT_DENORMAL_OPERAND:
  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
  case EXCEPTION_FLT_INEXACT_RESULT:
  case EXCEPTION_FLT_INVALID_OPERATION:
  case EXCEPTION_FLT_OVERFLOW:
  case EXCEPTION_FLT_STACK_CHECK:
  case EXCEPTION_FLT_UNDERFLOW:
    jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
    if (fp_control_word != ctx->FloatSave.ControlWord) {
      // Restore FPCW and mask out FLT exceptions
      ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
      // Mask out pending FLT exceptions
      ctx->FloatSave.StatusWord &= 0xffffff00;
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if (prev_uef_handler != NULL) {
    // We didn't handle this exception so pass it to the previous
    // UnhandledExceptionFilter.
    return (prev_uef_handler)(exceptionInfo);
  }
#else // !_WIN64
  // On Windows, the mxcsr control bits are non-volatile across calls
  // See also CR 6192333
  //
  jint MxCsr = INITIAL_MXCSR;
  // we can't use StubRoutines::addr_mxcsr_std()
  // because in Win64 mxcsr is not saved there
  if (MxCsr != ctx->MxCsr) {
    ctx->MxCsr = MxCsr;
    return EXCEPTION_CONTINUE_EXECUTION;
  }
#endif // !_WIN64

  return EXCEPTION_CONTINUE_SEARCH;
}

// Funnels a fatal exception into VMError for hs_err reporting.
static inline void report_error(Thread* t, DWORD exception_code,
                                address addr, void* siginfo, void* context) {
  VMError::report_and_die(t, exception_code, addr, siginfo, context);

  // If UseOsErrorReporting, this will return here and save the error file
  // somewhere where we can find it in the minidump.
}

// Computes, into *fr, the Java frame that performed the stack banging which
// raised this exception. Returns false when the frame cannot be determined
// (caller then falls back to default stack-overflow handling).
bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
        struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
  PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
  address addr = (address) exceptionRecord->ExceptionInformation[1];
  if (Interpreter::contains(pc)) {
    // Interpreted code: the banging frame is the interpreter frame itself
    // (or its Java sender if we banged while pushing a new frame).
    *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
    if (!fr->is_first_java_frame()) {
      // get_frame_at_stack_banging_point() is only called when we
      // have well defined stacks so java_sender() calls do not need
      // to assert safe_for_sender() first.
      *fr = fr->java_sender();
    }
  } else {
    // more complex code with compiled code
    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
    CodeBlob* cb = CodeCache::find_blob(pc);
    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
      // Not sure where the pc points to, fallback to default
      // stack overflow handling
      return false;
    } else {
      *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
      // in compiled code, the stack banging is performed just after the return pc
      // has been pushed on the stack
      *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
      if (!fr->is_java_frame()) {
        // See java_sender() comment above.
        *fr = fr->java_sender();
      }
    }
  }
  assert(fr->is_java_frame(), "Safety check");
  return true;
}

//-----------------------------------------------------------------------------
// The VM's top-level Windows structured exception filter. Order matters:
// SafeFetch faults, (32-bit) execution-protection violations, the memory
// serialization page, CPU-feature probing faults, stack overflow / access
// violations in Java code, unsafe-access page errors, integer div/overflow,
// and finally stray FP control-word damage are each handled before falling
// through to report_error() for anything unrecognized (except breakpoints).
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#ifdef _M_AMD64
  address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
  Thread* t = Thread::current_or_null_safe();

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      bool instr_spans_page_boundary =
        (align_down((intptr_t) pc ^ (intptr_t) addr,
                    (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start = align_down(addr, page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          log_debug(os)("Execution protection violation "
                        "at " INTPTR_FORMAT
                        ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
                        p2i(page_start), (res ? "success" : os::strerror(errno)));

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here. Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM. Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop. This condition is probably even more unlikely
          // than the first.
          //
          // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are unnecessary complication:
          // this handler is a best-effort safety net, not a complete solution.
          // It is disabled by default and should only be used as a workaround
          // in case we missed any no-execute-unsafe VM code.

          last_addr = addr;

          return EXCEPTION_CONTINUE_EXECUTION;
        }
      }

      // Last unguard failed or not unguarding
      tty->print_raw_cr("Execution protection violation");
      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    }
  }
#endif // _WIN64

  // Check to see if we caught the safepoint code in the
  // process of write protecting the memory serialization page.
  // It write enables the page immediately after protecting it
  // so just return.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    if (t != NULL && t->is_Java_thread()) {
      JavaThread* thread = (JavaThread*) t;
      PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
      address addr = (address) exceptionRecord->ExceptionInformation[1];
      if (os::is_memory_serialize_page(thread, addr)) {
        // Block current thread until the memory serialize page permission restored.
        os::block_on_serialize_page_trap();
        return EXCEPTION_CONTINUE_EXECUTION;
      }
    }
  }

  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
      VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
  }

  if (t != NULL && t->is_Java_thread()) {
    JavaThread* thread = (JavaThread*) t;
    bool in_java = thread->thread_state() == _thread_in_Java;

    // Handle potential stack overflows up front.
    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
      if (thread->stack_guards_enabled()) {
        if (in_java) {
          frame fr;
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
            assert(fr.is_java_frame(), "Must be a Java frame");
            SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
          }
        }
        // Yellow zone violation. The o/s has unprotected the first yellow
        // zone page for us. Note: must call disable_stack_yellow_zone to
        // update the enabled status, even if the zone contains only one page.
        assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
        thread->disable_stack_yellow_reserved_zone();
        // If not in java code, return and hope for the best.
        return in_java
            ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
            : EXCEPTION_CONTINUE_EXECUTION;
      } else {
        // Fatal red zone violation.
        thread->disable_stack_red_zone();
        tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
        report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                     exceptionInfo->ContextRecord);
        return EXCEPTION_CONTINUE_SEARCH;
      }
    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
      // Either stack overflow or null pointer exception.
      if (in_java) {
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        address stack_end = thread->stack_end();
        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
          // Stack overflow.
          assert(!os::uses_stack_guard_pages(),
                 "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          if (os::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
        {
#ifdef _WIN64
          // If it's a legal stack address map the entire region in
          //
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
            // Round down to a page boundary and commit everything from there
            // up to the stack base.
            addr = (address)((uintptr_t)addr &
                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
            os::commit_memory((char *)addr, thread->stack_base() - addr,
                              !ExecMem);
            return EXCEPTION_CONTINUE_EXECUTION;
          } else
#endif
          {
            // Null pointer exception.
            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) {
              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
            }
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;
          }
        }
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
        address addr = JNI_FastGetField::find_slowcase_pc(pc);
        if (addr != (address)-1) {
          return Handle_Exception(exceptionInfo, addr);
        }
      }
#endif

      // Stack overflow or null pointer exception in native code.
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
      CompiledMethod* nm = NULL;
      JavaThread* thread = (JavaThread*)t;
      if (in_java) {
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
      }
      // Unsafe accesses (in the VM or in compiled code marked as such) get
      // routed to the unsafe-access handler instead of crashing.
      if ((thread->thread_state() == _thread_in_vm &&
          thread->doing_unsafe_access()) ||
          (nm != NULL && nm->has_unsafe_access())) {
        return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, (address)Assembler::locate_next_instruction(pc)));
      }
    }

    if (in_java) {
      switch (exception_code) {
      case EXCEPTION_INT_DIVIDE_BY_ZERO:
        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

      case EXCEPTION_INT_OVERFLOW:
        return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }
    if (((thread->thread_state() == _thread_in_Java) ||
        (thread->thread_state() == _thread_in_native)) &&
        exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
  }

  if (exception_code != EXCEPTION_BREAKPOINT) {
    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                 exceptionInfo->ContextRecord);
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

#ifndef _WIN64
// Special care for fast JNI accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
// the heap gets shrunk before the field access.
// Need to install our own structured exception handler since native code may
// install its own.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Wraps each fast-getfield stub call in a __try/__except using the filter
// above; returns 0 if the fault could not be redirected to the slow case.
#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
  Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
                                                     jobject obj,           \
                                                     jfieldID fieldID) {    \
    __try {                                                                 \
      return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
                                                                 obj,       \
                                                                 fieldID);  \
    } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
                                              _exception_info())) {         \
    }                                                                       \
    return 0;                                                               \
  }

DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
DEFINE_FAST_GETFIELD(jchar,    char,   Char)
DEFINE_FAST_GETFIELD(jshort,   short,  Short)
DEFINE_FAST_GETFIELD(jint,     int,    Int)
DEFINE_FAST_GETFIELD(jlong,    long,   Long)
DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
DEFINE_FAST_GETFIELD(jdouble,  double, Double)

// Returns the SEH-protected wrapper for the fast JNI accessor of 'type'.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
  case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
  case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
  case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
  case T_INT:     return (address)jni_fast_GetIntField_wrapper;
  case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
  case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
  case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
  default:        ShouldNotReachHere();
  }
  return (address)-1;
}
#endif

// Virtual Memory

int os::vm_page_size() { return os::win32::vm_page_size(); }
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}

// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
//   + select Control Panel -> Administrative Tools -> Local Security Policy
//   + select Local Policies -> User Rights Assignment
//   + double click "Lock pages in memory", add users and/or groups
//   + reboot
// Note the above steps are needed for administrator as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment it only uses large page if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future, if so the code below needs to be revisited.

#ifndef MEM_LARGE_PAGES
#define MEM_LARGE_PAGES 0x20000000
#endif

// Process/token handles held open only for the duration of large_page_init();
// see request_lock_memory_privilege() and cleanup_after_large_page_init().
static HANDLE _hProcess;
static HANDLE _hToken;

// Container for NUMA node list info
class NUMANodeListHolder {
 private:
  int *_numa_used_node_list;  // allocated below
  int _numa_used_node_count;

  void free_node_list() {
    if (_numa_used_node_list != NULL) {
      FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
    }
  }

 public:
  NUMANodeListHolder() {
    _numa_used_node_count = 0;
    _numa_used_node_list = NULL;
    // do rest of initialization in build routine (after function pointers are set up)
  }

  ~NUMANodeListHolder() {
    free_node_list();
  }

  // Populates the list with the NUMA nodes whose processor masks intersect
  // this process' affinity mask. Returns true only when more than one node
  // is in use (interleaving over a single node is pointless).
  bool build() {
    DWORD_PTR proc_aff_mask;
    DWORD_PTR sys_aff_mask;
    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
    ULONG highest_node_number;
    if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
    free_node_list();
    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
    for (unsigned int i = 0; i <= highest_node_number; i++) {
      ULONGLONG proc_mask_numa_node;
      if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
      if ((proc_aff_mask & proc_mask_numa_node)!=0) {
        _numa_used_node_list[_numa_used_node_count++] = i;
      }
    }
    return (_numa_used_node_count > 1);
  }

  int get_count() { return _numa_used_node_count; }
  int get_node_list_entry(int n) {
    // for indexes out of range, returns -1
    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
  }

} numa_node_list_holder;



// Large page size selected by large_page_init(); 0 while unset.
static size_t _large_page_size = 0;

// Tries to enable SeLockMemoryPrivilege (required for MEM_LARGE_PAGES) on the
// current process token. Leaves _hProcess/_hToken open for
// cleanup_after_large_page_init() to close. Returns true on success.
static bool request_lock_memory_privilege() {
  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
                          os::current_process_id());

  LUID luid;
  if (_hProcess != NULL &&
      OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
      LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

    TOKEN_PRIVILEGES tp;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Luid = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See MSDN document.
    if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
        (GetLastError() == ERROR_SUCCESS)) {
      return true;
    }
  }

  return false;
}

// Closes the handles opened by request_lock_memory_privilege().
static void cleanup_after_large_page_init() {
  if (_hProcess) CloseHandle(_hProcess);
  _hProcess = NULL;
  if (_hToken) CloseHandle(_hToken);
  _hToken = NULL;
}

// Builds the NUMA node list and aligns NUMAInterleaveGranularity; returns
// true when interleaved allocation can actually be used (multiple nodes).
static bool numa_interleaving_init() {
  bool success = false;
  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

  // print a warning if UseNUMAInterleaving flag is specified on command line
  bool warn_on_failure = use_numa_interleaving_specified;
#define WARN(msg) if (warn_on_failure) { warning(msg); }

  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);

  if (numa_node_list_holder.build()) {
    if (log_is_enabled(Debug, os, cpu)) {
      Log(os, cpu) log;
      log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
      for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
        log.debug(" %d ", numa_node_list_holder.get_node_list_entry(i));
      }
    }
    success = true;
  } else {
    WARN("Process does not cover multiple NUMA nodes.");
  }
  if (!success) {
    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
  }
  return success;
#undef WARN
}

// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
//  * UseNUMAInterleaving requires a separate node for each piece
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
                                         DWORD prot,
                                         bool should_inject_error = false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) VirtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  // Immediately release the probe reservation; the address range found is
  // then re-allocated chunk by chunk below (a benign race with other
  // allocators in this process is possible here).
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = align_up(p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t bytes_remaining = bytes;
  // An overflow of align_up() would have been caught above
  // in the calculation of size_of_reserve.
  char * next_alloc_addr = p_buf;
  HANDLE hProc = GetCurrentProcess();

#ifdef ASSERT
  // Variable for the failure injection
  int ran_num = os::random();
  size_t fail_after = ran_num % bytes;
#endif

  int count=0;
  while (bytes_remaining) {
    // select bytes_to_rq to get to the next chunk_size boundary

    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
    // Note allocate and commit
    char * p_new;

#ifdef ASSERT
    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
#else
    const bool inject_error_now = false;
#endif

    if (inject_error_now) {
      p_new = NULL;
    } else {
      if (!UseNUMAInterleaving) {
        p_new = (char *) VirtualAlloc(next_alloc_addr,
                                      bytes_to_rq,
                                      flags,
                                      prot);
      } else {
        // get the next node to use from the used_node_list
        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
        p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
      }
    }

    if (p_new == NULL) {
      // Free any allocated pages
      if (next_alloc_addr > p_buf) {
        // Some memory was committed so release it.
        size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // need to create a dummy 'reserve' record to match
        // the release.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        log_develop_debug(pagesize)("Reserving pages individually failed.");
      }
#endif
      return NULL;
    }

    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}



// Determines whether large pages can be used: requests the lock-memory
// privilege, queries the minimum large page size, and validates any
// user-specified LargePageSizeInBytes. Clears UseLargePages on failure.
void os::large_page_init() {
  if (!UseLargePages) return;

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  bool success = false;

#define WARN(msg) if (warn_on_failure) { warning(msg); }
  if (request_lock_memory_privilege()) {
    size_t s = GetLargePageMinimum();
    if (s) {
#if defined(IA32) || defined(AMD64)
      if (s > 4*M || LargePageSizeInBytes > 4*M) {
        WARN("JVM cannot use large pages bigger than 4mb.");
      } else {
#endif
        // Use the user-specified size when it is a multiple of the
        // hardware minimum, otherwise fall back to the minimum.
        if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
          _large_page_size = LargePageSizeInBytes;
        } else {
          _large_page_size = s;
        }
        success = true;
#if defined(IA32) || defined(AMD64)
      }
#endif
    } else {
      WARN("Large page is not supported by the processor.");
    }
  } else {
    WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
  }
#undef WARN

  const size_t default_page_size =
(size_t) vm_page_size(); 2879 if (success && _large_page_size > default_page_size) { 2880 _page_sizes[0] = _large_page_size; 2881 _page_sizes[1] = default_page_size; 2882 _page_sizes[2] = 0; 2883 } 2884 2885 cleanup_after_large_page_init(); 2886 UseLargePages = success; 2887 } 2888 2889 int os::create_file_for_heap(const char* dir) { 2890 2891 const char name_template[] = "/jvmheap.XXXXXX"; 2892 char *fullname = (char*)os::malloc((strlen(dir) + strlen(name_template) + 1), mtInternal); 2893 if (fullname == NULL) { 2894 vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno))); 2895 return -1; 2896 } 2897 2898 (void)strncpy(fullname, dir, strlen(dir)+1); 2899 (void)strncat(fullname, name_template, strlen(name_template)); 2900 2901 os::native_path(fullname); 2902 2903 char *path = _mktemp(fullname); 2904 if (path == NULL) { 2905 warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno)); 2906 os::free(fullname); 2907 return -1; 2908 } 2909 2910 int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD); 2911 2912 os::free(fullname); 2913 if (fd < 0) { 2914 warning("Problem opening file for heap (%s)", os::strerror(errno)); 2915 return -1; 2916 } 2917 return fd; 2918 } 2919 2920 // If 'base' is not NULL, function will return NULL if it cannot get 'base' 2921 char* os::map_memory_to_file(char* base, size_t size, int fd) { 2922 assert(fd != -1, "File descriptor is not valid"); 2923 2924 HANDLE fh = (HANDLE)_get_osfhandle(fd); 2925 #ifdef _LP64 2926 HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE, 2927 (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL); 2928 #else 2929 HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE, 2930 0, (DWORD)size, NULL); 2931 #endif 2932 if (fileMapping == NULL) { 2933 if (GetLastError() == ERROR_DISK_FULL) { 2934 vm_exit_during_initialization(err_msg("Could not allocate sufficient disk 
space for Java heap")); 2935 } 2936 else { 2937 vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory")); 2938 } 2939 2940 return NULL; 2941 } 2942 2943 LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base); 2944 2945 CloseHandle(fileMapping); 2946 2947 return (char*)addr; 2948 } 2949 2950 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) { 2951 assert(fd != -1, "File descriptor is not valid"); 2952 assert(base != NULL, "Base address cannot be NULL"); 2953 2954 release_memory(base, size); 2955 return map_memory_to_file(base, size, fd); 2956 } 2957 2958 // On win32, one cannot release just a part of reserved memory, it's an 2959 // all or nothing deal. When we split a reservation, we must break the 2960 // reservation into two reservations. 2961 void os::pd_split_reserved_memory(char *base, size_t size, size_t split, 2962 bool realloc) { 2963 if (size > 0) { 2964 release_memory(base, size); 2965 if (realloc) { 2966 reserve_memory(split, base); 2967 } 2968 if (size != split) { 2969 reserve_memory(size - split, base + split); 2970 } 2971 } 2972 } 2973 2974 // Multiple threads can race in this code but it's not possible to unmap small sections of 2975 // virtual space to get requested alignment, like posix-like os's. 2976 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe. 
// Reserve 'size' bytes aligned to 'alignment' by over-reserving, releasing
// the whole block, and re-reserving exactly the aligned sub-range. Another
// thread may grab the range between the release and the re-reserve, in which
// case we loop and try again (see comment above).
char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = align_up(extra_base, alignment);

    if (file_desc != -1) {
      os::unmap_memory(extra_base, extra_size);
    } else {
      os::release_memory(extra_base, extra_size);
    }

    aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);

  } while (aligned_base == NULL);

  return aligned_base;
}

// Reserve (but do not commit) 'bytes' of address space, optionally at 'addr'.
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_page_size() == 0, "reserve page size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if (Verbose && PrintMiscellaneous) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if (Verbose && PrintMiscellaneous) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available.
  return reserve_memory(bytes, requested_addr);
}

// File-backed variant: map 'bytes' of 'file_desc' at 'requested_addr'.
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
  assert(file_desc >= 0, "file_desc is not valid");
  return map_memory_to_file(requested_addr, bytes, file_desc);
}

size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

bool os::can_execute_large_page_memory() {
  return true;
}

// Reserve AND commit 'bytes' of large-page memory (large pages are always
// committed up front, see can_commit_large_page_memory()). Returns NULL to
// let the caller fall back to small pages.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
                                 bool exec) {
  assert(UseLargePages, "only for large pages");

  if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
    return NULL; // Fallback to small pages.
  }

  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;

  // with large pages, there are two cases where we need to use Individual Allocation
  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
    log_debug(pagesize)("Reserving large pages individually.");

    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
    if (p_buf == NULL) {
      // give an appropriate warning message
      if (UseNUMAInterleaving) {
        warning("NUMA large page allocation failed, UseLargePages flag ignored");
      }
      if (UseLargePagesIndividualAllocation) {
        warning("Individually allocated large pages failed, "
                "use -XX:-UseLargePagesIndividualAllocation to turn off");
      }
      return NULL;
    }

    return p_buf;

  } else {
    log_debug(pagesize)("Reserving large pages in a single large chunk.");

    // normal policy just allocate it all at once
    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
    if (res != NULL) {
      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
    }

    return res;
  }
}

bool os::release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  return release_memory(base, bytes);
}

void os::print_statistics() {
}

// Emit a diagnostic warning describing a failed commit (used by
// os::pd_commit_memory below).
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}

// Commit previously reserved pages; when 'exec' is set, also grant execute
// permission via VirtualProtect as recommended by the Windows documentation.
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
3217 return true; 3218 } 3219 assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries"); 3220 assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks"); 3221 return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0); 3222 } 3223 3224 bool os::pd_release_memory(char* addr, size_t bytes) { 3225 return VirtualFree(addr, 0, MEM_RELEASE) != 0; 3226 } 3227 3228 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 3229 return os::commit_memory(addr, size, !ExecMem); 3230 } 3231 3232 bool os::remove_stack_guard_pages(char* addr, size_t size) { 3233 return os::uncommit_memory(addr, size); 3234 } 3235 3236 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) { 3237 uint count = 0; 3238 bool ret = false; 3239 size_t bytes_remaining = bytes; 3240 char * next_protect_addr = addr; 3241 3242 // Use VirtualQuery() to get the chunk size. 3243 while (bytes_remaining) { 3244 MEMORY_BASIC_INFORMATION alloc_info; 3245 if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) { 3246 return false; 3247 } 3248 3249 size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3250 // We used different API at allocate_pages_individually() based on UseNUMAInterleaving, 3251 // but we don't distinguish here as both cases are protected by same API. 
3252 ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0; 3253 warning("Failed protecting pages individually for chunk #%u", count); 3254 if (!ret) { 3255 return false; 3256 } 3257 3258 bytes_remaining -= bytes_to_protect; 3259 next_protect_addr += bytes_to_protect; 3260 count++; 3261 } 3262 return ret; 3263 } 3264 3265 // Set protections specified 3266 bool os::protect_memory(char* addr, size_t bytes, ProtType prot, 3267 bool is_committed) { 3268 unsigned int p = 0; 3269 switch (prot) { 3270 case MEM_PROT_NONE: p = PAGE_NOACCESS; break; 3271 case MEM_PROT_READ: p = PAGE_READONLY; break; 3272 case MEM_PROT_RW: p = PAGE_READWRITE; break; 3273 case MEM_PROT_RWX: p = PAGE_EXECUTE_READWRITE; break; 3274 default: 3275 ShouldNotReachHere(); 3276 } 3277 3278 DWORD old_status; 3279 3280 // Strange enough, but on Win32 one can change protection only for committed 3281 // memory, not a big deal anyway, as bytes less or equal than 64K 3282 if (!is_committed) { 3283 commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX, 3284 "cannot commit protection page"); 3285 } 3286 // One cannot use os::guard_memory() here, as on Win32 guard page 3287 // have different (one-shot) semantics, from MSDN on PAGE_GUARD: 3288 // 3289 // Pages in the region become guard pages. Any attempt to access a guard page 3290 // causes the system to raise a STATUS_GUARD_PAGE exception and turn off 3291 // the guard page status. Guard pages thus act as a one-time access alarm. 3292 bool ret; 3293 if (UseNUMAInterleaving) { 3294 // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time, 3295 // so we must protect the chunks individually. 
    ret = protect_pages_individually(addr, bytes, p, &old_status);
  } else {
    ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
  }
#ifdef ASSERT
  if (!ret) {
    int err = os::get_last_error();
    char buf[256];
    size_t buf_len = os::lasterror(buf, sizeof(buf));
    warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
            ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
            buf_len != 0 ? buf : "<no_error_string>", err);
  }
#endif
  return ret;
}

// Turn [addr, addr+bytes) into one-shot guard pages (see PAGE_GUARD note in
// os::protect_memory above).
bool os::guard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}

bool os::unguard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}

// Paging/NUMA hints that are no-ops on Windows.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
bool os::numa_topology_changed() { return false; }
size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id() { return 0; }
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // Provide an answer for UMA systems
    ids[0] = 0;
    return 1;
  } else {
    // check for size bigger than actual groups_num
    size = MIN2(size, numa_get_groups_num());
    for (int i = 0; i < (int)size; i++) {
      ids[i] = numa_node_list_holder.get_node_list_entry(i);
    }
    return size;
  }
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected,
                     page_info* page_found) {
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}

#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL

// Start a thread created in suspended state by resuming it.
void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}

// RAII helper that raises the multimedia timer resolution to 1 ms for the
// duration of a sub-10ms sleep and restores it in the destructor.
class HighResolutionInterval : public CHeapObj<mtThread> {
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
 private:
  jlong resolution;
 public:
  HighResolutionInterval(jlong ms) {
    resolution = ms % 10L;
    if (resolution != 0) {
      MMRESULT result = timeBeginPeriod(1L);
    }
  }
  ~HighResolutionInterval() {
    if (resolution != 0) {
      MMRESULT result = timeEndPeriod(1L);
    }
    resolution = 0L;
  }
};

// Sleep for 'ms' milliseconds. When 'interruptable', waits on the thread's
// interrupt event and returns OS_INTRPT if it fires, OS_TIMEOUT otherwise.
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  // The wait APIs take a DWORD timeout, so handle very long requests in
  // MAXDWORD-sized slices.
  while (ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) {
      return res;
    }
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    HighResolutionInterval *phri=NULL;
    if (!ForceTimeHighResolution) {
      phri = new HighResolutionInterval(ms);
    }
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}

// Short sleep, direct OS call.
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    Sleep(100000);  // ... 100 seconds at a time
  }
}

typedef BOOL (WINAPI * STTSignature)(void);

void os::naked_yield() {
  // Consider passing back the return value from SwitchToThread().
  SwitchToThread();
}

// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.

// Default mapping from Java thread priorities to Win32 thread priorities.
int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};

// Alternative mapping installed when ThreadPriorityPolicy == 1 (see prio_init).
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};

static int prio_init() {
  // If ThreadPriorityPolicy is 1, switch tables
  if (ThreadPriorityPolicy == 1) {
    int i;
    for (i = 0; i < CriticalPriority + 1; i++) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  if (UseCriticalJavaThreadPriority) {
    // Let Java MaxPriority threads run at the OS priority reserved for
    // CriticalPriority.
    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
  }
  return 0;
}

OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) return OS_OK;
  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
  return ret ? OS_OK : OS_ERR;
}

OSReturn os::get_native_priority(const Thread* const thread,
                                 int* priority_ptr) {
  if (!UseThreadPriorities) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

// Interrupt the target thread: set its interrupted flag and signal its
// interrupt event so any wait in os::sleep() wakes up.
void os::interrupt(Thread* thread) {
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread()) {
    ((JavaThread*)thread)->parker()->unpark();
  }

  ParkEvent * ev = thread->_ParkEvent;
  if (ev != NULL) ev->unpark();
}


bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. By checking thread interrupt event to see
  // if the thread gets real interrupt thus prevent spurious wakeup.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}

// GetCurrentThreadId() returns DWORD
intx os::current_thread_id()  { return GetCurrentThreadId(); }

static int _initial_pid = 0;

int os::current_process_id() {
  return (_initial_pid ? _initial_pid : _getpid());
}

int    os::win32::_vm_page_size              = 0;
int    os::win32::_vm_allocation_granularity = 0;
int    os::win32::_processor_type            = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int    os::win32::_processor_level           = 0;
julong os::win32::_physical_memory           = 0;
size_t os::win32::_default_stack_size        = 0;

intx          os::win32::_os_thread_limit    = 0;
volatile intx os::win32::_os_thread_count    = 0;

bool   os::win32::_is_windows_server         = false;

// 6573254
// Currently, the bug is observed across all the supported Windows releases,
// including the latest one (as of this writing - Windows Server 2012 R2)
bool   os::win32::_has_exit_bug              = true;

// Query page size, allocation granularity, processor count, physical memory
// and OS flavor from the OS; called once during VM startup.
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size    = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type  = si.dwProcessorType;
  _processor_level = si.wProcessorLevel;
  set_processor_count(si.dwNumberOfProcessors);

  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);

  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatusEx(&ms);
  _physical_memory = ms.ullTotalPhys;

  if (FLAG_IS_DEFAULT(MaxRAM)) {
    // Adjust MaxRAM according to the maximum virtual address space available.
    FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
  }

  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch (oi.dwPlatformId) {
  case VER_PLATFORM_WIN32_NT:
    {
      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
          oi.wProductType == VER_NT_SERVER) {
        _is_windows_server = true;
      }
    }
    break;
  default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
         "stack size not a multiple of page size");

  initialize_performance_counter();
}


// Load 'name' (a bare DLL file name, no path component allowed) from the
// system directory or the Windows directory only. Returns NULL (with a
// message in 'ebuf') on failure.
HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
                                      int ebuflen) {
  char path[MAX_PATH];
  DWORD size;
  DWORD pathLen = (DWORD)sizeof(path);
  HINSTANCE result = NULL;

  // only allow library name without path component
  assert(strchr(name, '\\') == NULL, "path not allowed");
  assert(strchr(name, ':') == NULL, "path not allowed");
  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
    jio_snprintf(ebuf, ebuflen,
                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
    return NULL;
  }

  // search system directory
  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // try Windows directory
  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  jio_snprintf(ebuf, ebuflen,
               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
  return NULL;
}

#define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
#define EXIT_TIMEOUT 300000 /* 5 minutes */

// InitOnceExecuteOnce callback: one-time initialization of the critical
// section used by exit_process_or_thread below.
static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
  InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
  return TRUE;
}

int os::win32::exit_process_or_thread(Ept what, int exit_code) {
  // Basic approach:
  //  - Each exiting thread registers its intent to exit and then does so.
  //  - A thread trying to terminate the process must wait for all
  //    threads currently exiting to complete their exit.

  if (os::win32::has_exit_bug()) {
    // The array holds handles of the threads that have started exiting by calling
    // _endthreadex().
    // Should be large enough to avoid blocking the exiting thread due to lack of
    // a free slot.
    static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
    static int handle_count = 0;

    static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
    static CRITICAL_SECTION crit_sect;
    static volatile DWORD process_exiting = 0;
    int i, j;
    DWORD res;
    HANDLE hproc, hthr;

    // We only attempt to register threads until a process exiting
    // thread manages to set the process_exiting flag. Any threads
    // that come through here after the process_exiting flag is set
    // are unregistered and will be caught in the SuspendThread()
    // infinite loop below.
    bool registered = false;

    // The first thread that reached this point, initializes the critical section.
    if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
      warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
    } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
      if (what != EPT_THREAD) {
        // Atomically set process_exiting before the critical section
        // to increase the visibility between racing threads.
        // (Stores the exiting thread's id; 0 means "no process exit pending".)
        Atomic::cmpxchg(GetCurrentThreadId(), &process_exiting, (DWORD)0);
      }
      EnterCriticalSection(&crit_sect);

      if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
        // Remove from the array those handles of the threads that have completed exiting.
        // WAIT_TIMEOUT with a zero timeout means the thread is still running.
        for (i = 0, j = 0; i < handle_count; ++i) {
          res = WaitForSingleObject(handles[i], 0 /* don't wait */);
          if (res == WAIT_TIMEOUT) {
            handles[j++] = handles[i];
          } else {
            if (res == WAIT_FAILED) {
              warning("WaitForSingleObject failed (%u) in %s: %d\n",
                      GetLastError(), __FILE__, __LINE__);
            }
            // Don't keep the handle, if we failed waiting for it.
            CloseHandle(handles[i]);
          }
        }

        // If there's no free slot in the array of the kept handles, we'll have to
        // wait until at least one thread completes exiting.
        if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
          // Raise the priority of the oldest exiting thread to increase its chances
          // to complete sooner.
          SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
          // Wait on the first MAXIMUM_WAIT_OBJECTS handles only
          // (WaitForMultipleObjects cannot take more than that at once).
          res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
          if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
            // One thread finished: compact the array over its slot.
            i = (res - WAIT_OBJECT_0);
            handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
            for (; i < handle_count; ++i) {
              handles[i] = handles[i + 1];
            }
          } else {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ?
                     "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Don't keep handles, if we failed waiting for them.
            for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
              CloseHandle(handles[i]);
            }
            handle_count = 0;
          }
        }

        // Store a duplicate of the current thread handle in the array of handles.
        hproc = GetCurrentProcess();
        hthr = GetCurrentThread();
        if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
                             0, FALSE, DUPLICATE_SAME_ACCESS)) {
          warning("DuplicateHandle failed (%u) in %s: %d\n",
                  GetLastError(), __FILE__, __LINE__);

          // We can't register this thread (no more handles) so this thread
          // may be racing with a thread that is calling exit(). If the thread
          // that is calling exit() has managed to set the process_exiting
          // flag, then this thread will be caught in the SuspendThread()
          // infinite loop below which closes that race. A small timing
          // window remains before the process_exiting flag is set, but it
          // is only exposed when we are out of handles.
        } else {
          ++handle_count;
          registered = true;

          // The current exiting thread has stored its handle in the array, and now
          // should leave the critical section before calling _endthreadex().
        }

      } else if (what != EPT_THREAD && handle_count > 0) {
        jlong start_time, finish_time, timeout_left;
        // Before ending the process, make sure all the threads that had called
        // _endthreadex() completed.

        // Set the priority level of the current thread to the same value as
        // the priority level of exiting threads.
        // This is to ensure it will be given a fair chance to execute if
        // the timeout expires.
        hthr = GetCurrentThread();
        SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
        start_time = os::javaTimeNanos();
        finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
        // Wait for the registered exiting threads in portions of at most
        // MAXIMUM_WAIT_OBJECTS handles per WaitForMultipleObjects call.
        for (i = 0; ; ) {
          int portion_count = handle_count - i;
          if (portion_count > MAXIMUM_WAIT_OBJECTS) {
            portion_count = MAXIMUM_WAIT_OBJECTS;
          }
          for (j = 0; j < portion_count; ++j) {
            SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
          }
          // Remaining budget of the overall EXIT_TIMEOUT, in milliseconds.
          timeout_left = (finish_time - start_time) / 1000000L;
          if (timeout_left < 0) {
            timeout_left = 0;
          }
          res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
          if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Reset portion_count so we close the remaining
            // handles due to this error.
            portion_count = handle_count - i;
          }
          for (j = 0; j < portion_count; ++j) {
            CloseHandle(handles[i + j]);
          }
          if ((i += portion_count) >= handle_count) {
            break;
          }
          start_time = os::javaTimeNanos();
        }
        handle_count = 0;
      }

      LeaveCriticalSection(&crit_sect);
    }

    if (!registered &&
        OrderAccess::load_acquire(&process_exiting) != 0 &&
        process_exiting != GetCurrentThreadId()) {
      // Some other thread is about to call exit(), so we don't let
      // the current unregistered thread proceed to exit() or _endthreadex()
      while (true) {
        SuspendThread(GetCurrentThread());
        // Avoid busy-wait loop, if SuspendThread() failed.
        Sleep(EXIT_TIMEOUT);
      }
    }
  }

  // We are here if either
  // - there's no 'race at exit' bug on this OS release;
  // - initialization of the critical section failed (unlikely);
  // - the current thread has registered itself and left the critical section;
  // - the process-exiting thread has raised the flag and left the critical section.
  if (what == EPT_THREAD) {
    _endthreadex((unsigned)exit_code);
  } else if (what == EPT_PROCESS) {
    ::exit(exit_code);
  } else {
    _exit(exit_code);
  }

  // Should not reach here
  return exit_code;
}

#undef EXIT_TIMEOUT

// Put the standard streams into binary mode so the VM's I/O is not subject
// to CRLF translation.
void os::win32::setmode_streams() {
  _setmode(_fileno(stdin), _O_BINARY);
  _setmode(_fileno(stdout), _O_BINARY);
  _setmode(_fileno(stderr), _O_BINARY);
}


bool os::is_debugger_attached() {
  return IsDebuggerPresent() ? true : false;
}


// Optionally block at VM exit until a key is pressed (-XX:+PauseAtExit).
void os::wait_for_keypress_at_exit(void) {
  if (PauseAtExit) {
    fprintf(stderr, "Press any key to continue...\n");
    fgetc(stdin);
  }
}


// Shows a system-modal Yes/No message box; returns true iff "Yes" was chosen.
bool os::message_box(const char* title, const char* message) {
  int result = MessageBox(NULL, message, title,
                          MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
  return result == IDYES;
}

#ifndef PRODUCT
#ifndef _WIN64
// Helpers to check whether NX protection is enabled
// SEH filter: handle only access violations caused by attempted execution.
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      pex->ExceptionRecord->NumberParameters > 0 &&
      pex->ExceptionRecord->ExceptionInformation[0] ==
      EXCEPTION_INFO_EXEC_VIOLATION) {
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 };  // ret
  void
*code_ptr = (void *)code;
  __try {
    __asm call code_ptr
  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
    tty->print_raw_cr("NX protection detected.");
  }
}
#endif // _WIN64
#endif // PRODUCT

// This is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = _getpid();

  init_random(1234567);

  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false);

  // Initialize main_process and main_thread.
  // GetCurrentThread() returns a pseudo handle, so duplicate it into a real
  // handle that remains valid outside this thread.
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();

  // initialize fast thread access - only used for 32-bit
  win32::initialize_thread_ptr_offset();
}

// To install functions for atexit processing
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

static jint initSock();

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // Setup Windows Exceptions

  // for debugging float code generation bugs
  if (ForceFloatExceptions) {
#ifndef _WIN64
    static long fp_control_word = 0;
    __asm { fstcw fp_control_word }
    // see Intel PPro Manual, Vol. 2, p 7-16
    // x87 FPU control word exception mask bits.
    const long precision = 0x20;
    const long underflow = 0x10;
    const long overflow = 0x08;
    const long zero_div = 0x04;
    const long denorm = 0x02;
    const long invalid = 0x01;
    // Unmask only "invalid operation" so it traps.
    fp_control_word |= invalid;
    __asm { fldcw fp_control_word }
#endif
  }

  // If stack_commit_size is 0, windows will reserve the default size,
  // but only commit a small portion of it.
  size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
  size_t default_reserve_size = os::win32::default_stack_size();
  size_t actual_reserve_size = stack_commit_size;
  if (stack_commit_size < default_reserve_size) {
    // If stack_commit_size == 0, we want this too
    actual_reserve_size = default_reserve_size;
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size.  Add two 4K pages for compiler2 recursion in main thread.
  // Add in 4*BytesPerWord 4K pages to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  size_t min_stack_allowed =
            (size_t)(JavaThread::stack_guard_zone_size() +
                     JavaThread::stack_shadow_zone_size() +
                     (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);

  min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());

  if (actual_reserve_size < min_stack_allowed) {
    tty->print_cr("\nThe Java thread stack size specified is too small. "
                  "Specify at least %dk",
                  min_stack_allowed / K);
    return JNI_ERR;
  }

  JavaThread::set_stack_size_at_create(stack_commit_size);

  // Calculate theoretical max. size of Threads to guard against artificial
  // out-of-memory situations, where all available address-space has been
  // reserved by thread stacks.
  assert(actual_reserve_size != 0, "Must have a stack");

  // Calculate the thread limit when we should start doing Virtual Memory
  // banging. Currently when the threads will have used all but 200Mb of space.
  //
  // TODO: consider performing a similar calculation for commit size instead
  // as reserve size, since on a 64-bit platform we'll run into that more
  // often than running out of virtual memory space.  We can use the
  // lower value of the two calculations as the os_thread_limit.
  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);

  // at exit methods are called in the reverse order of their registration.
  // there is no limit to the number of functions registered. atexit does
  // not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

#ifndef _WIN64
  // Print something if NX is enabled (win32 on AMD64)
  NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
#endif

  // initialize thread priority policy
  prio_init();

  if (UseNUMA && !ForceNUMA) {
    UseNUMA = false; // We don't fully support this yet
  }

  if (UseNUMAInterleaving) {
    // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
    bool success = numa_interleaving_init();
    if (!success) UseNUMAInterleaving = false;
  }

  if (initSock() != JNI_OK) {
    return JNI_ERR;
  }

  SymbolEngine::recalc_search_path();

  return JNI_OK;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  DWORD old_status;
  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
                      PAGE_NOACCESS, &old_status)) {
    fatal("Could not disable polling page");
  }
}

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  DWORD old_status;
  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
                      PAGE_READONLY, &old_status)) {
    fatal("Could not enable polling page");
  }
}

// combine the high and low DWORD into a ULONGLONG
static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
  ULONGLONG value = high_word;
  value <<= sizeof(high_word) * 8;   // shift high word into the upper 32 bits
  value |= low_word;
  return value;
}

// Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
  ::memset((void*)sbuf, 0, sizeof(struct stat));
  sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh,
                                           file_data.nFileSizeLow);
  // Windows FILETIMEs are copied through verbatim.
  sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
                                    file_data.ftLastWriteTime.dwLowDateTime);
  sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
                                    file_data.ftCreationTime.dwLowDateTime);
  sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
                                    file_data.ftLastAccessTime.dwLowDateTime);
  if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
    sbuf->st_mode |= S_IFDIR;
  } else {
    sbuf->st_mode |= S_IFREG;
  }
}

// The following function is adapted from java.base/windows/native/libjava/canonicalize_md.c
// Creates an UNC path from a single byte path. Return buffer is
// allocated in C heap and needs to be freed by the caller.
// Returns NULL on error.
static wchar_t* create_unc_path(const char* path, errno_t &err) {
  wchar_t* wpath = NULL;
  size_t converted_chars = 0;
  size_t path_len = strlen(path) + 1;  // includes the terminating NULL
  if (path[0] == '\\' && path[1] == '\\') {
    if (path[2] == '?'
        && path[3] == '\\'){
      // if it already has a \\?\ don't do the prefix
      wpath = (wchar_t*)os::malloc(path_len * sizeof(wchar_t), mtInternal);
      if (wpath != NULL) {
        err = ::mbstowcs_s(&converted_chars, wpath, path_len, path, path_len);
      } else {
        err = ENOMEM;
      }
    } else {
      // only UNC pathname includes double slashes here
      // prepend "\\?\UNC" (7 wide chars) before the path
      wpath = (wchar_t*)os::malloc((path_len + 7) * sizeof(wchar_t), mtInternal);
      if (wpath != NULL) {
        ::wcscpy(wpath, L"\\\\?\\UNC\0");
        err = ::mbstowcs_s(&converted_chars, &wpath[7], path_len, path, path_len);
      } else {
        err = ENOMEM;
      }
    }
  } else {
    // plain path: prepend "\\?\" (4 wide chars)
    wpath = (wchar_t*)os::malloc((path_len + 4) * sizeof(wchar_t), mtInternal);
    if (wpath != NULL) {
      ::wcscpy(wpath, L"\\\\?\\\0");
      err = ::mbstowcs_s(&converted_chars, &wpath[4], path_len, path, path_len);
    } else {
      err = ENOMEM;
    }
  }
  return wpath;
}

static void destroy_unc_path(wchar_t* wpath) {
  os::free(wpath);
}

// stat() replacement; uses GetFileAttributesEx instead of the CRT stat().
int os::stat(const char *path, struct stat *sbuf) {
  char* pathbuf = (char*)os::strdup(path, mtInternal);
  if (pathbuf == NULL) {
    errno = ENOMEM;
    return -1;
  }
  os::native_path(pathbuf);
  int ret;
  WIN32_FILE_ATTRIBUTE_DATA file_data;
  // Not using stat() to avoid the problem described in JDK-6539723
  if (strlen(path) < MAX_PATH) {
    BOOL bret = ::GetFileAttributesExA(pathbuf, GetFileExInfoStandard, &file_data);
    if (!bret) {
      errno = ::GetLastError();
      ret = -1;
    }
    else {
      file_attribute_data_to_stat(sbuf, file_data);
      ret = 0;
    }
  } else {
    // Long path: go through a \\?\-prefixed wide-character path.
    errno_t err = ERROR_SUCCESS;
    wchar_t* wpath = create_unc_path(pathbuf, err);
    if (err != ERROR_SUCCESS) {
      if (wpath != NULL) {
        destroy_unc_path(wpath);
      }
      os::free(pathbuf);
      errno = err;
      return -1;
    }
    BOOL bret = ::GetFileAttributesExW(wpath, GetFileExInfoStandard, &file_data);
    if (!bret) {
      errno = ::GetLastError();
      ret = -1;
    } else {
      file_attribute_data_to_stat(sbuf, file_data);
      ret = 0;
    }
    destroy_unc_path(wpath);
  }
  os::free(pathbuf);
  return ret;
}


#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))


// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.

// current_thread_cpu_time() is not optimized for Windows yet
jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
}

jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns.
  return os::thread_cpu_time(thread, true /* user+sys */);
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
}

jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
  // This code is copy from classic VM -> hpi::sysThreadCPUTime
  // If this function changes, os::is_thread_cpu_time_supported() should too
  FILETIME CreationTime;
  FILETIME ExitTime;
  FILETIME KernelTime;
  FILETIME UserTime;

  if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
                     &ExitTime, &KernelTime, &UserTime) == 0) {
    return -1;
  } else if (user_sys_cpu_time) {
    // GetThreadTimes reports FILETIMEs in 100ns units; * 100 yields nanoseconds.
    return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
  } else {
    return FT2INT64(UserTime) * 100;
  }
}

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}

bool os::is_thread_cpu_time_supported() {
  // see os::thread_cpu_time
  FILETIME CreationTime;
  FILETIME ExitTime;
  FILETIME KernelTime;
  FILETIME UserTime;

  // Probe once on the current thread: supported iff the call succeeds.
  if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
                     &KernelTime, &UserTime) == 0) {
    return false;
  } else {
    return true;
  }
}
// Windows doesn't provide a loadavg primitive so this is stubbed out for now.
// It does have primitives (PDH API) to get CPU usage and run queue length.
// "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
// If we wanted to implement loadavg on Windows, we have a few options:
//
// a) Query CPU usage and run queue length and "fake" an answer by
//    returning the CPU usage if it's under 100%, and the run queue
//    length otherwise.  It turns out that querying is pretty slow
//    on Windows, on the order of 200 microseconds on a fast machine.
//    Note that on Windows the CPU usage value is the % usage
//    since the last time the API was called (and the first call
//    returns 100%), so we'd have to deal with that as well.
//
// b) Sample the "fake" answer using a sampling thread and store
//    the answer in a global variable.  The call to loadavg would
//    just return the value of the global, avoiding the slow query.
//
// c) Sample a better answer using exponential decay to smooth the
//    value.  This is basically the algorithm used by UNIX kernels.
//
// Note that sampling thread starvation could affect both (b) and (c).
4329 int os::loadavg(double loadavg[], int nelem) { 4330 return -1; 4331 } 4332 4333 4334 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield() 4335 bool os::dont_yield() { 4336 return DontYieldALot; 4337 } 4338 4339 // This method is a slightly reworked copy of JDK's sysOpen 4340 // from src/windows/hpi/src/sys_api_md.c 4341 4342 int os::open(const char *path, int oflag, int mode) { 4343 char* pathbuf = (char*)os::strdup(path, mtInternal); 4344 if (pathbuf == NULL) { 4345 errno = ENOMEM; 4346 return -1; 4347 } 4348 os::native_path(pathbuf); 4349 int ret; 4350 if (strlen(path) < MAX_PATH) { 4351 ret = ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); 4352 } else { 4353 errno_t err = ERROR_SUCCESS; 4354 wchar_t* wpath = create_unc_path(pathbuf, err); 4355 if (err != ERROR_SUCCESS) { 4356 if (wpath != NULL) { 4357 destroy_unc_path(wpath); 4358 } 4359 os::free(pathbuf); 4360 errno = err; 4361 return -1; 4362 } 4363 ret = ::_wopen(wpath, oflag | O_BINARY | O_NOINHERIT, mode); 4364 if (ret == -1) { 4365 errno = ::GetLastError(); 4366 } 4367 destroy_unc_path(wpath); 4368 } 4369 os::free(pathbuf); 4370 return ret; 4371 } 4372 4373 FILE* os::open(int fd, const char* mode) { 4374 return ::_fdopen(fd, mode); 4375 } 4376 4377 // Is a (classpath) directory empty? 
// Returns true iff the directory contains no entries other than "." and "..".
// Also returns false on any failure (allocation, path conversion).
bool os::dir_is_empty(const char* path) {
  char* search_path = (char*)os::malloc(strlen(path) + 3, mtInternal);
  if (search_path == NULL) {
    errno = ENOMEM;
    return false;
  }
  strcpy(search_path, path);
  // Append "*", or possibly "\\*", to path
  if (path[1] == ':' &&
      (path[2] == '\0' ||
       (path[2] == '\\' && path[3] == '\0'))) {
    // No '\\' needed for cases like "Z:" or "Z:\"
    strcat(search_path, "*");
  }
  else {
    strcat(search_path, "\\*");
  }
  errno_t err = ERROR_SUCCESS;
  wchar_t* wpath = create_unc_path(search_path, err);
  if (err != ERROR_SUCCESS) {
    if (wpath != NULL) {
      destroy_unc_path(wpath);
    }
    os::free(search_path);
    errno = err;
    return false;
  }
  WIN32_FIND_DATAW fd;
  HANDLE f = ::FindFirstFileW(wpath, &fd);
  destroy_unc_path(wpath);
  bool is_empty = true;
  if (f != INVALID_HANDLE_VALUE) {
    // NOTE(review): the entry returned by FindFirstFileW itself is never
    // examined; presumably that first entry is always "." for a directory
    // listing -- verify.
    while (is_empty && ::FindNextFileW(f, &fd)) {
      // An empty directory contains only the current directory file
      // and the previous directory file.
      if ((wcscmp(fd.cFileName, L".") != 0) &&
          (wcscmp(fd.cFileName, L"..") != 0)) {
        is_empty = false;
      }
    }
    FindClose(f);
  }
  os::free(search_path);
  return is_empty;
}

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
  if (!rewrite_existing) {
    // Fail if the file already exists instead of truncating it.
    oflags |= _O_EXCL;
  }
  return ::open(path, oflags, _S_IREAD | _S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}


jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::_lseeki64(fd, offset, whence);
}

// Positioned read via overlapped ReadFile: reads nBytes at the given file
// offset. Returns the number of bytes read, or 0 on failure.
size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  OVERLAPPED ov;
  DWORD nread;
  BOOL result;

  ZeroMemory(&ov, sizeof(ov));
  // Split the 64-bit offset into the OVERLAPPED low/high DWORD pair.
  ov.Offset = (DWORD)offset;
  ov.OffsetHigh = (DWORD)(offset >> 32);

  HANDLE h = (HANDLE)::_get_osfhandle(fd);

  result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);

  return result ? nread : 0;
}


// This method is a slightly reworked copy of JDK's sysNativePath
// from src/windows/hpi/src/path_md.c

// Convert a pathname to native format.  On win32, this involves forcing all
// separators to be '\\' rather than '/' (both are legal inputs, but Win95
// sometimes rejects '/') and removing redundant separators.  The input path is
// assumed to have been converted into the character encoding used by the local
// system.
// Because this might be a double-byte encoding, care is taken to
// treat double-byte lead characters correctly.
//
// This procedure modifies the given path in place, as the result is never
// longer than the original.  There is no error return; this operation always
// succeeds.
char * os::native_path(char *path) {
  char *src = path, *dst = path, *end = path;
  char *colon = NULL;  // If a drive specifier is found, this will
                       // point to the colon following the drive letter

  // Assumption: '/', '\\', ':', and drive letters are never lead bytes
  assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
          && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");

  // Check for leading separators
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    // Remove leading separators if followed by drive specifier.  This
    // hack is necessary to support file URLs containing drive
    // specifiers (e.g., "file://c:/path").  As a side effect,
    // "/c:/path" can be used as an alternative to "c:/path".
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      // UNC pathname: Retain first separator; leave src pointed at
      // second separator so that further separators will be collapsed
      // into the second separator.  The result will be a pathname
      // beginning with "\\\\" followed (most likely) by a host name.
      src = dst = path + 1;
      path[0] = '\\';  // Force first separator to '\\'
    }
  }

  end = dst;

  // Remove redundant separators from remainder of path, forcing all
  // separators to be '\\' rather than '/'.
  // Also, single byte space
  // characters are removed from the end of the path because those
  // are not legal ending characters on this operating system.
  //
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      // Collapse any run of separators into the single '\\' just written.
      while (isfilesep(*src)) src++;
      if (*src == '\0') {
        // Check for trailing separator
        end = dst;
        if (colon == dst - 2) break;  // "z:\\"
        if (dst == path + 1) break;   // "\\"
        if (dst == path + 2 && isfilesep(path[0])) {
          // "\\\\" is not collapsed to "\\" because "\\\\" marks the
          // beginning of a UNC pathname.  Even though it is not, by
          // itself, a valid UNC pathname, we leave it as is in order
          // to be consistent with the path canonicalizer as well
          // as the win32 APIs, which treat this case as an invalid
          // UNC pathname rather than as an alias for the root
          // directory of the current drive.
          break;
        }
        end = --dst;  // Path does not denote a root directory, so
                      // remove trailing separator
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else {  // Copy a single-byte character
        char c = *src++;
        *dst++ = c;
        // Space is not a legal ending character
        if (c != ' ') end = dst;
      }
    }
  }

  *end = '\0';

  // For "z:", add "." to work around a bug in the C runtime library
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}

// This code is a copy of JDK's sysSetLength
// from src/windows/hpi/src/sys_api_md.c

// Truncate (or extend) the file underlying fd to the given 64-bit length.
// Returns 0 on success, -1 on failure.
int os::ftruncate(int fd, jlong length) {
  HANDLE h = (HANDLE)::_get_osfhandle(fd);
  long high = (long)(length >> 32);
  DWORD ret;

  if (h == (HANDLE)(-1)) {
    return -1;
  }

  // Position the file pointer at the requested length ...
  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
  // 0xFFFFFFFF can be a valid low dword of the position, so also check GetLastError.
  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
    return -1;
  }

  // ... then cut the file off (or extend it) there.
  if (::SetEndOfFile(h) == FALSE) {
    return -1;
  }

  return 0;
}

int os::get_fileno(FILE* fp) {
  return _fileno(fp);
}

// This code is a copy of JDK's sysSync
// from src/windows/hpi/src/sys_api_md.c
// except for the legacy workaround for a bug in Win 98

int os::fsync(int fd) {
  HANDLE handle = (HANDLE)::_get_osfhandle(fd);

  // ERROR_ACCESS_DENIED is tolerated (e.g. handle not opened for writing).
  if ((!::FlushFileBuffers(handle)) &&
      (GetLastError() != ERROR_ACCESS_DENIED)) {
    // from winerror.h
    return -1;
  }
  return 0;
}

static int nonSeekAvailable(int, long *);
static int stdinAvailable(int, long *);

#define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
#define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)

// This code is a copy of JDK's sysAvailable
// from src/windows/hpi/src/sys_api_md.c

// Computes the number of bytes available for reading on fd into *bytes.
// Returns TRUE on success, FALSE on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      // Character devices and pipes are not seekable; query them directly.
      int ret;
      long lpbytes;
      if (fd == 0) {
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    // Seekable file: available = size (seek to end) minus current position.
    if ((cur =
         ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
      return FALSE;
    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
      // Restore the original position; fail if that is not possible.
      return FALSE;
    }
    *bytes = end - cur;
    return TRUE;
  } else {
    return FALSE;
  }
}

void os::flockfile(FILE* fp) {
  _lock_file(fp);
}

void os::funlockfile(FILE* fp) {
  _unlock_file(fp);
}

// This code is a copy of JDK's nonSeekAvailable
// from src/windows/hpi/src/sys_api_md.c

static int nonSeekAvailable(int fd, long *pbytes) {
  // This is used for available on non-seekable devices
  // (like both named and anonymous pipes, such as pipes
  //  connected to an exec'd process).
  // Standard Input is a special case.
  HANDLE han;

  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
    return FALSE;
  }

  // Peek with a zero-length buffer just to learn how many bytes are pending.
  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
    // PeekNamedPipe fails when at EOF.  In that case we
    // simply make *pbytes = 0 which is consistent with the
    // behavior we get on Solaris when an fd is at EOF.
    // The only alternative is to raise an Exception,
    // which isn't really warranted.
4681 // 4682 if (::GetLastError() != ERROR_BROKEN_PIPE) { 4683 return FALSE; 4684 } 4685 *pbytes = 0; 4686 } 4687 return TRUE; 4688 } 4689 4690 #define MAX_INPUT_EVENTS 2000 4691 4692 // This code is a copy of JDK's stdinAvailable 4693 // from src/windows/hpi/src/sys_api_md.c 4694 4695 static int stdinAvailable(int fd, long *pbytes) { 4696 HANDLE han; 4697 DWORD numEventsRead = 0; // Number of events read from buffer 4698 DWORD numEvents = 0; // Number of events in buffer 4699 DWORD i = 0; // Loop index 4700 DWORD curLength = 0; // Position marker 4701 DWORD actualLength = 0; // Number of bytes readable 4702 BOOL error = FALSE; // Error holder 4703 INPUT_RECORD *lpBuffer; // Pointer to records of input events 4704 4705 if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) { 4706 return FALSE; 4707 } 4708 4709 // Construct an array of input records in the console buffer 4710 error = ::GetNumberOfConsoleInputEvents(han, &numEvents); 4711 if (error == 0) { 4712 return nonSeekAvailable(fd, pbytes); 4713 } 4714 4715 // lpBuffer must fit into 64K or else PeekConsoleInput fails 4716 if (numEvents > MAX_INPUT_EVENTS) { 4717 numEvents = MAX_INPUT_EVENTS; 4718 } 4719 4720 lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal); 4721 if (lpBuffer == NULL) { 4722 return FALSE; 4723 } 4724 4725 error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead); 4726 if (error == 0) { 4727 os::free(lpBuffer); 4728 return FALSE; 4729 } 4730 4731 // Examine input records for the number of bytes available 4732 for (i=0; i<numEvents; i++) { 4733 if (lpBuffer[i].EventType == KEY_EVENT) { 4734 4735 KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *) 4736 &(lpBuffer[i].Event); 4737 if (keyRecord->bKeyDown == TRUE) { 4738 CHAR *keyPressed = (CHAR *) &(keyRecord->uChar); 4739 curLength++; 4740 if (*keyPressed == '\r') { 4741 actualLength = curLength; 4742 } 4743 } 4744 } 4745 } 4746 4747 if (lpBuffer != NULL) { 4748 os::free(lpBuffer); 4749 } 
4750 4751 *pbytes = (long) actualLength; 4752 return TRUE; 4753 } 4754 4755 // Map a block of memory. 4756 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 4757 char *addr, size_t bytes, bool read_only, 4758 bool allow_exec) { 4759 HANDLE hFile; 4760 char* base; 4761 4762 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL, 4763 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); 4764 if (hFile == NULL) { 4765 log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError()); 4766 return NULL; 4767 } 4768 4769 if (allow_exec) { 4770 // CreateFileMapping/MapViewOfFileEx can't map executable memory 4771 // unless it comes from a PE image (which the shared archive is not.) 4772 // Even VirtualProtect refuses to give execute access to mapped memory 4773 // that was not previously executable. 4774 // 4775 // Instead, stick the executable region in anonymous memory. Yuck. 4776 // Penalty is that ~4 pages will not be shareable - in the future 4777 // we might consider DLLizing the shared archive with a proper PE 4778 // header so that mapping executable + sharing is possible. 4779 4780 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE, 4781 PAGE_READWRITE); 4782 if (base == NULL) { 4783 log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError()); 4784 CloseHandle(hFile); 4785 return NULL; 4786 } 4787 4788 DWORD bytes_read; 4789 OVERLAPPED overlapped; 4790 overlapped.Offset = (DWORD)file_offset; 4791 overlapped.OffsetHigh = 0; 4792 overlapped.hEvent = NULL; 4793 // ReadFile guarantees that if the return value is true, the requested 4794 // number of bytes were read before returning. 
4795 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; 4796 if (!res) { 4797 log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError()); 4798 release_memory(base, bytes); 4799 CloseHandle(hFile); 4800 return NULL; 4801 } 4802 } else { 4803 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, 4804 NULL /* file_name */); 4805 if (hMap == NULL) { 4806 log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError()); 4807 CloseHandle(hFile); 4808 return NULL; 4809 } 4810 4811 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; 4812 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, 4813 (DWORD)bytes, addr); 4814 if (base == NULL) { 4815 log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError()); 4816 CloseHandle(hMap); 4817 CloseHandle(hFile); 4818 return NULL; 4819 } 4820 4821 if (CloseHandle(hMap) == 0) { 4822 log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError()); 4823 CloseHandle(hFile); 4824 return base; 4825 } 4826 } 4827 4828 if (allow_exec) { 4829 DWORD old_protect; 4830 DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; 4831 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; 4832 4833 if (!res) { 4834 log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError()); 4835 // Don't consider this a hard error, on IA32 even if the 4836 // VirtualProtect fails, we should still be able to execute 4837 CloseHandle(hFile); 4838 return base; 4839 } 4840 } 4841 4842 if (CloseHandle(hFile) == 0) { 4843 log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError()); 4844 return base; 4845 } 4846 4847 return base; 4848 } 4849 4850 4851 // Remap a block of memory. 
4852 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4853 char *addr, size_t bytes, bool read_only, 4854 bool allow_exec) { 4855 // This OS does not allow existing memory maps to be remapped so we 4856 // have to unmap the memory before we remap it. 4857 if (!os::unmap_memory(addr, bytes)) { 4858 return NULL; 4859 } 4860 4861 // There is a very small theoretical window between the unmap_memory() 4862 // call above and the map_memory() call below where a thread in native 4863 // code may be able to access an address that is no longer mapped. 4864 4865 return os::map_memory(fd, file_name, file_offset, addr, bytes, 4866 read_only, allow_exec); 4867 } 4868 4869 4870 // Unmap a block of memory. 4871 // Returns true=success, otherwise false. 4872 4873 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4874 MEMORY_BASIC_INFORMATION mem_info; 4875 if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) { 4876 log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError()); 4877 return false; 4878 } 4879 4880 // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx. 4881 // Instead, executable region was allocated using VirtualAlloc(). See 4882 // pd_map_memory() above. 4883 // 4884 // The following flags should match the 'exec_access' flages used for 4885 // VirtualProtect() in pd_map_memory(). 
4886 if (mem_info.Protect == PAGE_EXECUTE_READ || 4887 mem_info.Protect == PAGE_EXECUTE_READWRITE) { 4888 return pd_release_memory(addr, bytes); 4889 } 4890 4891 BOOL result = UnmapViewOfFile(addr); 4892 if (result == 0) { 4893 log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError()); 4894 return false; 4895 } 4896 return true; 4897 } 4898 4899 void os::pause() { 4900 char filename[MAX_PATH]; 4901 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4902 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4903 } else { 4904 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4905 } 4906 4907 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4908 if (fd != -1) { 4909 struct stat buf; 4910 ::close(fd); 4911 while (::stat(filename, &buf) == 0) { 4912 Sleep(100); 4913 } 4914 } else { 4915 jio_fprintf(stderr, 4916 "Could not open pause file '%s', continuing immediately.\n", filename); 4917 } 4918 } 4919 4920 Thread* os::ThreadCrashProtection::_protected_thread = NULL; 4921 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL; 4922 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0; 4923 4924 os::ThreadCrashProtection::ThreadCrashProtection() { 4925 } 4926 4927 // See the caveats for this class in os_windows.hpp 4928 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4929 // into this method and returns false. If no OS EXCEPTION was raised, returns 4930 // true. 4931 // The callback is supposed to provide the method that should be protected. 
//
bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {

  // Serialize all uses of crash protection: only one protected region is
  // active at a time, guarded by _crash_mux.
  Thread::muxAcquire(&_crash_mux, "CrashProtection");

  _protected_thread = Thread::current_or_null();
  assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");

  bool success = true;
  __try {
    _crash_protection = this;
    cb.call();
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // only for protection, nothing to do
    success = false;
  }
  _crash_protection = NULL;
  _protected_thread = NULL;
  Thread::muxRelease(&_crash_mux);
  return success;
}

// An Event wraps a win32 "CreateEvent" kernel handle.
//
// We have a number of choices regarding "CreateEvent" win32 handle leakage:
//
// 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
//     field, and call CloseHandle() on the win32 event handle.  Unpark() would
//     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
//     In addition, an unpark() operation might fetch the handle field, but the
//     event could recycle between the fetch and the SetEvent() operation.
//     SetEvent() would either fail because the handle was invalid, or inadvertently work,
//     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
//     on an stale but recycled handle would be harmless, but in practice this might
//     confuse other non-Sun code, so it's not a viable approach.
//
// 2:  Once a win32 event handle is associated with an Event, it remains associated
//     with the Event.  The event handle is never closed.  This could be construed
//     as handle leakage, but only up to the maximum # of threads that have been extant
//     at any one time.  This shouldn't be an issue, as windows platforms typically
//     permit a process to have hundreds of thousands of open handles.
//
// 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
//     and release unused handles.
//
// 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
//     It's not clear, however, that we wouldn't be trading one type of leak for another.
//
// 5.  Use an RCU-like mechanism (Read-Copy Update).
//     Or perhaps something similar to Maged Michael's "Hazard pointers".
//
// We use (2).
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
// 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
//     to recover from (or at least detect) the dreaded Windows 841176 bug.
// 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
//     into a single win32 CreateEvent() handle.
//
// Assumption:
//    Only one parker can exist on an event, which is why we allocate
//    them per-thread. Multiple unparkers can coexist.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block; then set _Event to 0 before returning
//
// _Event transitions in unpark()
//    0 => 1 : just return
//    1 => 1 : just return
//   -1 => either 0 or 1; must signal target thread
//         That is, we can safely transition _Event from -1 to either
//         0 or 1.
//
// _Event serves as a restricted-range semaphore.
//   -1 : thread is blocked, i.e. there is a waiter
//    0 : neutral: thread is running or ready,
//        could have been signaled after a wait started
//    1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
//

// Timed park: wait at most 'Millis' ms for a permit.  Returns OS_OK when a
// permit was consumed (or unpark raced the timeout), OS_TIMEOUT otherwise.
int os::PlatformEvent::park(jlong Millis) {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL , "Invariant");
  guarantee(Millis > 0          , "Invariant");

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.
  // Consider: use atomic decrement instead of CAS-loop

  // Atomically decrement _Event; the pre-decrement value 'v' tells us
  // whether a permit was available (1) or we must block (0).
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return OS_OK;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;     // set prd = MAX (Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
  }
  v = _Event;
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultanous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}

// Untimed park: block until unpark() provides a permit.
void os::PlatformEvent::park() {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Consider: use atomic decrement instead of CAS-loop
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  // The loop guards against spurious wakeups while _Event is still negative.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case can
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}

void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // Only signal the kernel event when a waiter was present (old value -1).
  if (Atomic::xchg(1, &_Event) >= 0) return;

  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

// The Windows implementation of Park is very straightforward: Basic
// operations on Win32 Events turn out to have the right semantics to
// use them directly. We opportunistically resuse the event inherited
// from Monitor.
// Park the current thread until unpark() is called, an interrupt is
// pending, or the timeout elapses.  'time' is an absolute deadline in
// millis when isAbsolute, otherwise a relative timeout in nanos
// (0 == wait forever).
void Parker::park(bool isAbsolute, jlong time) {
  guarantee(_ParkEvent != NULL, "invariant");
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  } else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  } else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) {  // already elapsed
      return;
    }
  } else { // relative
    time /= 1000000;  // Must coarsen from nanos to millis
    if (time == 0) {  // Wait for the minimal time unit if zero
      time = 1;
    }
  }

  JavaThread* thread = JavaThread::current();

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    ResetEvent(_ParkEvent);
    return;
  } else {
    // Transition to blocked state so the VM can reach a safepoint while
    // this thread waits.
    ThreadBlockInVM tbivm(thread);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    thread->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (thread->handle_special_suspend_equivalent_condition()) {
      thread->java_suspend_self();
    }
  }
}

// Wake up (or pre-signal) the parked thread.  Setting an already-set
// event is harmless, so no state tracking is needed here.
void Parker::unpark() {
  guarantee(_ParkEvent != NULL, "invariant");
  SetEvent(_ParkEvent);
}

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't create a new process).
int os::fork_and_exec(char* cmd) {
  STARTUPINFO si;
  PROCESS_INFORMATION pi;
  DWORD exit_code;

  char * cmd_string;
  // NOTE(review): binding a string literal to 'char *' is deprecated in
  // ISO C++ - consider 'const char *'.
  char * cmd_prefix = "cmd /C ";
  size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
  cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
  if (cmd_string == NULL) {
    return -1;
  }
  cmd_string[0] = '\0';
  strcat(cmd_string, cmd_prefix);
  strcat(cmd_string, cmd);

  // now replace all '\n' with '&'
  char * substring = cmd_string;
  while ((substring = strchr(substring, '\n')) != NULL) {
    substring[0] = '&';
    substring++;
  }
  memset(&si, 0, sizeof(si));
  si.cb = sizeof(si);
  memset(&pi, 0, sizeof(pi));
  BOOL rslt = CreateProcess(NULL,   // executable name - use command line
                            cmd_string,    // command line
                            NULL,   // process security attribute
                            NULL,   // thread security attribute
                            TRUE,   // inherits system handles
                            0,      // no creation flags
                            NULL,   // use parent's environment block
                            NULL,   // use parent's starting directory
                            &si,    // (in) startup information
                            &pi);   // (out) process information

  if (rslt) {
    // Wait until child process exits.
    WaitForSingleObject(pi.hProcess, INFINITE);

    GetExitCodeProcess(pi.hProcess, &exit_code);

    // Close process and thread handles.
    CloseHandle(pi.hProcess);
    CloseHandle(pi.hThread);
  } else {
    exit_code = -1;
  }

  FREE_C_HEAP_ARRAY(char, cmd_string);
  return (int)exit_code;
}

// Print "<library> ::<function> + 0x<offset>" for the code address 'addr'
// to 'st'.  Returns true when the address maps to a known library.
bool os::find(address addr, outputStream* st) {
  int offset = -1;
  bool result = false;
  char buf[256];
  if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
    st->print(PTR_FORMAT " ", addr);
    if (strlen(buf) < sizeof(buf) - 1) {
      // Print only the base name of the DLL, not the full path.
      char* p = strrchr(buf, '\\');
      if (p) {
        st->print("%s", p + 1);
      } else {
        st->print("%s", buf);
      }
    } else {
      // The library name is probably truncated. Let's omit the library name.
      // See also JDK-8147512.
    }
    if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
      st->print("::%s + 0x%x", buf, offset);
    }
    st->cr();
    result = true;
  }
  return result;
}

// SEH filter: resume execution when an access violation hit the thread's
// memory-serialize page (a deliberate fault used for memory serialization);
// otherwise continue searching for another handler.
LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
  DWORD exception_code = e->ExceptionRecord->ExceptionCode;

  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    JavaThread* thread = JavaThread::current();
    PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
    // ExceptionInformation[1] holds the faulting address.
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (os::is_memory_serialize_page(thread, addr)) {
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  return EXCEPTION_CONTINUE_SEARCH;
}

// Initialize Winsock 2.2.  Returns JNI_OK on success, JNI_ERR otherwise.
static jint initSock() {
  WSADATA wsadata;

  if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
    jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
                ::GetLastError());
    return JNI_ERR;
  }
  return JNI_OK;
}

struct hostent* os::get_host_by_name(char* name) {
  return (struct hostent*)gethostbyname(name);
}

int os::socket_close(int fd) {
  return ::closesocket(fd);
}

int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}

int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  return ::connect(fd, him, len);
}

int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  return ::recv(fd, buf, (int)nBytes, flags);
}

int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

// WINDOWS CONTEXT Flags for THREAD_SAMPLING
#if defined(IA32)
  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined (AMD64)
  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif

// returns true if thread could be suspended,
// false otherwise
static bool do_suspend(HANDLE* h) {
  if (h != NULL) {
    // SuspendThread returns the previous suspend count, or (DWORD)-1 on
    // failure.
    if (SuspendThread(*h) != ~0) {
      return true;
    }
  }
  return false;
}

// resume the thread
// calling resume on an active thread is a no-op
static void do_resume(HANDLE* h) {
  if (h != NULL) {
    ResumeThread(*h);
  }
}

// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value.
void get_thread_handle_for_extended_context(HANDLE* h,
                                            OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

// Thread sampling implementation
//
// Suspend the target thread, capture its CPU context, hand the context to
// do_task(), then resume the thread and release the handle.
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}

// Append a debugging prompt to 'buf' (which already contains error text)
// and show a message box.  When the user answers yes, trigger a breakpoint
// so an attached (or spawned) debugger can take over.  Returns the user's
// choice, forced to false after the breakpoint has been taken.
bool os::start_debugging(char *buf, int buflen) {
  int len = (int)strlen(buf);
  char *p = &buf[len];

  jio_snprintf(p, buflen-len,
               "\n\n"
               "Do you want to debug the problem?\n\n"
               "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
               "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
               "Otherwise, select 'No' to abort...",
               os::current_process_id(), os::current_thread_id());

  bool yes = os::message_box("Unexpected Error", buf);

  if (yes) {
    // os::breakpoint() calls DebugBreak(), which causes a breakpoint
    // exception. If VM is running inside a debugger, the debugger will
    // catch the exception. Otherwise, the breakpoint exception will reach
    // the default windows exception handler, which can spawn a debugger and
    // automatically attach to the dying VM.
    os::breakpoint();
    yes = false;
  }
  return yes;
}

// Handle of the current executable module (for dlsym-style lookups).
void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//                              == false if only the base name of the library is passed in
//                                     such as "L"
// Returns a C-heap-allocated name (caller frees), or NULL on failure.
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // Buffer holds "<sym_name>_<lib_name>" plus NUL (the '@XX' suffix, when
  // present, replaces part of sym_name and is re-appended below).
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}

#ifndef PRODUCT

// test the code path in reserve_memory_special() that tries to allocate memory in a single
// contiguous memory block at a particular address.
// The test first tries to find a good approximate address to allocate at by using the same
// method to allocate some memory at any address. The test then tries to allocate memory in
// the vicinity (not directly after it to avoid possible by-chance use of that location)
// This is of course only some dodgy assumption, there is no guarantee that the vicinity of
// the previously allocated memory is available for allocation. The only actual failure
// that is reported is when the test tries to allocate at a particular location but gets a
// different valid one. A NULL return value at this point is not considered an error but may
// be legitimate.
// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5498 void TestReserveMemorySpecial_test() { 5499 if (!UseLargePages) { 5500 if (VerboseInternalVMTests) { 5501 tty->print("Skipping test because large pages are disabled"); 5502 } 5503 return; 5504 } 5505 // save current value of globals 5506 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; 5507 bool old_use_numa_interleaving = UseNUMAInterleaving; 5508 5509 // set globals to make sure we hit the correct code path 5510 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; 5511 5512 // do an allocation at an address selected by the OS to get a good one. 5513 const size_t large_allocation_size = os::large_page_size() * 4; 5514 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); 5515 if (result == NULL) { 5516 if (VerboseInternalVMTests) { 5517 tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.", 5518 large_allocation_size); 5519 } 5520 } else { 5521 os::release_memory_special(result, large_allocation_size); 5522 5523 // allocate another page within the recently allocated memory area which seems to be a good location. At least 5524 // we managed to get it once. 5525 const size_t expected_allocation_size = os::large_page_size(); 5526 char* expected_location = result + os::large_page_size(); 5527 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); 5528 if (actual_location == NULL) { 5529 if (VerboseInternalVMTests) { 5530 tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.", 5531 expected_location, large_allocation_size); 5532 } 5533 } else { 5534 // release memory 5535 os::release_memory_special(actual_location, expected_allocation_size); 5536 // only now check, after releasing any memory to avoid any leaks. 
5537 assert(actual_location == expected_location, 5538 "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead", 5539 expected_location, expected_allocation_size, actual_location); 5540 } 5541 } 5542 5543 // restore globals 5544 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; 5545 UseNUMAInterleaving = old_use_numa_interleaving; 5546 } 5547 #endif // PRODUCT 5548 5549 /* 5550 All the defined signal names for Windows. 5551 5552 NOTE that not all of these names are accepted by FindSignal! 5553 5554 For various reasons some of these may be rejected at runtime. 5555 5556 Here are the names currently accepted by a user of sun.misc.Signal with 5557 1.4.1 (ignoring potential interaction with use of chaining, etc): 5558 5559 (LIST TBD) 5560 5561 */ 5562 int os::get_signal_number(const char* name) { 5563 static const struct { 5564 char* name; 5565 int number; 5566 } siglabels [] = 5567 // derived from version 6.0 VC98/include/signal.h 5568 {"ABRT", SIGABRT, // abnormal termination triggered by abort cl 5569 "FPE", SIGFPE, // floating point exception 5570 "SEGV", SIGSEGV, // segment violation 5571 "INT", SIGINT, // interrupt 5572 "TERM", SIGTERM, // software term signal from kill 5573 "BREAK", SIGBREAK, // Ctrl-Break sequence 5574 "ILL", SIGILL}; // illegal instruction 5575 for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) { 5576 if (strcmp(name, siglabels[i].name) == 0) { 5577 return siglabels[i].number; 5578 } 5579 } 5580 return -1; 5581 } 5582 5583 // Fast current thread access 5584 5585 int os::win32::_thread_ptr_offset = 0; 5586 5587 static void call_wrapper_dummy() {} 5588 5589 // We need to call the os_exception_wrapper once so that it sets 5590 // up the offset from FS of the thread pointer. 5591 void os::win32::initialize_thread_ptr_offset() { 5592 os::os_exception_wrapper((java_call_t)call_wrapper_dummy, 5593 NULL, NULL, NULL, NULL); 5594 }