1 /* 2 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce 26 #define _WIN32_WINNT 0x0600 27 28 // no precompiled headers 29 #include "jvm.h" 30 #include "classfile/classLoader.hpp" 31 #include "classfile/systemDictionary.hpp" 32 #include "classfile/vmSymbols.hpp" 33 #include "code/icBuffer.hpp" 34 #include "code/vtableStubs.hpp" 35 #include "compiler/compileBroker.hpp" 36 #include "compiler/disassembler.hpp" 37 #include "interpreter/interpreter.hpp" 38 #include "logging/log.hpp" 39 #include "memory/allocation.inline.hpp" 40 #include "memory/filemap.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "os_share_windows.hpp" 43 #include "os_windows.inline.hpp" 44 #include "prims/jniFastGetField.hpp" 45 #include "prims/jvm_misc.hpp" 46 #include "runtime/arguments.hpp" 47 #include "runtime/atomic.hpp" 48 #include "runtime/extendedPC.hpp" 49 #include "runtime/globals.hpp" 50 #include "runtime/interfaceSupport.inline.hpp" 51 #include "runtime/java.hpp" 52 #include "runtime/javaCalls.hpp" 53 #include "runtime/mutexLocker.hpp" 54 #include "runtime/objectMonitor.hpp" 55 #include "runtime/orderAccess.inline.hpp" 56 #include "runtime/osThread.hpp" 57 #include "runtime/perfMemory.hpp" 58 #include "runtime/sharedRuntime.hpp" 59 #include "runtime/statSampler.hpp" 60 #include "runtime/stubRoutines.hpp" 61 #include "runtime/thread.inline.hpp" 62 #include "runtime/threadCritical.hpp" 63 #include "runtime/timer.hpp" 64 #include "runtime/vm_version.hpp" 65 #include "services/attachListener.hpp" 66 #include "services/memTracker.hpp" 67 #include "services/runtimeService.hpp" 68 #include "utilities/align.hpp" 69 #include "utilities/decoder.hpp" 70 #include "utilities/defaultStream.hpp" 71 #include "utilities/events.hpp" 72 #include "utilities/growableArray.hpp" 73 #include "utilities/macros.hpp" 74 #include "utilities/vmError.hpp" 75 #include "symbolengine.hpp" 76 #include "windbghelp.hpp" 77 78 79 #ifdef _DEBUG 80 #include <crtdbg.h> 81 #endif 82 83 84 
#include <windows.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/timeb.h>
#include <objidl.h>
#include <shlobj.h>

#include <malloc.h>
#include <signal.h>
#include <direct.h>
#include <errno.h>
#include <fcntl.h>
#include <io.h>
#include <process.h>    // For _beginthreadex(), _endthreadex()
#include <imagehlp.h>   // For os::dll_address_to_function_name
// for enumerating dll libraries
#include <vdmdbg.h>
#include <psapi.h>
#include <mmsystem.h>
#include <winsock2.h>

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(-1)

// For DLL loading/load error detection
// Values of PE COFF
#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
#define IMAGE_FILE_SIGNATURE_LENGTH 4

// Handle/id of the initial process and thread. Presumably assigned during
// VM startup elsewhere in this file -- not visible in this chunk; confirm
// before relying on them being set.
static HANDLE main_process;
static HANDLE main_thread;
static int main_thread_id;

// Scratch FILETIME slots for process time queries.
static FILETIME process_creation_time;
static FILETIME process_exit_time;
static FILETIME process_user_time;
static FILETIME process_kernel_time;

// CPU name token used when composing error-report strings.
#ifdef _M_AMD64
#define __CPU__ amd64
#else
#define __CPU__ i486
#endif

// save DLL module handle, used by GetModuleFileName

HINSTANCE vm_lib_handle;

// DLL entry point for jvm.dll.
// On process attach: records the module handle, optionally raises the OS
// timer interrupt frequency to 1ms (ForceTimeHighResolution), and performs
// the early (pre-VM-init) setup of the dbghelp and symbol-engine wrappers.
// On process detach: restores the timer period if it was raised.
BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
  switch (reason) {
  case DLL_PROCESS_ATTACH:
    vm_lib_handle = hinst;
    if (ForceTimeHighResolution) {
      timeBeginPeriod(1L);  // request 1ms scheduling/timer granularity
    }
    WindowsDbgHelp::pre_initialize();
    SymbolEngine::pre_initialize();
    break;
  case DLL_PROCESS_DETACH:
    if (ForceTimeHighResolution) {
      timeEndPeriod(1L);    // must balance the timeBeginPeriod above
    }
    break;
  default:
    break;
  }
  return true;
}

// Convert a FILETIME (64-bit count of 100ns units) to seconds as a double.
// The high dword is scaled by (2^32-1)/1e7 -- a close approximation of the
// exact 2^32/1e7 factor; adequate for the elapsed-time uses in this file.
static inline double fileTimeAsDouble(FILETIME* time) {
  const double high = (double) ((unsigned int) ~0);
  const double split = 10000000.0;
  double result = (time->dwLowDateTime / split) +
                  time->dwHighDateTime * (high/split);
  return result;
}

// Implementation of os

// Remove 'name' from the process environment (Win32 semantics: setting a
// variable to NULL deletes it).
bool os::unsetenv(const char* name) {
  assert(name != NULL, "Null pointer");
  return (SetEnvironmentVariable(name, NULL) == TRUE);
}

// No setuid programs under Windows.
bool os::have_special_privileges() {
  return false;
}


// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here.
// For Windows at the moment does nothing
void os::run_periodic_checks() {
  return;
}

// previous UnhandledExceptionFilter, if there is one
static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;

LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);

// Compute and publish the java.home, dll dir, boot classpath, library path
// and extension-dirs system properties, derived from the location of
// jvm.dll (or from the _ALT_JAVA_HOME_DIR override).
void os::init_system_properties_values() {
  // sysclasspath, java_home, dll_dir
  {
    char *home_path;
    char *dll_path;
    char *pslash;
    char *bin = "\\bin";
    char home_dir[MAX_PATH + 1];
    // Test-only override of the detected JAVA_HOME.
    char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");

    if (alt_home_dir != NULL) {
      strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
      home_dir[MAX_PATH] = '\0';  // strncpy does not guarantee termination
    } else {
      os::jvm_path(home_dir, sizeof(home_dir));
      // Found the full path to jvm.dll.
      // Now cut the path to <java_home>/jre if we can.
      *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
      pslash = strrchr(home_dir, '\\');
      if (pslash != NULL) {
        *pslash = '\0';                   // get rid of \{client|server}
        pslash = strrchr(home_dir, '\\');
        if (pslash != NULL) {
          *pslash = '\0';                 // get rid of \bin
        }
      }
    }

    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
    if (home_path == NULL) {
      return;
    }
    strcpy(home_path, home_dir);
    Arguments::set_java_home(home_path);
    FREE_C_HEAP_ARRAY(char, home_path);  // set_java_home copied it

    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
                                mtInternal);
    if (dll_path == NULL) {
      return;
    }
    strcpy(dll_path, home_dir);
    strcat(dll_path, bin);
    Arguments::set_dll_dir(dll_path);
    FREE_C_HEAP_ARRAY(char, dll_path);

    if (!set_boot_path('\\', ';')) {
      return;
    }
  }

  // library_path
  #define EXT_DIR "\\lib\\ext"
  #define BIN_DIR "\\bin"
  #define PACKAGE_DIR "\\Sun\\Java"
  {
    // Win32 library search order (See the documentation for LoadLibrary):
    //
    // 1. The directory from which application is loaded.
    // 2. The system wide Java Extensions directory (Java only)
    // 3. System directory (GetSystemDirectory)
    // 4. Windows directory (GetWindowsDirectory)
    // 5. The PATH environment variable
    // 6. The current directory

    char *library_path;
    char tmp[MAX_PATH];
    char *path_str = ::getenv("PATH");

    // Sized for the five MAX_PATH components gathered below, the two fixed
    // suffixes, the user PATH, and separators/terminator slack (+10).
    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
                                    sizeof(BIN_DIR) + (path_str ?
                                    strlen(path_str) : 0) + 10, mtInternal);

    library_path[0] = '\0';

    // 1. Directory containing the launcher executable.
    GetModuleFileName(NULL, tmp, sizeof(tmp));
    *(strrchr(tmp, '\\')) = '\0';  // strip the executable file name
    strcat(library_path, tmp);

    // 2. System-wide Java extensions directory (<windir>\Sun\Java\bin).
    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);
    strcat(library_path, PACKAGE_DIR BIN_DIR);

    // 3. System directory.
    GetSystemDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    // 4. Windows directory.
    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    // 5. User PATH, if any.
    if (path_str) {
      strcat(library_path, ";");
      strcat(library_path, path_str);
    }

    // 6. Current directory.
    strcat(library_path, ";.");

    Arguments::set_library_path(library_path);
    FREE_C_HEAP_ARRAY(char, library_path);
  }

  // Default extensions directory
  {
    char path[MAX_PATH];
    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
    GetWindowsDirectory(path, MAX_PATH);
    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
            path, PACKAGE_DIR, EXT_DIR);
    Arguments::set_ext_dirs(buf);
  }
  #undef EXT_DIR
  #undef BIN_DIR
  #undef PACKAGE_DIR

#ifndef _WIN64
  // set our UnhandledExceptionFilter and save any previous one
  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif

  // Done
  return;
}

// Trap into an attached debugger.
void os::breakpoint() {
  DebugBreak();
}

// Invoked from the BREAKPOINT Macro
extern "C" void breakpoint() {
  os::breakpoint();
}

// RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
// So far, this method is only used by Native Memory Tracking, which is
// only supported on Windows XP or later.
322 // 323 int os::get_native_stack(address* stack, int frames, int toSkip) { 324 int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL); 325 for (int index = captured; index < frames; index ++) { 326 stack[index] = NULL; 327 } 328 return captured; 329 } 330 331 332 // os::current_stack_base() 333 // 334 // Returns the base of the stack, which is the stack's 335 // starting address. This function must be called 336 // while running on the stack of the thread being queried. 337 338 address os::current_stack_base() { 339 MEMORY_BASIC_INFORMATION minfo; 340 address stack_bottom; 341 size_t stack_size; 342 343 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 344 stack_bottom = (address)minfo.AllocationBase; 345 stack_size = minfo.RegionSize; 346 347 // Add up the sizes of all the regions with the same 348 // AllocationBase. 349 while (1) { 350 VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo)); 351 if (stack_bottom == (address)minfo.AllocationBase) { 352 stack_size += minfo.RegionSize; 353 } else { 354 break; 355 } 356 } 357 return stack_bottom + stack_size; 358 } 359 360 size_t os::current_stack_size() { 361 size_t sz; 362 MEMORY_BASIC_INFORMATION minfo; 363 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 364 sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase; 365 return sz; 366 } 367 368 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 369 const struct tm* time_struct_ptr = localtime(clock); 370 if (time_struct_ptr != NULL) { 371 *res = *time_struct_ptr; 372 return res; 373 } 374 return NULL; 375 } 376 377 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) { 378 const struct tm* time_struct_ptr = gmtime(clock); 379 if (time_struct_ptr != NULL) { 380 *res = *time_struct_ptr; 381 return res; 382 } 383 return NULL; 384 } 385 386 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo); 387 388 // Thread start routine for all newly created threads 389 static unsigned 
__stdcall thread_native_entry(Thread* thread) { 390 // Try to randomize the cache line index of hot stack frames. 391 // This helps when threads of the same stack traces evict each other's 392 // cache lines. The threads can be either from the same JVM instance, or 393 // from different JVM instances. The benefit is especially true for 394 // processors with hyperthreading technology. 395 static int counter = 0; 396 int pid = os::current_process_id(); 397 _alloca(((pid ^ counter++) & 7) * 128); 398 399 thread->initialize_thread_current(); 400 401 OSThread* osthr = thread->osthread(); 402 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 403 404 if (UseNUMA) { 405 int lgrp_id = os::numa_get_group_id(); 406 if (lgrp_id != -1) { 407 thread->set_lgrp_id(lgrp_id); 408 } 409 } 410 411 // Diagnostic code to investigate JDK-6573254 412 int res = 30115; // non-java thread 413 if (thread->is_Java_thread()) { 414 res = 20115; // java thread 415 } 416 417 log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id()); 418 419 // Install a win32 structured exception handler around every thread created 420 // by VM, so VM can generate error dump when an exception occurred in non- 421 // Java thread (e.g. VM thread). 422 __try { 423 thread->run(); 424 } __except(topLevelExceptionFilter( 425 (_EXCEPTION_POINTERS*)_exception_info())) { 426 // Nothing to do. 
427 } 428 429 log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id()); 430 431 // One less thread is executing 432 // When the VMThread gets here, the main thread may have already exited 433 // which frees the CodeHeap containing the Atomic::add code 434 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) { 435 Atomic::dec(&os::win32::_os_thread_count); 436 } 437 438 // If a thread has not deleted itself ("delete this") as part of its 439 // termination sequence, we have to ensure thread-local-storage is 440 // cleared before we actually terminate. No threads should ever be 441 // deleted asynchronously with respect to their termination. 442 if (Thread::current_or_null_safe() != NULL) { 443 assert(Thread::current_or_null_safe() == thread, "current thread is wrong"); 444 thread->clear_thread_current(); 445 } 446 447 // Thread must not return from exit_process_or_thread(), but if it does, 448 // let it proceed to exit normally 449 return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res); 450 } 451 452 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, 453 int thread_id) { 454 // Allocate the OSThread object 455 OSThread* osthread = new OSThread(NULL, NULL); 456 if (osthread == NULL) return NULL; 457 458 // Initialize support for Java interrupts 459 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 460 if (interrupt_event == NULL) { 461 delete osthread; 462 return NULL; 463 } 464 osthread->set_interrupt_event(interrupt_event); 465 466 // Store info on the Win32 thread into the OSThread 467 osthread->set_thread_handle(thread_handle); 468 osthread->set_thread_id(thread_id); 469 470 if (UseNUMA) { 471 int lgrp_id = os::numa_get_group_id(); 472 if (lgrp_id != -1) { 473 thread->set_lgrp_id(lgrp_id); 474 } 475 } 476 477 // Initial thread state is INITIALIZED, not SUSPENDED 478 osthread->set_state(INITIALIZED); 479 480 return osthread; 481 } 482 483 484 bool 
os::create_attached_thread(JavaThread* thread) { 485 #ifdef ASSERT 486 thread->verify_not_published(); 487 #endif 488 HANDLE thread_h; 489 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(), 490 &thread_h, THREAD_ALL_ACCESS, false, 0)) { 491 fatal("DuplicateHandle failed\n"); 492 } 493 OSThread* osthread = create_os_thread(thread, thread_h, 494 (int)current_thread_id()); 495 if (osthread == NULL) { 496 return false; 497 } 498 499 // Initial thread state is RUNNABLE 500 osthread->set_state(RUNNABLE); 501 502 thread->set_osthread(osthread); 503 504 log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").", 505 os::current_thread_id()); 506 507 return true; 508 } 509 510 bool os::create_main_thread(JavaThread* thread) { 511 #ifdef ASSERT 512 thread->verify_not_published(); 513 #endif 514 if (_starting_thread == NULL) { 515 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 516 if (_starting_thread == NULL) { 517 return false; 518 } 519 } 520 521 // The primordial thread is runnable from the start) 522 _starting_thread->set_state(RUNNABLE); 523 524 thread->set_osthread(_starting_thread); 525 return true; 526 } 527 528 // Helper function to trace _beginthreadex attributes, 529 // similar to os::Posix::describe_pthread_attr() 530 static char* describe_beginthreadex_attributes(char* buf, size_t buflen, 531 size_t stacksize, unsigned initflag) { 532 stringStream ss(buf, buflen); 533 if (stacksize == 0) { 534 ss.print("stacksize: default, "); 535 } else { 536 ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024); 537 } 538 ss.print("flags: "); 539 #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " "); 540 #define ALL(X) \ 541 X(CREATE_SUSPENDED) \ 542 X(STACK_SIZE_PARAM_IS_A_RESERVATION) 543 ALL(PRINT_FLAG) 544 #undef ALL 545 #undef PRINT_FLAG 546 return buf; 547 } 548 549 // Allocate and initialize a new OSThread 550 bool os::create_thread(Thread* thread, ThreadType thr_type, 551 size_t stack_size) { 552 
unsigned thread_id; 553 554 // Allocate the OSThread object 555 OSThread* osthread = new OSThread(NULL, NULL); 556 if (osthread == NULL) { 557 return false; 558 } 559 560 // Initialize support for Java interrupts 561 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 562 if (interrupt_event == NULL) { 563 delete osthread; 564 return NULL; 565 } 566 osthread->set_interrupt_event(interrupt_event); 567 osthread->set_interrupted(false); 568 569 thread->set_osthread(osthread); 570 571 if (stack_size == 0) { 572 switch (thr_type) { 573 case os::java_thread: 574 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 575 if (JavaThread::stack_size_at_create() > 0) { 576 stack_size = JavaThread::stack_size_at_create(); 577 } 578 break; 579 case os::compiler_thread: 580 if (CompilerThreadStackSize > 0) { 581 stack_size = (size_t)(CompilerThreadStackSize * K); 582 break; 583 } // else fall through: 584 // use VMThreadStackSize if CompilerThreadStackSize is not defined 585 case os::vm_thread: 586 case os::pgc_thread: 587 case os::cgc_thread: 588 case os::watcher_thread: 589 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 590 break; 591 } 592 } 593 594 // Create the Win32 thread 595 // 596 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() 597 // does not specify stack size. Instead, it specifies the size of 598 // initially committed space. The stack size is determined by 599 // PE header in the executable. If the committed "stack_size" is larger 600 // than default value in the PE header, the stack is rounded up to the 601 // nearest multiple of 1MB. For example if the launcher has default 602 // stack size of 320k, specifying any size less than 320k does not 603 // affect the actual stack size at all, it only affects the initial 604 // commitment. 
On the other hand, specifying 'stack_size' larger than 605 // default value may cause significant increase in memory usage, because 606 // not only the stack space will be rounded up to MB, but also the 607 // entire space is committed upfront. 608 // 609 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' 610 // for CreateThread() that can treat 'stack_size' as stack size. However we 611 // are not supposed to call CreateThread() directly according to MSDN 612 // document because JVM uses C runtime library. The good news is that the 613 // flag appears to work with _beginthredex() as well. 614 615 const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION; 616 HANDLE thread_handle = 617 (HANDLE)_beginthreadex(NULL, 618 (unsigned)stack_size, 619 (unsigned (__stdcall *)(void*)) thread_native_entry, 620 thread, 621 initflag, 622 &thread_id); 623 624 char buf[64]; 625 if (thread_handle != NULL) { 626 log_info(os, thread)("Thread started (tid: %u, attributes: %s)", 627 thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag)); 628 } else { 629 log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.", 630 os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag)); 631 } 632 633 if (thread_handle == NULL) { 634 // Need to clean up stuff we've allocated so far 635 CloseHandle(osthread->interrupt_event()); 636 thread->set_osthread(NULL); 637 delete osthread; 638 return NULL; 639 } 640 641 Atomic::inc(&os::win32::_os_thread_count); 642 643 // Store info on the Win32 thread into the OSThread 644 osthread->set_thread_handle(thread_handle); 645 osthread->set_thread_id(thread_id); 646 647 // Initial thread state is INITIALIZED, not SUSPENDED 648 osthread->set_state(INITIALIZED); 649 650 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 651 return true; 652 } 653 654 655 // Free 
// Win32 resources related to the OSThread
// Close the thread and interrupt-event handles and delete the OSThread.
// Only valid for the calling thread's own OSThread.
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  assert(Thread::current()->osthread() == osthread,
         "os::free_thread but not current thread");

  CloseHandle(osthread->thread_handle());
  CloseHandle(osthread->interrupt_event());
  delete osthread;
}

static jlong first_filetime;
static jlong initial_performance_count;  // QPC reading at VM startup
static jlong performance_frequency;      // QPC ticks per second


// Reassemble a Win32 LARGE_INTEGER into a jlong.
jlong as_long(LARGE_INTEGER x) {
  jlong result = 0;  // initialization to avoid warning
  set_high(&result, x.HighPart);
  set_low(&result, x.LowPart);
  return result;
}


// Performance-counter ticks elapsed since initialize_performance_counter().
jlong os::elapsed_counter() {
  LARGE_INTEGER count;
  QueryPerformanceCounter(&count);
  return as_long(count) - initial_performance_count;
}


jlong os::elapsed_frequency() {
  return performance_frequency;
}


julong os::available_memory() {
  return win32::available_memory();
}

julong os::win32::available_memory() {
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);

  return (julong)ms.ullAvailPhys;
}

julong os::physical_memory() {
  return win32::physical_memory();
}

// Report the amount of virtual address space available for allocation.
// Always succeeds (returns true).
bool os::has_allocatable_memory_limit(julong* limit) {
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);
#ifdef _LP64
  *limit = (julong)ms.ullAvailVirtual;
  return true;
#else
  // Limit to 1400m because of the 2gb address space wall
  *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
  return true;
#endif
}

// Number of processors the VM may use: the user override if set, else the
// popcount of the process affinity mask (when it fits in a machine word),
// else the raw configured processor count.
int os::active_processor_count() {
  // User has overridden the number of active processors
  if (ActiveProcessorCount > 0) {
    log_trace(os)("active_processor_count: "
                  "active processor count set by user : %d",
                  ActiveProcessorCount);
    return ActiveProcessorCount;
  }

  DWORD_PTR lpProcessAffinityMask = 0;
  DWORD_PTR lpSystemAffinityMask = 0;
  int proc_count = processor_count();
  if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
      GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // Nof active processors is number of bits in process affinity mask
    int bitcount = 0;
    while (lpProcessAffinityMask != 0) {
      // Clear the lowest set bit each iteration (Kernighan popcount).
      lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
      bitcount++;
    }
    return bitcount;
  } else {
    return proc_count;
  }
}

// Publish a thread name visible in native debuggers via the MSVC
// thread-naming exception protocol. No-op unless a debugger is attached.
void os::set_native_thread_name(const char *name) {

  // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
  //
  // Note that unfortunately this only works if the process
  // is already attached to a debugger; debugger must observe
  // the exception below to show the correct name.

  // If there is no debugger attached skip raising the exception
  if (!IsDebuggerPresent()) {
    return;
  }

  const DWORD MS_VC_EXCEPTION = 0x406D1388;
  struct {
    DWORD dwType;     // must be 0x1000
    LPCSTR szName;    // pointer to name (in user addr space)
    DWORD dwThreadID; // thread ID (-1=caller thread)
    DWORD dwFlags;    // reserved for future use, must be zero
  } info;

  info.dwType = 0x1000;
  info.szName = name;
  info.dwThreadID = -1;
  info.dwFlags = 0;

  __try {
    RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
  } __except(EXCEPTION_EXECUTE_HANDLER) {}
}

bool os::distribute_processes(uint length, uint* distribution) {
  // Not yet implemented.
  return false;
}

bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}

// Record the QPC frequency and the startup counter value used as the zero
// point for os::elapsed_counter().
void os::win32::initialize_performance_counter() {
  LARGE_INTEGER count;
  QueryPerformanceFrequency(&count);
  performance_frequency = as_long(count);
  QueryPerformanceCounter(&count);
  initial_performance_count = as_long(count);
}


double os::elapsedTime() {
  return (double) elapsed_counter() / (double) elapsed_frequency();
}


// Windows format:
//   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
// Java format:
//   Java standards require the number of milliseconds since 1/1/1970

// Constant offset - calculated using offset()
static jlong _offset = 116444736000000000;
// Fake time counter for reproducible results when debugging
static jlong fake_time = 0;

#ifdef ASSERT
// Just to be safe, recalculate the offset in debug mode
static jlong _calculated_offset = 0;
static int _has_calculated_offset = 0;

jlong offset() {
  if (_has_calculated_offset) return _calculated_offset;
  SYSTEMTIME java_origin;
  java_origin.wYear = 1970;
  java_origin.wMonth = 1;
  java_origin.wDayOfWeek = 0;  // ignored
  java_origin.wDay = 1;
  java_origin.wHour = 0;
  java_origin.wMinute = 0;
  java_origin.wSecond = 0;
  java_origin.wMilliseconds = 0;
  FILETIME jot;
  if (!SystemTimeToFileTime(&java_origin, &jot)) {
    fatal("Error = %d\nWindows error", GetLastError());
  }
  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
  _has_calculated_offset = 1;
  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
  return _calculated_offset;
}
#else
jlong offset() {
  return _offset;
}
#endif

// Convert FILETIME (100ns since 1601) to Java millis since the 1970 epoch.
jlong windows_to_java_time(FILETIME wt) {
  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  return (a - offset()) / 10000;
}

// Returns time ticks in
(10th of micro seconds) 854 jlong windows_to_time_ticks(FILETIME wt) { 855 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 856 return (a - offset()); 857 } 858 859 FILETIME java_to_windows_time(jlong l) { 860 jlong a = (l * 10000) + offset(); 861 FILETIME result; 862 result.dwHighDateTime = high(a); 863 result.dwLowDateTime = low(a); 864 return result; 865 } 866 867 bool os::supports_vtime() { return true; } 868 bool os::enable_vtime() { return false; } 869 bool os::vtime_enabled() { return false; } 870 871 double os::elapsedVTime() { 872 FILETIME created; 873 FILETIME exited; 874 FILETIME kernel; 875 FILETIME user; 876 if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) { 877 // the resolution of windows_to_java_time() should be sufficient (ms) 878 return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS; 879 } else { 880 return elapsedTime(); 881 } 882 } 883 884 jlong os::javaTimeMillis() { 885 if (UseFakeTimers) { 886 return fake_time++; 887 } else { 888 FILETIME wt; 889 GetSystemTimeAsFileTime(&wt); 890 return windows_to_java_time(wt); 891 } 892 } 893 894 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) { 895 FILETIME wt; 896 GetSystemTimeAsFileTime(&wt); 897 jlong ticks = windows_to_time_ticks(wt); // 10th of micros 898 jlong secs = jlong(ticks / 10000000); // 10000 * 1000 899 seconds = secs; 900 nanos = jlong(ticks - (secs*10000000)) * 100; 901 } 902 903 jlong os::javaTimeNanos() { 904 LARGE_INTEGER current_count; 905 QueryPerformanceCounter(¤t_count); 906 double current = as_long(current_count); 907 double freq = performance_frequency; 908 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC); 909 return time; 910 } 911 912 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 913 jlong freq = performance_frequency; 914 if (freq < NANOSECS_PER_SEC) { 915 // the performance counter is 64 bits and we will 916 // be multiplying it -- so no wrap in 64 bits 917 info_ptr->max_value 
= ALL_64_BITS; 918 } else if (freq > NANOSECS_PER_SEC) { 919 // use the max value the counter can reach to 920 // determine the max value which could be returned 921 julong max_counter = (julong)ALL_64_BITS; 922 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC)); 923 } else { 924 // the performance counter is 64 bits and we will 925 // be using it directly -- so no wrap in 64 bits 926 info_ptr->max_value = ALL_64_BITS; 927 } 928 929 // using a counter, so no skipping 930 info_ptr->may_skip_backward = false; 931 info_ptr->may_skip_forward = false; 932 933 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time 934 } 935 936 char* os::local_time_string(char *buf, size_t buflen) { 937 SYSTEMTIME st; 938 GetLocalTime(&st); 939 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 940 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); 941 return buf; 942 } 943 944 bool os::getTimesSecs(double* process_real_time, 945 double* process_user_time, 946 double* process_system_time) { 947 HANDLE h_process = GetCurrentProcess(); 948 FILETIME create_time, exit_time, kernel_time, user_time; 949 BOOL result = GetProcessTimes(h_process, 950 &create_time, 951 &exit_time, 952 &kernel_time, 953 &user_time); 954 if (result != 0) { 955 FILETIME wt; 956 GetSystemTimeAsFileTime(&wt); 957 jlong rtc_millis = windows_to_java_time(wt); 958 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS); 959 *process_user_time = 960 (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS); 961 *process_system_time = 962 (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS); 963 return true; 964 } else { 965 return false; 966 } 967 } 968 969 void os::shutdown() { 970 // allow PerfMemory to attempt cleanup of any persistent resources 971 perfMemory_exit(); 972 973 // flush buffered output, finish log files 974 ostream_abort(); 975 976 // Check for abort hook 977 abort_hook_t 
  abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}


// Handle of the minidump file created by check_dump_limit(); written by os::abort().
static HANDLE dumpFile = NULL;

// Check if dump file can be created. On success, buffer receives the full
// path of the minidump file and the (already opened) handle is cached in
// dumpFile; on failure, buffer receives a human-readable reason. The result
// is recorded with VMError so error reporting can print it.
void os::check_dump_limit(char* buffer, size_t buffsz) {
  bool status = true;
  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
    jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
    status = false;
  }

#ifndef ASSERT
  // Product builds: client editions of Windows do not get minidumps unless
  // explicitly requested via -XX:+CreateCoredumpOnCrash.
  if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
    jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
    status = false;
  }
#endif

  if (status) {
    const char* cwd = get_current_directory(NULL, 0);
    int pid = current_process_id();
    if (cwd != NULL) {
      jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
    } else {
      jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
    }

    // Open the dump file eagerly so that a later crash cannot fail on
    // file creation; keep the handle in dumpFile for os::abort().
    if (dumpFile == NULL &&
        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
            == INVALID_HANDLE_VALUE) {
      jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
      status = false;
    }
  }
  VMError::record_coredump_status(buffer, status);
}

// Terminate the process with exit code 1, optionally writing a minidump
// first. siginfo/context (when both non-NULL) describe the triggering
// exception and are embedded in the dump.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  MINIDUMP_TYPE dumpType;

  shutdown();
  if (!dump_core || dumpFile == NULL) {
    if (dumpFile != NULL) {
      CloseHandle(dumpFile);
    }
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
                             MiniDumpWithFullMemoryInfo |
                             MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);

  if (siginfo != NULL && context != NULL) {
    ep.ContextRecord = (PCONTEXT) context;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }

  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
      !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
    jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
  }
  CloseHandle(dumpFile);
  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
// * dirent_md.c 1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

// Caller must have already run dirname through JVM_NativePath, which removes
// duplicate slashes and converts all instances of '/' into '\\'.

// Open a directory stream for iteration with os::readdir(). Returns NULL
// (with errno set) on allocation failure or if dirname is not a directory.
DIR * os::opendir(const char *dirname) {
  assert(dirname != NULL, "just checking"); // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
  DWORD fattr; // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == 0) {
    errno = ENOMEM;
    return 0;
  }

  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
  // as a directory in FindFirstFile().
  // We detect this case here and
  // prepend the current drive name.
  //
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    alt_dirname[0] = _getdrive() + 'A' - 1;
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  // +5 leaves room for the "\\*.*" wildcard suffix appended below plus NUL.
  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dirp->path == 0) {
    free(dirp);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dirp->path, dirname);

  fattr = GetFileAttributes(dirp->path);
  if (fattr == 0xffffffff) {  // INVALID_FILE_ATTRIBUTES: path does not exist
    free(dirp->path);
    free(dirp);
    errno = ENOENT;
    return 0;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path);
    free(dirp);
    errno = ENOTDIR;
    return 0;
  }

  // Append "*.*", or possibly "\\*.*", to path
  if (dirp->path[1] == ':' &&
      (dirp->path[2] == '\0' ||
       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    // No '\\' needed for cases like "Z:" or "Z:\"
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    // ERROR_FILE_NOT_FOUND means an empty directory; readdir() then
    // returns no entries. Any other error is reported as EACCES.
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path);
      free(dirp);
      errno = EACCES;
      return 0;
    }
  }
  return dirp;
}

// Return the next entry of the directory stream, or NULL when exhausted
// (or EBADF if the find handle was invalidated underneath us).
// parameter dbuf unused on Windows
struct dirent * os::readdir(DIR *dirp, dirent *dbuf) {
  assert(dirp != NULL, "just checking"); // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    return 0;
  }

  // FindFirstFile() already fetched the first entry; hand it out now and
  // pre-fetch the following one.
  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return 0;
    }
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}

// Close a directory stream opened by os::opendir() and release its memory.
int os::closedir(DIR *dirp) {
  assert(dirp != NULL, "just checking"); // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path);
  free(dirp);
  return 0;
}

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf) > 0) {
    return path_buf;
  } else {
    path_buf[0] = '\0';
    return path_buf;
  }
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
const char* os::get_current_directory(char *buf, size_t buflen) {
  // _getcwd takes an int; clamp to avoid overflow on huge buflen values.
  int n = static_cast<int>(buflen);
  if (buflen > INT_MAX) n = INT_MAX;
  return _getcwd(buf, n);
}

//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if address in
// within the NTDLL address space.
//
static bool _addr_in_ntdll(address addr) {
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if (hmod == NULL) return false;
  if (!GetModuleInformation(GetCurrentProcess(), hmod,
                            &minfo, sizeof(MODULEINFO))) {
    return false;
  }

  // In range [base, base + SizeOfImage)?
  if ((addr >= minfo.lpBaseOfDll) &&
      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
    return true;
  } else {
    return false;
  }
}
#endif

// Query/result record passed through get_loaded_modules_info() to
// _locate_module_by_addr().
struct _modinfo {
  address addr;     // address to look up (input)
  char* full_path;  // point to a char buffer
  int buflen;       // size of the buffer
  address base_addr; // base of the module containing addr (output)
};

// Callback for get_loaded_modules_info(): stop (return 1) at the first
// module whose [base_addr, top_address) range contains pmod->addr.
static int _locate_module_by_addr(const char * mod_fname, address base_addr,
                                  address top_address, void * param) {
  struct _modinfo *pmod = (struct _modinfo *)param;
  if (!pmod) return -1;

  if (base_addr <= pmod->addr &&
      top_address > pmod->addr) {
    // if a buffer is provided, copy path name to the buffer
    if (pmod->full_path) {
      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
    }
    pmod->base_addr = base_addr;
    return 1;
  }
  return 0;
}

// Find the loaded module (DLL/EXE) containing addr. On success, buf holds
// the module's full path and *offset (if non-NULL) the offset of addr from
// the module base. On failure, buf is emptied and *offset set to -1.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
  // return the full path to the DLL file, sometimes it returns path
  // to the corresponding PDB file (debug info); sometimes it only
  // returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr = addr;
  mi.full_path = buf;
  mi.buflen = buflen;
  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

// Resolve addr to a symbol name (optionally demangled) via the Decoder.
// On failure, buf is emptied and *offset (if non-NULL) set to -1.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
    return true;
  }
  if (offset != NULL) *offset = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
static int _locate_jvm_dll(const char* mod_fname, address base_addr,
                           address top_address, void * param) {
  if (!param) return -1;

  // This function itself lives in jvm.dll, so the module whose range
  // contains its address is jvm.dll.
  if (base_addr <= (address)_locate_jvm_dll &&
      top_address > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = top_address;
    return 1;
  }
  return 0;
}

address vm_lib_location[2]; // start and end address of jvm.dll

// check if addr is inside jvm.dll
bool os::address_is_in_vm(address addr) {
  // Lazily resolve jvm.dll's address range on first use.
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(const char* fname, address base_address,
                         address top_address, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n",
            base_address, top_address, fname);
  return 0;
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
  void * result = LoadLibrary(name);
  if (result != NULL) {
    // Recalculate pdb search path if a DLL was loaded successfully.
    SymbolEngine::recalc_search_path();
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
    ebuf[ebuflen - 1] = '\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen - 1] = '\0';
  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
  if (fd < 0) {
    return NULL;
  }

  // Parse the PE header by hand: locate the COFF header via the pointer at
  // file offset 0x3c and read the Machine field from it.
  uint32_t signature_offset;
  uint16_t lib_arch = 0;
  bool failed_to_get_lib_arch =
    ( // Go to position 3c in the dll
      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
      ||
      // Read location of signature
      (sizeof(signature_offset) !=
       (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
      ||
      // Go to COFF File Header in dll
      // that is located after "signature" (4 bytes long)
      (os::seek_to_file_offset(fd,
       signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
      ||
      // Read field that contains code of architecture
      // that dll was built for
      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
    );

  ::close(fd);
  if (failed_to_get_lib_arch) {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  typedef struct {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[] = {
    {IMAGE_FILE_MACHINE_I386, (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"}
  };
#if (defined _M_AMD64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
#else
#error Method os::dll_load requires that one of following \
       is defined :_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str = NULL, *lib_arch_str = NULL;
  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (lib_arch == arch_array[i].arch_code) {
      lib_arch_str = arch_array[i].arch_name;
    }
    if (running_arch == arch_array[i].arch_code) {
      running_arch_str = arch_array[i].arch_name;
    }
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch) {
    return NULL;
  }

  if (lib_arch_str != NULL) {
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str, running_arch_str);
  } else {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch, running_arch_str);
  }

  return NULL;
}

// Print "base - top <path>" for every module loaded in this process.
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  get_loaded_modules_info(_print_module, (void *)st);
}

// Enumerate loaded modules and invoke callback(name, base, top, param) for
// each; stops early when the callback returns non-zero and returns that
// value (0 if no callback stopped the walk or enumeration failed).
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  HANDLE hProcess;

# define MAX_NUM_MODULES 128
  HMODULE modules[MAX_NUM_MODULES];
  // NOTE(review): 'filename' is a function-local static shared by all
  // callers — presumably enumeration is only done single-threaded; verify.
  static char filename[MAX_PATH];
  int result = 0;

  int pid = os::current_process_id();
  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = callback(filename, (address)modinfo.lpBaseOfDll,
                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
1484 } 1485 1486 bool os::get_host_name(char* buf, size_t buflen) { 1487 DWORD size = (DWORD)buflen; 1488 return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE); 1489 } 1490 1491 void os::get_summary_os_info(char* buf, size_t buflen) { 1492 stringStream sst(buf, buflen); 1493 os::win32::print_windows_version(&sst); 1494 // chop off newline character 1495 char* nl = strchr(buf, '\n'); 1496 if (nl != NULL) *nl = '\0'; 1497 } 1498 1499 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) { 1500 #if _MSC_VER >= 1900 1501 // Starting with Visual Studio 2015, vsnprint is C99 compliant. 1502 int result = ::vsnprintf(buf, len, fmt, args); 1503 // If an encoding error occurred (result < 0) then it's not clear 1504 // whether the buffer is NUL terminated, so ensure it is. 1505 if ((result < 0) && (len > 0)) { 1506 buf[len - 1] = '\0'; 1507 } 1508 return result; 1509 #else 1510 // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use 1511 // _vsnprintf, whose behavior seems to be *mostly* consistent across 1512 // versions. However, when len == 0, avoid _vsnprintf too, and just 1513 // go straight to _vscprintf. The output is going to be truncated in 1514 // that case, except in the unusual case of empty output. More 1515 // importantly, the documentation for various versions of Visual Studio 1516 // are inconsistent about the behavior of _vsnprintf when len == 0, 1517 // including it possibly being an error. 1518 int result = -1; 1519 if (len > 0) { 1520 result = _vsnprintf(buf, len, fmt, args); 1521 // If output (including NUL terminator) is truncated, the buffer 1522 // won't be NUL terminated. Add the trailing NUL specified by C99. 
1523 if ((result < 0) || ((size_t)result >= len)) { 1524 buf[len - 1] = '\0'; 1525 } 1526 } 1527 if (result < 0) { 1528 result = _vscprintf(fmt, args); 1529 } 1530 return result; 1531 #endif // _MSC_VER dispatch 1532 } 1533 1534 static inline time_t get_mtime(const char* filename) { 1535 struct stat st; 1536 int ret = os::stat(filename, &st); 1537 assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno)); 1538 return st.st_mtime; 1539 } 1540 1541 int os::compare_file_modified_times(const char* file1, const char* file2) { 1542 time_t t1 = get_mtime(file1); 1543 time_t t2 = get_mtime(file2); 1544 return t1 - t2; 1545 } 1546 1547 void os::print_os_info_brief(outputStream* st) { 1548 os::print_os_info(st); 1549 } 1550 1551 void os::print_os_info(outputStream* st) { 1552 #ifdef ASSERT 1553 char buffer[1024]; 1554 st->print("HostName: "); 1555 if (get_host_name(buffer, sizeof(buffer))) { 1556 st->print("%s ", buffer); 1557 } else { 1558 st->print("N/A "); 1559 } 1560 #endif 1561 st->print("OS:"); 1562 os::win32::print_windows_version(st); 1563 } 1564 1565 void os::win32::print_windows_version(outputStream* st) { 1566 OSVERSIONINFOEX osvi; 1567 VS_FIXEDFILEINFO *file_info; 1568 TCHAR kernel32_path[MAX_PATH]; 1569 UINT len, ret; 1570 1571 // Use the GetVersionEx information to see if we're on a server or 1572 // workstation edition of Windows. Starting with Windows 8.1 we can't 1573 // trust the OS version information returned by this API. 1574 ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX)); 1575 osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); 1576 if (!GetVersionEx((OSVERSIONINFO *)&osvi)) { 1577 st->print_cr("Call to GetVersionEx failed"); 1578 return; 1579 } 1580 bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION); 1581 1582 // Get the full path to \Windows\System32\kernel32.dll and use that for 1583 // determining what version of Windows we're running on. 
  // Reserve room for the "\kernel32.dll" suffix (13 chars) plus NUL.
  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
  ret = GetSystemDirectory(kernel32_path, len);
  if (ret == 0 || ret > len) {
    st->print_cr("Call to GetSystemDirectory failed");
    return;
  }
  // NOTE(review): strncat's count excludes the terminating NUL; MAX_PATH - ret
  // is safe here only because ret <= MAX_PATH - 14 (checked above).
  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);

  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
  if (version_size == 0) {
    st->print_cr("Call to GetFileVersionInfoSize failed");
    return;
  }

  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
  if (version_info == NULL) {
    st->print_cr("Failed to allocate version_info");
    return;
  }

  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
    os::free(version_info);
    st->print_cr("Call to GetFileVersionInfo failed");
    return;
  }

  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
    os::free(version_info);
    st->print_cr("Call to VerQueryValue failed");
    return;
  }

  // Product version of kernel32.dll encodes the true OS version.
  int major_version = HIWORD(file_info->dwProductVersionMS);
  int minor_version = LOWORD(file_info->dwProductVersionMS);
  int build_number = HIWORD(file_info->dwProductVersionLS);
  int build_minor = LOWORD(file_info->dwProductVersionLS);
  int os_vers = major_version * 1000 + minor_version;
  os::free(version_info);

  st->print(" Windows ");
  switch (os_vers) {

  case 6000:
    if (is_workstation) {
      st->print("Vista");
    } else {
      st->print("Server 2008");
    }
    break;

  case 6001:
    if (is_workstation) {
      st->print("7");
    } else {
      st->print("Server 2008 R2");
    }
    break;

  case 6002:
    if (is_workstation) {
      st->print("8");
    } else {
      st->print("Server 2012");
    }
    break;

  case 6003:
    if (is_workstation) {
      st->print("8.1");
    } else {
      st->print("Server 2012 R2");
    }
    break;

  case 10000:
    if (is_workstation) {
      st->print("10");
    } else {
      st->print("Server 2016");
    }
    break;

  default:
    // Unrecognized windows, print out its major and minor versions
    st->print("%d.%d", major_version, minor_version);
    break;
  }

  // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
  // find out whether we are running on 64 bit processor or not
  SYSTEM_INFO si;
  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  GetNativeSystemInfo(&si);
  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
    st->print(" , 64 bit");
  }

  st->print(" Build %d", build_number);
  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
  st->cr();
}

void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  // Nothing to do for now.
}

// Fill buf with the processor name from the registry, or a generic
// "## __CPU__" placeholder on failure.
void os::get_summary_cpu_info(char* buf, size_t buflen) {
  HKEY key;
  DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
                            "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
  if (status == ERROR_SUCCESS) {
    DWORD size = (DWORD)buflen;
    // NOTE(review): strncpy does not NUL-terminate if the source fills
    // buflen exactly — callers presumably pass ample buffers; verify.
    status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
    if (status != ERROR_SUCCESS) {
      strncpy(buf, "## __CPU__", buflen);
    }
    RegCloseKey(key);
  } else {
    // Put generic cpu info to return
    strncpy(buf, "## __CPU__", buflen);
  }
}

// Print system-wide and per-process memory statistics for hs_err files.
void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);

  st->print(", system-wide physical " INT64_FORMAT "M ", (int64_t) ms.ullTotalPhys >> 20);
  st->print("(" INT64_FORMAT "M free)\n", (int64_t)
            ms.ullAvailPhys >> 20);

  st->print("TotalPageFile size " INT64_FORMAT "M ", (int64_t) ms.ullTotalPageFile >> 20);
  st->print("(AvailPageFile size " INT64_FORMAT "M)", (int64_t) ms.ullAvailPageFile >> 20);

  // on 32bit Total/AvailVirtual are interesting (show us how close we get to 2-4 GB per process borders)
#if defined(_M_IX86)
  st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ", (int64_t) ms.ullTotalVirtual >> 20);
  st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
#endif

  // extended memory statistics for a process
  PROCESS_MEMORY_COUNTERS_EX pmex;
  ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
  pmex.cb = sizeof(pmex);
  GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));

  st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ", (int64_t) pmex.WorkingSetSize >> 20);
  st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);

  st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ", (int64_t) pmex.PrivateUsage >> 20);
  st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);

  st->cr();
}

// Decode a Windows EXCEPTION_RECORD (passed as siginfo) into readable form
// for hs_err files: exception name/code, and for access violations the
// read/write/DEP kind plus the faulting address.
void os::print_siginfo(outputStream *st, const void* siginfo) {
  const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
  st->print("siginfo:");

  char tmp[64];
  if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
    strcpy(tmp, "EXCEPTION_??");
  }
  st->print(" %s (0x%x)", tmp, er->ExceptionCode);

  if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
       er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
      er->NumberParameters >= 2) {
    // ExceptionInformation[0]: 0 = read, 1 = write, 8 = DEP violation;
    // ExceptionInformation[1] is the faulting address.
    switch (er->ExceptionInformation[0]) {
    case 0: st->print(", reading address"); break;
    case 1: st->print(", writing address"); break;
    case 8: st->print(", data execution prevention violation at address"); break;
    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                       er->ExceptionInformation[0]);
    }
    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else {
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}

// Cache for the lazily-resolved path computed by os::jvm_path().
static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like jvm.dll is installed there (append a fake suffix
    // hotspot/jvm.dll).
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {
      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      if (0 != _access(buf, 0)) {
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  // Fall back to the actual module path of the loaded jvm.dll.
  if (buf[0] == '\0') {
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  strncpy(saved_jvm_path, buf, MAX_PATH);
  saved_jvm_path[MAX_PATH - 1] = '\0';
}


void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  // 32-bit __stdcall decoration: leading underscore.
  st->print("_");
#endif
}


void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  // 32-bit __stdcall decoration: "@<bytes of arguments>".
  st->print("@%d", args_size * sizeof(int));
#endif
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c

// Copy a message for the last OS (GetLastError) or C runtime (errno) error
// into buf; returns the message length, or 0 if there was no pending error.
size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
                                     FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
                                     NULL,
                                     errval,
                                     0,
                                     buf,
                                     (DWORD)len,
                                     NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = os::strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}

// Last error as an int: GetLastError() if set, else errno.
int os::get_last_error() {
  DWORD error = GetLastError();
  if (error == 0) {
    error = errno;
  }
  return (int)error;
}

// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;

// Handler installed via os::signal(): queue the signal for the signal
// thread and re-install itself (CRT handlers are one-shot).
static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}

void* os::user_handler() {
  return (void*) UserHandler;
}

// Install a handler for signal_number, returning the previous one.
// SIGBREAK (without -Xrs) is tracked in sigbreakHandler and dispatched by
// our console control handler instead of the CRT.
void* os::signal(int signal_number, void* handler) {
  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
    void (*oldHandler)(int) = sigbreakHandler;
    sigbreakHandler = (void (*)(int)) handler;
    return (void*) oldHandler;
  } else {
    return (void*)::signal(signal_number, (void (*)(int))handler);
  }
}

void os::signal_raise(int signal_number) {
  raise(signal_number);
}

// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events. We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
  switch (event) {
  case CTRL_C_EVENT:
    if (VMError::is_error_reported()) {
      // Ctrl-C is pressed during error reporting, likely because the error
      // handler fails to abort. Let VM die immediately.
      os::die();
    }

    os::signal_raise(SIGINT);
    return TRUE;
    break;
  case CTRL_BREAK_EVENT:
    if (sigbreakHandler != NULL) {
      (*sigbreakHandler)(SIGBREAK);
    }
    return TRUE;
    break;
  case CTRL_LOGOFF_EVENT: {
    // Don't terminate JVM if it is running in a non-interactive session,
    // such as a service process.
    USEROBJECTFLAGS flags;
    HANDLE handle = GetProcessWindowStation();
    if (handle != NULL &&
        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
                                 sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let next handler to deal
      // with it.
      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
        return FALSE;
      }
    }
  }
  // Intentional fall-through: an interactive logoff is treated exactly
  // like a close/shutdown event.
  case CTRL_CLOSE_EVENT:
  case CTRL_SHUTDOWN_EVENT:
    os::signal_raise(SIGTERM);
    return TRUE;
    break;
  default:
    break;
  }
  return FALSE;
}

// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
static Semaphore* sig_sem = NULL;

// Set up signal counters, the signal semaphore, and (unless -Xrs) the
// console control handler.
void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  sig_sem = new Semaphore();

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3. For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified. This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case. See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}

// Record that signal sig occurred and wake the signal thread.
void os::signal_notify(int sig) {
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[sig]);
    sig_sem->signal();
  } else {
    // Signal thread is not created with ReduceSignalUsage and signal_init_pd
    // initialization isn't called.
    assert(ReduceSignalUsage, "signal semaphore should be created");
  }
}

// Block until some signal has been raised at least once and return its
// number, atomically decrementing its pending count.
static int check_pending_signals() {
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // CAS-decrement the counter; retry the scan on contention.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      sig_sem->wait();

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        sig_sem->signal();

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

int os::signal_wait() {
  return check_pending_signals();
}

// Implicit OS exception handling

// Redirect execution to handler: save the faulting pc in the current
// JavaThread (if any) and rewrite the context's instruction pointer, then
// resume execution.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
                      address handler) {
  JavaThread* thread = (JavaThread*) Thread::current_or_null();
  // Save pc in thread
#ifdef _M_AMD64
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
#else
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
#endif

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}


// Used for PostMortemDump
extern "C" void safepoints();
extern "C" void find(int x);
extern "C" void events();

// According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real world experience shows that occasionnaly
// the execution of an illegal instruction can generate the exception code 0xC000001E. This
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E

// From "Execution Protection in the Windows Operating System" draft 0.35
// Once a system header becomes available, the "real" define should be
// included or copied here.
#define EXCEPTION_INFO_EXEC_VIOLATION 0x08

// Windows Vista/2008 heap corruption check
#define EXCEPTION_HEAP_CORRUPTION 0xC0000374

// All Visual C++ exceptions thrown from code generated by the Microsoft Visual
// C++ compiler contain this error code. Because this is a compiler-generated
// error, the code is not listed in the Win32 API header files.
// The code is actually a cryptic mnemonic device, with the initial "E"
// standing for "exception" and the final 3 bytes (0x6D7363) representing the
// ASCII values of "msc".

#define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363

// Expands to a { "NAME", code } initializer for the table below.
#define def_excpt(val) { #val, (val) }

// Table mapping Win32 exception codes to their symbolic names; searched
// linearly by os::exception_name().
static const struct { char* name; uint number; } exceptlabels[] = {
  def_excpt(EXCEPTION_ACCESS_VIOLATION),
  def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
  def_excpt(EXCEPTION_BREAKPOINT),
  def_excpt(EXCEPTION_SINGLE_STEP),
  def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
  def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
  def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
  def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
  def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
  def_excpt(EXCEPTION_FLT_OVERFLOW),
  def_excpt(EXCEPTION_FLT_STACK_CHECK),
  def_excpt(EXCEPTION_FLT_UNDERFLOW),
  def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
  def_excpt(EXCEPTION_INT_OVERFLOW),
  def_excpt(EXCEPTION_PRIV_INSTRUCTION),
  def_excpt(EXCEPTION_IN_PAGE_ERROR),
  def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
  def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
  def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
  def_excpt(EXCEPTION_STACK_OVERFLOW),
  def_excpt(EXCEPTION_INVALID_DISPOSITION),
  def_excpt(EXCEPTION_GUARD_PAGE),
  def_excpt(EXCEPTION_INVALID_HANDLE),
  def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
  def_excpt(EXCEPTION_HEAP_CORRUPTION)
};

#undef def_excpt

// Writes the symbolic name of 'exception_code' into 'buf' and returns buf;
// returns NULL if the code is not in exceptlabels.
const char* os::exception_name(int exception_code, char *buf, size_t size) {
  uint code = static_cast<uint>(exception_code);
  for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
    if (exceptlabels[i].number == code) {
      jio_snprintf(buf, size, "%s", exceptlabels[i].name);
      return buf;
    }
  }

  return NULL;
}

//-----------------------------------------------------------------------------
// Fix up the context so execution resumes after the idiv instruction with
// the result values the JVM spec requires for min_jint/-1 (quotient already
// in (r/e)ax, remainder forced to 0), then continue execution.
LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by idiv; should only happen for -MinInt/-1
  // (division by zero is handled explicitly)
#ifdef _M_AMD64
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Rip;
  // The faulting instruction must be idiv (0xF7 /7), possibly REX-prefixed.
  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  if (pc[0] == 0xF7) {
    // set correct result values and continue after idiv instruction
    ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
  } else {
    ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
  }
  // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
  // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
  // idiv opcode (0xF7).
  ctx->Rdx = (DWORD)0;             // remainder
  // Continue the execution
#else
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Eip;
  assert(pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  assert(ctx->Eax == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
  ctx->Eax = (DWORD)min_jint;      // result
  ctx->Edx = (DWORD)0;             // remainder
  // Continue the execution
#endif
  return EXCEPTION_CONTINUE_EXECUTION;
}

//-----------------------------------------------------------------------------
// If a native method changed the FPU control word (x86) or MXCSR (x64) to a
// value that unmasked FP exceptions, restore the VM's expected state and
// continue; otherwise defer to the previous handler / continue the search.
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  PCONTEXT ctx = exceptionInfo->ContextRecord;
#ifndef _WIN64
  // handle exception caused by native method modifying control word
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;

  switch (exception_code) {
  case EXCEPTION_FLT_DENORMAL_OPERAND:
  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
  case EXCEPTION_FLT_INEXACT_RESULT:
  case EXCEPTION_FLT_INVALID_OPERATION:
  case EXCEPTION_FLT_OVERFLOW:
  case EXCEPTION_FLT_STACK_CHECK:
  case EXCEPTION_FLT_UNDERFLOW:
    jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
    if (fp_control_word != ctx->FloatSave.ControlWord) {
      // Restore FPCW and mask out FLT exceptions
      ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
      // Mask out pending FLT exceptions
      ctx->FloatSave.StatusWord &= 0xffffff00;
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if (prev_uef_handler != NULL) {
    // We didn't handle this exception so pass it to the previous
    // UnhandledExceptionFilter.
    return (prev_uef_handler)(exceptionInfo);
  }
#else // !_WIN64
  // On Windows, the mxcsr control bits are non-volatile across calls
  // See also CR 6192333
  //
  jint MxCsr = INITIAL_MXCSR;
  // we can't use StubRoutines::addr_mxcsr_std()
  // because in Win64 mxcsr is not saved there
  if (MxCsr != ctx->MxCsr) {
    ctx->MxCsr = MxCsr;
    return EXCEPTION_CONTINUE_EXECUTION;
  }
#endif // !_WIN64

  return EXCEPTION_CONTINUE_SEARCH;
}

// Hand the fatal error over to VMError for reporting (hs_err file etc.).
static inline void report_error(Thread* t, DWORD exception_code,
                                address addr, void* siginfo, void* context) {
  VMError::report_and_die(t, exception_code, addr, siginfo, context);

  // If UseOsErrorReporting, this will return here and save the error file
  // somewhere where we can find it in the minidump.
}

// On a stack-banging fault, reconstruct the Java frame that caused the
// banging into *fr. Returns false if the pc cannot be mapped to a frame
// (caller then falls back to default stack-overflow handling).
bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
        struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
  PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
  address addr = (address) exceptionRecord->ExceptionInformation[1];
  if (Interpreter::contains(pc)) {
    *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
    if (!fr->is_first_java_frame()) {
      // get_frame_at_stack_banging_point() is only called when we
      // have well defined stacks so java_sender() calls do not need
      // to assert safe_for_sender() first.
      *fr = fr->java_sender();
    }
  } else {
    // more complex code with compiled code
    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
    CodeBlob* cb = CodeCache::find_blob(pc);
    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
      // Not sure where the pc points to, fallback to default
      // stack overflow handling
      return false;
    } else {
      *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
      // in compiled code, the stack banging is performed just after the return pc
      // has been pushed on the stack
      *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
      if (!fr->is_java_frame()) {
        // See java_sender() comment above.
        *fr = fr->java_sender();
      }
    }
  }
  assert(fr->is_java_frame(), "Safety check");
  return true;
}

//-----------------------------------------------------------------------------
// The VM's top-level structured exception filter. Dispatches hardware
// exceptions the VM expects (SafeFetch faults, stack banging/overflow,
// implicit null checks, safepoint polls, integer overflow from idiv,
// unsafe-access page errors, FP control-word fixups); anything unrecognized
// is reported as a fatal error (except breakpoints) and the search continues.
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#ifdef _M_AMD64
  address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
  Thread* t = Thread::current_or_null_safe();

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      bool instr_spans_page_boundary =
        (align_down((intptr_t) pc ^ (intptr_t) addr,
                    (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start = align_down(addr, page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          log_debug(os)("Execution protection violation "
                        "at " INTPTR_FORMAT
                        ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
                        p2i(page_start), (res ? "success" : os::strerror(errno)));

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here.  Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM.  Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop.  This condition is probably even more unlikely
          // than the first.
          //
          // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are unnecessary complication:
          // this handler is a best-effort safety net, not a complete solution.
          // It is disabled by default and should only be used as a workaround
          // in case we missed any no-execute-unsafe VM code.

          last_addr = addr;

          return EXCEPTION_CONTINUE_EXECUTION;
        }
      }

      // Last unguard failed or not unguarding
      tty->print_raw_cr("Execution protection violation");
      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    }
  }
#endif // _WIN64

  // Check to see if we caught the safepoint code in the
  // process of write protecting the memory serialization page.
  // It write enables the page immediately after protecting it
  // so just return.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    if (t != NULL && t->is_Java_thread()) {
      JavaThread* thread = (JavaThread*) t;
      PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
      address addr = (address) exceptionRecord->ExceptionInformation[1];
      if (os::is_memory_serialize_page(thread, addr)) {
        // Block current thread until the memory serialize page permission restored.
        os::block_on_serialize_page_trap();
        return EXCEPTION_CONTINUE_EXECUTION;
      }
    }
  }

  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
      VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
  }

  if (t != NULL && t->is_Java_thread()) {
    JavaThread* thread = (JavaThread*) t;
    bool in_java = thread->thread_state() == _thread_in_Java;

    // Handle potential stack overflows up front.
    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
      if (thread->stack_guards_enabled()) {
        if (in_java) {
          frame fr;
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
            assert(fr.is_java_frame(), "Must be a Java frame");
            SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
          }
        }
        // Yellow zone violation.  The o/s has unprotected the first yellow
        // zone page for us.  Note: must call disable_stack_yellow_zone to
        // update the enabled status, even if the zone contains only one page.
        assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
        thread->disable_stack_yellow_reserved_zone();
        // If not in java code, return and hope for the best.
        return in_java
            ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
            : EXCEPTION_CONTINUE_EXECUTION;
      } else {
        // Fatal red zone violation.
        thread->disable_stack_red_zone();
        tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
        report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                     exceptionInfo->ContextRecord);
        return EXCEPTION_CONTINUE_SEARCH;
      }
    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
      // Either stack overflow or null pointer exception.
      if (in_java) {
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        address stack_end = thread->stack_end();
        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
          // Stack overflow.
          assert(!os::uses_stack_guard_pages(),
                 "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          if (os::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
        {
#ifdef _WIN64
          // If it's a legal stack address map the entire region in
          //
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
            // Round down to a page boundary before committing up to the
            // stack base.
            addr = (address)((uintptr_t)addr &
                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
            os::commit_memory((char *)addr, thread->stack_base() - addr,
                              !ExecMem);
            return EXCEPTION_CONTINUE_EXECUTION;
          } else
#endif
          {
            // Null pointer exception.
            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) {
              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
            }
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;
          }
        }
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
        address addr = JNI_FastGetField::find_slowcase_pc(pc);
        if (addr != (address)-1) {
          return Handle_Exception(exceptionInfo, addr);
        }
      }
#endif

      // Stack overflow or null pointer exception in native code.
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
      CompiledMethod* nm = NULL;
      JavaThread* thread = (JavaThread*)t;
      if (in_java) {
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
      }
      if ((thread->thread_state() == _thread_in_vm &&
          thread->doing_unsafe_access()) ||
          (nm != NULL && nm->has_unsafe_access())) {
        return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, (address)Assembler::locate_next_instruction(pc)));
      }
    }

    if (in_java) {
      switch (exception_code) {
      case EXCEPTION_INT_DIVIDE_BY_ZERO:
        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

      case EXCEPTION_INT_OVERFLOW:
        return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }
    if (((thread->thread_state() == _thread_in_Java) ||
        (thread->thread_state() == _thread_in_native)) &&
        exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
  }

  if (exception_code != EXCEPTION_BREAKPOINT) {
    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                 exceptionInfo->ContextRecord);
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

#ifndef _WIN64
// Special care for fast JNI accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
// the heap gets shrunk before the field access.
// Need to install our own structured exception handler since native code may
// install its own.
// Exception filter for the __try/__except wrappers below: if an access
// violation happened at a known fast-accessor pc, resume at the slow-case
// continuation; otherwise keep searching.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Generates a JNICALL wrapper that invokes the generated fast accessor stub
// under MSVC structured exception handling, so a fault in the stub is routed
// through fastJNIAccessorExceptionFilter instead of any handler the native
// caller may have installed. Returns 0 if the filter swallowed the fault.
#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
  Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
                                                     jobject obj,           \
                                                     jfieldID fieldID) {    \
    __try {                                                                 \
      return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
                                                                 obj,       \
                                                                 fieldID);  \
    } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
                                              _exception_info())) {         \
    }                                                                       \
    return 0;                                                               \
  }

DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
DEFINE_FAST_GETFIELD(jchar,    char,   Char)
DEFINE_FAST_GETFIELD(jshort,   short,  Short)
DEFINE_FAST_GETFIELD(jint,     int,    Int)
DEFINE_FAST_GETFIELD(jlong,    long,   Long)
DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
DEFINE_FAST_GETFIELD(jdouble,  double, Double)

// Returns the SEH-protected wrapper for the fast JNI accessor of 'type'.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
  case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
  case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
  case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
  case T_INT:     return (address)jni_fast_GetIntField_wrapper;
  case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
  case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
  case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
  default:        ShouldNotReachHere();
  }
  return (address)-1;
}
#endif

// Virtual Memory

int os::vm_page_size() { return os::win32::vm_page_size(); }
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}

// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
//   + select Control Panel -> Administrative Tools -> Local Security Policy
//   + select Local Policies -> User Rights Assignment
//   + double click "Lock pages in memory", add users and/or groups
//   + reboot
// Note the above steps are needed for administrator as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment it only uses large page if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future, if so the code below needs to be revisited.
2621 2622 #ifndef MEM_LARGE_PAGES 2623 #define MEM_LARGE_PAGES 0x20000000 2624 #endif 2625 2626 static HANDLE _hProcess; 2627 static HANDLE _hToken; 2628 2629 // Container for NUMA node list info 2630 class NUMANodeListHolder { 2631 private: 2632 int *_numa_used_node_list; // allocated below 2633 int _numa_used_node_count; 2634 2635 void free_node_list() { 2636 if (_numa_used_node_list != NULL) { 2637 FREE_C_HEAP_ARRAY(int, _numa_used_node_list); 2638 } 2639 } 2640 2641 public: 2642 NUMANodeListHolder() { 2643 _numa_used_node_count = 0; 2644 _numa_used_node_list = NULL; 2645 // do rest of initialization in build routine (after function pointers are set up) 2646 } 2647 2648 ~NUMANodeListHolder() { 2649 free_node_list(); 2650 } 2651 2652 bool build() { 2653 DWORD_PTR proc_aff_mask; 2654 DWORD_PTR sys_aff_mask; 2655 if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false; 2656 ULONG highest_node_number; 2657 if (!GetNumaHighestNodeNumber(&highest_node_number)) return false; 2658 free_node_list(); 2659 _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal); 2660 for (unsigned int i = 0; i <= highest_node_number; i++) { 2661 ULONGLONG proc_mask_numa_node; 2662 if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false; 2663 if ((proc_aff_mask & proc_mask_numa_node)!=0) { 2664 _numa_used_node_list[_numa_used_node_count++] = i; 2665 } 2666 } 2667 return (_numa_used_node_count > 1); 2668 } 2669 2670 int get_count() { return _numa_used_node_count; } 2671 int get_node_list_entry(int n) { 2672 // for indexes out of range, returns -1 2673 return (n < _numa_used_node_count ? 
_numa_used_node_list[n] : -1); 2674 } 2675 2676 } numa_node_list_holder; 2677 2678 2679 2680 static size_t _large_page_size = 0; 2681 2682 static bool request_lock_memory_privilege() { 2683 _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, 2684 os::current_process_id()); 2685 2686 LUID luid; 2687 if (_hProcess != NULL && 2688 OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) && 2689 LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) { 2690 2691 TOKEN_PRIVILEGES tp; 2692 tp.PrivilegeCount = 1; 2693 tp.Privileges[0].Luid = luid; 2694 tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; 2695 2696 // AdjustTokenPrivileges() may return TRUE even when it couldn't change the 2697 // privilege. Check GetLastError() too. See MSDN document. 2698 if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) && 2699 (GetLastError() == ERROR_SUCCESS)) { 2700 return true; 2701 } 2702 } 2703 2704 return false; 2705 } 2706 2707 static void cleanup_after_large_page_init() { 2708 if (_hProcess) CloseHandle(_hProcess); 2709 _hProcess = NULL; 2710 if (_hToken) CloseHandle(_hToken); 2711 _hToken = NULL; 2712 } 2713 2714 static bool numa_interleaving_init() { 2715 bool success = false; 2716 bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving); 2717 2718 // print a warning if UseNUMAInterleaving flag is specified on command line 2719 bool warn_on_failure = use_numa_interleaving_specified; 2720 #define WARN(msg) if (warn_on_failure) { warning(msg); } 2721 2722 // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages) 2723 size_t min_interleave_granularity = UseLargePages ? 
_large_page_size : os::vm_allocation_granularity(); 2724 NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity); 2725 2726 if (numa_node_list_holder.build()) { 2727 if (log_is_enabled(Debug, os, cpu)) { 2728 Log(os, cpu) log; 2729 log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count()); 2730 for (int i = 0; i < numa_node_list_holder.get_count(); i++) { 2731 log.debug(" %d ", numa_node_list_holder.get_node_list_entry(i)); 2732 } 2733 } 2734 success = true; 2735 } else { 2736 WARN("Process does not cover multiple NUMA nodes."); 2737 } 2738 if (!success) { 2739 if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag."); 2740 } 2741 return success; 2742 #undef WARN 2743 } 2744 2745 // this routine is used whenever we need to reserve a contiguous VA range 2746 // but we need to make separate VirtualAlloc calls for each piece of the range 2747 // Reasons for doing this: 2748 // * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise) 2749 // * UseNUMAInterleaving requires a separate node for each piece 2750 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, 2751 DWORD prot, 2752 bool should_inject_error = false) { 2753 char * p_buf; 2754 // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size 2755 size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity(); 2756 size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size; 2757 2758 // first reserve enough address space in advance since we want to be 2759 // able to break a single contiguous virtual address range into multiple 2760 // large page commits but WS2003 does not allow reserving large page space 2761 // so we just use 4K pages for reserve, this gives us a legal contiguous 2762 // address space. 
then we will deallocate that reservation, and re alloc 2763 // using large pages 2764 const size_t size_of_reserve = bytes + chunk_size; 2765 if (bytes > size_of_reserve) { 2766 // Overflowed. 2767 return NULL; 2768 } 2769 p_buf = (char *) VirtualAlloc(addr, 2770 size_of_reserve, // size of Reserve 2771 MEM_RESERVE, 2772 PAGE_READWRITE); 2773 // If reservation failed, return NULL 2774 if (p_buf == NULL) return NULL; 2775 MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC); 2776 os::release_memory(p_buf, bytes + chunk_size); 2777 2778 // we still need to round up to a page boundary (in case we are using large pages) 2779 // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size) 2780 // instead we handle this in the bytes_to_rq computation below 2781 p_buf = align_up(p_buf, page_size); 2782 2783 // now go through and allocate one chunk at a time until all bytes are 2784 // allocated 2785 size_t bytes_remaining = bytes; 2786 // An overflow of align_up() would have been caught above 2787 // in the calculation of size_of_reserve. 
2788 char * next_alloc_addr = p_buf; 2789 HANDLE hProc = GetCurrentProcess(); 2790 2791 #ifdef ASSERT 2792 // Variable for the failure injection 2793 int ran_num = os::random(); 2794 size_t fail_after = ran_num % bytes; 2795 #endif 2796 2797 int count=0; 2798 while (bytes_remaining) { 2799 // select bytes_to_rq to get to the next chunk_size boundary 2800 2801 size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size)); 2802 // Note allocate and commit 2803 char * p_new; 2804 2805 #ifdef ASSERT 2806 bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after); 2807 #else 2808 const bool inject_error_now = false; 2809 #endif 2810 2811 if (inject_error_now) { 2812 p_new = NULL; 2813 } else { 2814 if (!UseNUMAInterleaving) { 2815 p_new = (char *) VirtualAlloc(next_alloc_addr, 2816 bytes_to_rq, 2817 flags, 2818 prot); 2819 } else { 2820 // get the next node to use from the used_node_list 2821 assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected"); 2822 DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count()); 2823 p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node); 2824 } 2825 } 2826 2827 if (p_new == NULL) { 2828 // Free any allocated pages 2829 if (next_alloc_addr > p_buf) { 2830 // Some memory was committed so release it. 2831 size_t bytes_to_release = bytes - bytes_remaining; 2832 // NMT has yet to record any individual blocks, so it 2833 // need to create a dummy 'reserve' record to match 2834 // the release. 
      // NMT has not yet recorded the individual commits; create a matching
      // dummy 'reserve' record so the release below balances.
      MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        log_develop_debug(pagesize)("Reserving pages individually failed.");
      }
#endif
      return NULL;
    }

    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}



// Probe OS support for large pages and initialize _large_page_size and the
// _page_sizes[] table.  Clears UseLargePages on failure; warnings are only
// printed when a large-page flag was given explicitly on the command line.
void os::large_page_init() {
  if (!UseLargePages) return;

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  bool success = false;

#define WARN(msg) if (warn_on_failure) { warning(msg); }
  if (request_lock_memory_privilege()) {
    // GetLargePageMinimum() returns 0 when large pages are unsupported.
    size_t s = GetLargePageMinimum();
    if (s) {
#if defined(IA32) || defined(AMD64)
      if (s > 4*M || LargePageSizeInBytes > 4*M) {
        WARN("JVM cannot use large pages bigger than 4mb.");
      } else {
#endif
        // Honor a user-requested size only if it is a multiple of the
        // OS minimum large page size.
        if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
          _large_page_size = LargePageSizeInBytes;
        } else {
          _large_page_size = s;
        }
        success = true;
#if defined(IA32) || defined(AMD64)
      }
#endif
    } else {
      WARN("Large page is not supported by the processor.");
    }
  } else {
    WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
  }
#undef WARN

  const size_t default_page_size = (size_t) vm_page_size();
  if (success && _large_page_size > default_page_size) {
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = default_page_size;
    _page_sizes[2] = 0;
  }

  cleanup_after_large_page_init();
  UseLargePages = success;
}

// Create a temporary, auto-deleting (O_TEMPORARY) backing file for the Java
// heap under directory 'dir'.  Returns an open read/write fd, or -1 on
// failure (a warning is printed; malloc failure aborts initialization).
int os::create_file_for_heap(const char* dir) {

  const char name_template[] = "/jvmheap.XXXXXX";
  char *fullname = (char*)os::malloc((strlen(dir) + strlen(name_template) + 1), mtInternal);
  if (fullname == NULL) {
    vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
    return -1;
  }

  (void)strncpy(fullname, dir, strlen(dir)+1);
  (void)strncat(fullname, name_template, strlen(name_template));

  os::native_path(fullname);

  // _mktemp replaces the XXXXXX suffix with a unique name in place.
  char *path = _mktemp(fullname);
  if (path == NULL) {
    warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
    os::free(fullname);
    return -1;
  }

  int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);

  os::free(fullname);
  if (fd < 0) {
    warning("Problem opening file for heap (%s)", os::strerror(errno));
    return -1;
  }
  return fd;
}

// Map 'size' bytes of the file behind 'fd' read/write at address 'base'.
// If 'base' is not NULL, function will return NULL if it cannot get 'base'.
// Disk-full or other mapping errors abort VM initialization.
char* os::map_memory_to_file(char* base, size_t size, int fd) {
  assert(fd != -1, "File descriptor is not valid");

  HANDLE fh = (HANDLE)_get_osfhandle(fd);
#ifdef _LP64
  // CreateFileMapping takes the size split into high/low 32-bit halves.
  HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
                                         (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
#else
  HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
                                         0, (DWORD)size, NULL);
#endif
  if (fileMapping == NULL) {
    if (GetLastError() == ERROR_DISK_FULL) {
      vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
    }
    else {
      vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
    }

    return NULL;
  }

  LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);

  // Closing the mapping handle is safe: the view keeps the mapping alive.
  CloseHandle(fileMapping);

  return (char*)addr;
}

// Replace an anonymous reservation at [base, base+size) with a file mapping
// backed by 'fd'.  Note: the release/map pair is not atomic; another thread
// could in principle grab the range in between.
char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
  assert(fd != -1, "File descriptor is not valid");
  assert(base != NULL, "Base address cannot be NULL");

  release_memory(base, size);
  return map_memory_to_file(base, size, fd);
}

// On win32, one cannot release just a part of reserved memory, it's an
// all or nothing deal.  When we split a reservation, we must break the
// reservation into two reservations.
void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
                                  bool realloc) {
  if (size > 0) {
    release_memory(base, size);
    if (realloc) {
      reserve_memory(split, base);
    }
    if (size != split) {
      reserve_memory(size - split, base + split);
    }
  }
}

// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
// Reserve 'size' bytes aligned to 'alignment' by over-reserving, computing
// the aligned address, releasing the whole over-reservation, and re-reserving
// exactly at the aligned address.  Retries until the re-reservation sticks
// (another thread may steal the range between release and re-reserve).
char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = align_up(extra_base, alignment);

    // Windows cannot release a sub-range, so drop the whole over-reservation
    // and try to reclaim just the aligned part below.
    if (file_desc != -1) {
      os::unmap_memory(extra_base, extra_size);
    } else {
      os::release_memory(extra_base, extra_size);
    }

    aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);

  } while (aligned_base == NULL);

  return aligned_base;
}

// Reserve (but do not commit) 'bytes' of address space, optionally at 'addr'.
// With NUMA interleaving (and small pages) the reservation is built chunk by
// chunk via allocate_pages_individually() so chunks land on alternating nodes.
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_page_size() == 0, "reserve page size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if (Verbose && PrintMiscellaneous) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if (Verbose && PrintMiscellaneous) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available.
  return reserve_memory(bytes, requested_addr);
}

// As above, but back the reservation with the heap file 'file_desc'.
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
  assert(file_desc >= 0, "file_desc is not valid");
  return map_memory_to_file(requested_addr, bytes, file_desc);
}

size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

bool os::can_execute_large_page_memory() {
  return true;
}

// Reserve and commit 'bytes' of large-page memory, optionally at 'addr'.
// Returns NULL (caller falls back to small pages) when the size/alignment
// does not fit large pages or the allocation fails.
char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
                                 bool exec) {
  assert(UseLargePages, "only for large pages");

  if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
    return NULL; // Fallback to small pages.
  }

  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;

  // with large pages, there are two cases where we need to use Individual Allocation
  // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
  // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
  if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
    log_debug(pagesize)("Reserving large pages individually.");

    char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
    if (p_buf == NULL) {
      // give an appropriate warning message
      if (UseNUMAInterleaving) {
        warning("NUMA large page allocation failed, UseLargePages flag ignored");
      }
      if (UseLargePagesIndividualAllocation) {
        warning("Individually allocated large pages failed, "
                "use -XX:-UseLargePagesIndividualAllocation to turn off");
      }
      return NULL;
    }

    return p_buf;

  } else {
    log_debug(pagesize)("Reserving large pages in a single large chunk.");

    // normal policy just allocate it all at once
    DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
    if (res != NULL) {
      // NMT: the single-chunk path records reserve+commit here; the
      // individual path records inside allocate_pages_individually().
      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
    }

    return res;
  }
}

bool os::release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  return release_memory(base, bytes);
}

void os::print_statistics() {
}

// Print a diagnostic for a failed commit, including the Windows last-error
// text.  Kept out of product builds at the call sites (NOT_PRODUCT).
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}

// Commit previously reserved pages; with NUMA interleaving the range can span
// several reservations and must be committed one VirtualAlloc region at a time.
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

// Commit or abort the VM with an out-of-memory error carrying 'mesg'.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
3238 return true; 3239 } 3240 assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries"); 3241 assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks"); 3242 return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0); 3243 } 3244 3245 bool os::pd_release_memory(char* addr, size_t bytes) { 3246 return VirtualFree(addr, 0, MEM_RELEASE) != 0; 3247 } 3248 3249 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 3250 return os::commit_memory(addr, size, !ExecMem); 3251 } 3252 3253 bool os::remove_stack_guard_pages(char* addr, size_t size) { 3254 return os::uncommit_memory(addr, size); 3255 } 3256 3257 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) { 3258 uint count = 0; 3259 bool ret = false; 3260 size_t bytes_remaining = bytes; 3261 char * next_protect_addr = addr; 3262 3263 // Use VirtualQuery() to get the chunk size. 3264 while (bytes_remaining) { 3265 MEMORY_BASIC_INFORMATION alloc_info; 3266 if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) { 3267 return false; 3268 } 3269 3270 size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3271 // We used different API at allocate_pages_individually() based on UseNUMAInterleaving, 3272 // but we don't distinguish here as both cases are protected by same API. 
3273 ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0; 3274 warning("Failed protecting pages individually for chunk #%u", count); 3275 if (!ret) { 3276 return false; 3277 } 3278 3279 bytes_remaining -= bytes_to_protect; 3280 next_protect_addr += bytes_to_protect; 3281 count++; 3282 } 3283 return ret; 3284 } 3285 3286 // Set protections specified 3287 bool os::protect_memory(char* addr, size_t bytes, ProtType prot, 3288 bool is_committed) { 3289 unsigned int p = 0; 3290 switch (prot) { 3291 case MEM_PROT_NONE: p = PAGE_NOACCESS; break; 3292 case MEM_PROT_READ: p = PAGE_READONLY; break; 3293 case MEM_PROT_RW: p = PAGE_READWRITE; break; 3294 case MEM_PROT_RWX: p = PAGE_EXECUTE_READWRITE; break; 3295 default: 3296 ShouldNotReachHere(); 3297 } 3298 3299 DWORD old_status; 3300 3301 // Strange enough, but on Win32 one can change protection only for committed 3302 // memory, not a big deal anyway, as bytes less or equal than 64K 3303 if (!is_committed) { 3304 commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX, 3305 "cannot commit protection page"); 3306 } 3307 // One cannot use os::guard_memory() here, as on Win32 guard page 3308 // have different (one-shot) semantics, from MSDN on PAGE_GUARD: 3309 // 3310 // Pages in the region become guard pages. Any attempt to access a guard page 3311 // causes the system to raise a STATUS_GUARD_PAGE exception and turn off 3312 // the guard page status. Guard pages thus act as a one-time access alarm. 3313 bool ret; 3314 if (UseNUMAInterleaving) { 3315 // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time, 3316 // so we must protect the chunks individually. 
    ret = protect_pages_individually(addr, bytes, p, &old_status);
  } else {
    ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
  }
#ifdef ASSERT
  if (!ret) {
    int err = os::get_last_error();
    char buf[256];
    size_t buf_len = os::lasterror(buf, sizeof(buf));
    warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
            ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
            buf_len != 0 ? buf : "<no_error_string>", err);
  }
#endif
  return ret;
}

// Mark pages as one-shot guard pages (see PAGE_GUARD note in protect_memory).
bool os::guard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}

bool os::unguard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}

// NUMA/paging hints that are no-ops on Windows.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
bool os::numa_topology_changed() { return false; }
size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id() { return 0; }

// Fill 'ids' with up to 'size' NUMA node ids; returns the number written.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // Provide an answer for UMA systems
    ids[0] = 0;
    return 1;
  } else {
    // check for size bigger than actual groups_num
    size = MIN2(size, numa_get_groups_num());
    for (int i = 0; i < (int)size; i++) {
      ids[i] = numa_node_list_holder.get_node_list_entry(i);
    }
    return size;
  }
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

// Page scanning is not supported on Windows; report the whole range as done.
char *os::scan_pages(char *start, char* end, page_info* page_expected,
                     page_info* page_found) {
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}

#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL

// Resume a thread created suspended (see thread creation elsewhere in file).
void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}

class HighResolutionInterval : public CHeapObj<mtThread> {
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
 private:
  jlong resolution;
 public:
  // Raise the system timer resolution to 1ms only when 'ms' is not a
  // multiple of the default 10ms tick (see class comment above).
  HighResolutionInterval(jlong ms) {
    resolution = ms % 10L;
    if (resolution != 0) {
      MMRESULT result = timeBeginPeriod(1L);
    }
  }
  ~HighResolutionInterval() {
    if (resolution != 0) {
      MMRESULT result = timeEndPeriod(1L);
    }
    resolution = 0L;
  }
};

// Sleep for 'ms' milliseconds.  When 'interruptable', waits on the thread's
// interrupt event and returns OS_INTRPT if interrupted, else OS_TIMEOUT.
// Sleeps longer than MAXDWORD ms are performed in MAXDWORD-sized slices.
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  while (ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) {
      return res;
    }
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    HighResolutionInterval *phri=NULL;
    if (!ForceTimeHighResolution) {
      phri = new HighResolutionInterval(ms);
    }
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}

// Short sleep, direct OS call.
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    Sleep(100000);  // ... 100 seconds at a time
  }
}

typedef BOOL (WINAPI * STTSignature)(void);

void os::naked_yield() {
  // Consider passing back the return value from SwitchToThread().
  SwitchToThread();
}

// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.

int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};

// Alternate, wider-spread table used when ThreadPriorityPolicy == 1.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11
CriticalPriority 3531 }; 3532 3533 static int prio_init() { 3534 // If ThreadPriorityPolicy is 1, switch tables 3535 if (ThreadPriorityPolicy == 1) { 3536 int i; 3537 for (i = 0; i < CriticalPriority + 1; i++) { 3538 os::java_to_os_priority[i] = prio_policy1[i]; 3539 } 3540 } 3541 if (UseCriticalJavaThreadPriority) { 3542 os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority]; 3543 } 3544 return 0; 3545 } 3546 3547 OSReturn os::set_native_priority(Thread* thread, int priority) { 3548 if (!UseThreadPriorities) return OS_OK; 3549 bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0; 3550 return ret ? OS_OK : OS_ERR; 3551 } 3552 3553 OSReturn os::get_native_priority(const Thread* const thread, 3554 int* priority_ptr) { 3555 if (!UseThreadPriorities) { 3556 *priority_ptr = java_to_os_priority[NormPriority]; 3557 return OS_OK; 3558 } 3559 int os_prio = GetThreadPriority(thread->osthread()->thread_handle()); 3560 if (os_prio == THREAD_PRIORITY_ERROR_RETURN) { 3561 assert(false, "GetThreadPriority failed"); 3562 return OS_ERR; 3563 } 3564 *priority_ptr = os_prio; 3565 return OS_OK; 3566 } 3567 3568 3569 // Hint to the underlying OS that a task switch would not be good. 3570 // Void return because it's a hint and can fail. 3571 void os::hint_no_preempt() {} 3572 3573 void os::interrupt(Thread* thread) { 3574 debug_only(Thread::check_for_dangling_thread_pointer(thread);) 3575 3576 OSThread* osthread = thread->osthread(); 3577 osthread->set_interrupted(true); 3578 // More than one thread can get here with the same value of osthread, 3579 // resulting in multiple notifications. We do, however, want the store 3580 // to interrupted() to be visible to other threads before we post 3581 // the interrupt event. 
3582 OrderAccess::release(); 3583 SetEvent(osthread->interrupt_event()); 3584 // For JSR166: unpark after setting status 3585 if (thread->is_Java_thread()) { 3586 ((JavaThread*)thread)->parker()->unpark(); 3587 } 3588 3589 ParkEvent * ev = thread->_ParkEvent; 3590 if (ev != NULL) ev->unpark(); 3591 } 3592 3593 3594 bool os::is_interrupted(Thread* thread, bool clear_interrupted) { 3595 debug_only(Thread::check_for_dangling_thread_pointer(thread);) 3596 3597 OSThread* osthread = thread->osthread(); 3598 // There is no synchronization between the setting of the interrupt 3599 // and it being cleared here. It is critical - see 6535709 - that 3600 // we only clear the interrupt state, and reset the interrupt event, 3601 // if we are going to report that we were indeed interrupted - else 3602 // an interrupt can be "lost", leading to spurious wakeups or lost wakeups 3603 // depending on the timing. By checking thread interrupt event to see 3604 // if the thread gets real interrupt thus prevent spurious wakeup. 3605 bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0); 3606 if (interrupted && clear_interrupted) { 3607 osthread->set_interrupted(false); 3608 ResetEvent(osthread->interrupt_event()); 3609 } // Otherwise leave the interrupted state alone 3610 3611 return interrupted; 3612 } 3613 3614 // GetCurrentThreadId() returns DWORD 3615 intx os::current_thread_id() { return GetCurrentThreadId(); } 3616 3617 static int _initial_pid = 0; 3618 3619 int os::current_process_id() { 3620 return (_initial_pid ? 
          _initial_pid : _getpid());
}

int    os::win32::_vm_page_size              = 0;
int    os::win32::_vm_allocation_granularity = 0;
int    os::win32::_processor_type            = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int    os::win32::_processor_level           = 0;
julong os::win32::_physical_memory           = 0;
size_t os::win32::_default_stack_size        = 0;

intx          os::win32::_os_thread_limit    = 0;
volatile intx os::win32::_os_thread_count    = 0;

bool   os::win32::_is_windows_server         = false;

// 6573254
// Currently, the bug is observed across all the supported Windows releases,
// including the latest one (as of this writing - Windows Server 2012 R2)
bool   os::win32::_has_exit_bug              = true;

// Cache page size, allocation granularity, processor and memory information
// from the OS; called once during VM startup.
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size              = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type            = si.dwProcessorType;
  _processor_level           = si.wProcessorLevel;
  set_processor_count(si.dwNumberOfProcessors);

  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);

  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatusEx(&ms);
  _physical_memory = ms.ullTotalPhys;

  if (FLAG_IS_DEFAULT(MaxRAM)) {
    // Adjust MaxRAM according to the maximum virtual address space available.
    FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
  }

  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch (oi.dwPlatformId) {
  case VER_PLATFORM_WIN32_NT:
    {
      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
          oi.wProductType == VER_NT_SERVER) {
        _is_windows_server = true;
      }
    }
    break;
  default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
         "stack size not a multiple of page size");

  initialize_performance_counter();
}


// Load a system DLL by bare name only (no path components allowed), trying
// the system directory first and then the Windows directory.  On failure an
// explanatory message is written into 'ebuf' and NULL is returned.
HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
                                      int ebuflen) {
  char path[MAX_PATH];
  DWORD size;
  DWORD pathLen = (DWORD)sizeof(path);
  HINSTANCE result = NULL;

  // only allow library name without path component
  assert(strchr(name, '\\') == NULL, "path not allowed");
  assert(strchr(name, ':') == NULL, "path not allowed");
  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
    jio_snprintf(ebuf, ebuflen,
                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
    return NULL;
  }

  // search system directory
  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // try Windows directory
  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; //
truncated 3721 } 3722 if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) { 3723 return NULL; // truncated 3724 } 3725 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3726 return result; 3727 } 3728 } 3729 3730 jio_snprintf(ebuf, ebuflen, 3731 "os::win32::load_windows_dll() cannot load %s from system directories.", name); 3732 return NULL; 3733 } 3734 3735 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS) 3736 #define EXIT_TIMEOUT 300000 /* 5 minutes */ 3737 3738 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) { 3739 InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect); 3740 return TRUE; 3741 } 3742 3743 int os::win32::exit_process_or_thread(Ept what, int exit_code) { 3744 // Basic approach: 3745 // - Each exiting thread registers its intent to exit and then does so. 3746 // - A thread trying to terminate the process must wait for all 3747 // threads currently exiting to complete their exit. 3748 3749 if (os::win32::has_exit_bug()) { 3750 // The array holds handles of the threads that have started exiting by calling 3751 // _endthreadex(). 3752 // Should be large enough to avoid blocking the exiting thread due to lack of 3753 // a free slot. 3754 static HANDLE handles[MAXIMUM_THREADS_TO_KEEP]; 3755 static int handle_count = 0; 3756 3757 static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT; 3758 static CRITICAL_SECTION crit_sect; 3759 static volatile DWORD process_exiting = 0; 3760 int i, j; 3761 DWORD res; 3762 HANDLE hproc, hthr; 3763 3764 // We only attempt to register threads until a process exiting 3765 // thread manages to set the process_exiting flag. Any threads 3766 // that come through here after the process_exiting flag is set 3767 // are unregistered and will be caught in the SuspendThread() 3768 // infinite loop below. 3769 bool registered = false; 3770 3771 // The first thread that reached this point, initializes the critical section. 
    if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
      warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
    } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
      if (what != EPT_THREAD) {
        // Atomically set process_exiting before the critical section
        // to increase the visibility between racing threads.
        Atomic::cmpxchg(GetCurrentThreadId(), &process_exiting, (DWORD)0);
      }
      EnterCriticalSection(&crit_sect);

      if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
        // Remove from the array those handles of the threads that have completed exiting.
        for (i = 0, j = 0; i < handle_count; ++i) {
          // A zero timeout probes the handle without blocking: WAIT_TIMEOUT
          // means the thread is still exiting and its handle must be kept.
          res = WaitForSingleObject(handles[i], 0 /* don't wait */);
          if (res == WAIT_TIMEOUT) {
            handles[j++] = handles[i];
          } else {
            if (res == WAIT_FAILED) {
              warning("WaitForSingleObject failed (%u) in %s: %d\n",
                      GetLastError(), __FILE__, __LINE__);
            }
            // Don't keep the handle, if we failed waiting for it.
            CloseHandle(handles[i]);
          }
        }

        // If there's no free slot in the array of the kept handles, we'll have to
        // wait until at least one thread completes exiting.
        if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
          // Raise the priority of the oldest exiting thread to increase its chances
          // to complete sooner.
          SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
          // Wait on the first batch only; MAXIMUM_WAIT_OBJECTS is the API limit.
          res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
          if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
            // One handle became signaled; compact the array over the freed slot.
            i = (res - WAIT_OBJECT_0);
            handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
            for (; i < handle_count; ++i) {
              handles[i] = handles[i + 1];
            }
          } else {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Don't keep handles, if we failed waiting for them.
            for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
              CloseHandle(handles[i]);
            }
            handle_count = 0;
          }
        }

        // Store a duplicate of the current thread handle in the array of handles.
        hproc = GetCurrentProcess();
        hthr = GetCurrentThread();
        if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
                             0, FALSE, DUPLICATE_SAME_ACCESS)) {
          warning("DuplicateHandle failed (%u) in %s: %d\n",
                  GetLastError(), __FILE__, __LINE__);

          // We can't register this thread (no more handles) so this thread
          // may be racing with a thread that is calling exit(). If the thread
          // that is calling exit() has managed to set the process_exiting
          // flag, then this thread will be caught in the SuspendThread()
          // infinite loop below which closes that race. A small timing
          // window remains before the process_exiting flag is set, but it
          // is only exposed when we are out of handles.
        } else {
          ++handle_count;
          registered = true;

          // The current exiting thread has stored its handle in the array, and now
          // should leave the critical section before calling _endthreadex().
        }

      } else if (what != EPT_THREAD && handle_count > 0) {
        jlong start_time, finish_time, timeout_left;
        // Before ending the process, make sure all the threads that had called
        // _endthreadex() completed.

        // Set the priority level of the current thread to the same value as
        // the priority level of exiting threads.
        // This is to ensure it will be given a fair chance to execute if
        // the timeout expires.
        hthr = GetCurrentThread();
        SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
        start_time = os::javaTimeNanos();
        finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
        // Wait in batches of at most MAXIMUM_WAIT_OBJECTS handles.
        for (i = 0; ; ) {
          int portion_count = handle_count - i;
          if (portion_count > MAXIMUM_WAIT_OBJECTS) {
            portion_count = MAXIMUM_WAIT_OBJECTS;
          }
          for (j = 0; j < portion_count; ++j) {
            SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
          }
          timeout_left = (finish_time - start_time) / 1000000L;
          if (timeout_left < 0) {
            timeout_left = 0;
          }
          res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
          if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Reset portion_count so we close the remaining
            // handles due to this error.
            portion_count = handle_count - i;
          }
          for (j = 0; j < portion_count; ++j) {
            CloseHandle(handles[i + j]);
          }
          if ((i += portion_count) >= handle_count) {
            break;
          }
          start_time = os::javaTimeNanos();
        }
        handle_count = 0;
      }

      LeaveCriticalSection(&crit_sect);
    }

    if (!registered &&
        OrderAccess::load_acquire(&process_exiting) != 0 &&
        process_exiting != GetCurrentThreadId()) {
      // Some other thread is about to call exit(), so we don't let
      // the current unregistered thread proceed to exit() or _endthreadex()
      while (true) {
        SuspendThread(GetCurrentThread());
        // Avoid busy-wait loop, if SuspendThread() failed.
        Sleep(EXIT_TIMEOUT);
      }
    }
  }

  // We are here if either
  // - there's no 'race at exit' bug on this OS release;
  // - initialization of the critical section failed (unlikely);
  // - the current thread has registered itself and left the critical section;
  // - the process-exiting thread has raised the flag and left the critical section.
  if (what == EPT_THREAD) {
    _endthreadex((unsigned)exit_code);
  } else if (what == EPT_PROCESS) {
    ::exit(exit_code);
  } else {
    // EPT_PROCESS_DIE: bypass atexit handlers and C runtime cleanup.
    _exit(exit_code);
  }

  // Should not reach here
  return exit_code;
}

#undef EXIT_TIMEOUT

// Put the three standard C streams into binary mode so text-mode CRLF
// translation never corrupts VM output.
void os::win32::setmode_streams() {
  _setmode(_fileno(stdin), _O_BINARY);
  _setmode(_fileno(stdout), _O_BINARY);
  _setmode(_fileno(stderr), _O_BINARY);
}


bool os::is_debugger_attached() {
  return IsDebuggerPresent() ? true : false;
}


// With -XX:+PauseAtExit, block on stdin before the process goes away so a
// user can inspect it (e.g. attach a debugger).
void os::wait_for_keypress_at_exit(void) {
  if (PauseAtExit) {
    fprintf(stderr, "Press any key to continue...\n");
    fgetc(stdin);
  }
}


// Shows a modal Yes/No error dialog; returns true iff the user chose Yes.
bool os::message_box(const char* title, const char* message) {
  int result = MessageBox(NULL, message, title,
                          MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
  return result == IDYES;
}

#ifndef PRODUCT
#ifndef _WIN64
// Helpers to check whether NX protection is enabled
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
  // Execute the handler only for an access violation caused by an
  // instruction-fetch (execute) fault; search on for anything else.
  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      pex->ExceptionRecord->NumberParameters > 0 &&
      pex->ExceptionRecord->ExceptionInformation[0] ==
      EXCEPTION_INFO_EXEC_VIOLATION) {
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 }; // ret
  void
*code_ptr = (void *)code;
  __try {
    // Attempt to execute the single 'ret' byte that lives on the stack.
    __asm call code_ptr
  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
    tty->print_raw_cr("NX protection detected.");
  }
}
#endif // _WIN64
#endif // PRODUCT

// This is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = _getpid();

  init_random(1234567);

  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false);

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  // Duplicate into a real handle so other threads can refer to the main thread.
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();

  // initialize fast thread access - only used for 32-bit
  win32::initialize_thread_ptr_offset();
}

// To install functions for atexit processing
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

static jint initSock();

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // Setup Windows Exceptions

  // for debugging float code generation bugs
  if (ForceFloatExceptions) {
#ifndef _WIN64
    static long fp_control_word = 0;
    __asm { fstcw fp_control_word }
    // see Intel PPro Manual, Vol. 2, p 7-16
    // x87 FPU exception mask bits; only 'invalid' is actually unmasked below.
    const long precision = 0x20;
    const long underflow = 0x10;
    const long overflow  = 0x08;
    const long zero_div  = 0x04;
    const long denorm    = 0x02;
    const long invalid   = 0x01;
    fp_control_word |= invalid;
    __asm { fldcw fp_control_word }
#endif
  }

  // If stack_commit_size is 0, windows will reserve the default size,
  // but only commit a small portion of it.
  size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
  size_t default_reserve_size = os::win32::default_stack_size();
  size_t actual_reserve_size = stack_commit_size;
  if (stack_commit_size < default_reserve_size) {
    // If stack_commit_size == 0, we want this too
    actual_reserve_size = default_reserve_size;
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size.  Add two 4K pages for compiler2 recursion in main thread.
  // Add in 4*BytesPerWord 4K pages to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  size_t min_stack_allowed =
            (size_t)(JavaThread::stack_guard_zone_size() +
                     JavaThread::stack_shadow_zone_size() +
                     (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);

  min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());

  if (actual_reserve_size < min_stack_allowed) {
    tty->print_cr("\nThe Java thread stack size specified is too small. "
                  "Specify at least %dk",
                  min_stack_allowed / K);
    return JNI_ERR;
  }

  JavaThread::set_stack_size_at_create(stack_commit_size);

  // Calculate theoretical max. size of Threads to guard gainst artifical
  // out-of-memory situations, where all available address-space has been
  // reserved by thread stacks.
  assert(actual_reserve_size != 0, "Must have a stack");

  // Calculate the thread limit when we should start doing Virtual Memory
  // banging. Currently when the threads will have used all but 200Mb of space.
  //
  // TODO: consider performing a similar calculation for commit size instead
  // as reserve size, since on a 64-bit platform we'll run into that more
  // often than running out of virtual memory space.  We can use the
  // lower value of the two calculations as the os_thread_limit.
  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);

  // at exit methods are called in the reverse order of their registration.
  // there is no limit to the number of functions registered. atexit does
  // not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

#ifndef _WIN64
  // Print something if NX is enabled (win32 on AMD64)
  NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
#endif

  // initialize thread priority policy
  prio_init();

  if (UseNUMA && !ForceNUMA) {
    UseNUMA = false; // We don't fully support this yet
  }

  if (UseNUMAInterleaving) {
    // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
    bool success = numa_interleaving_init();
    if (!success) UseNUMAInterleaving = false;
  }

  if (initSock() != JNI_OK) {
    return JNI_ERR;
  }

  SymbolEngine::recalc_search_path();

  return JNI_OK;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  DWORD old_status;
  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
                      PAGE_NOACCESS, &old_status)) {
    fatal("Could not disable polling page");
  }
}

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  DWORD old_status;
  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
                      PAGE_READONLY, &old_status)) {
    fatal("Could not enable polling page");
  }
}

// combine the high and low DWORD into a ULONGLONG
static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
  ULONGLONG value = high_word;
  value <<= sizeof(high_word) * 8;
  value |= low_word;
  return value;
}

// Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
  ::memset((void*)sbuf, 0, sizeof(struct stat));
  sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh,
file_data.nFileSizeLow);
  // NOTE(review): the three timestamps below receive the raw combined FILETIME
  // value with no conversion to time_t epoch seconds — confirm that all
  // consumers of this struct stat expect that representation.
  sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
                                    file_data.ftLastWriteTime.dwLowDateTime);
  sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
                                    file_data.ftCreationTime.dwLowDateTime);
  sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
                                    file_data.ftLastAccessTime.dwLowDateTime);
  if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
    sbuf->st_mode |= S_IFDIR;
  } else {
    sbuf->st_mode |= S_IFREG;
  }
}

// The following function is adapted from java.base/windows/native/libjava/canonicalize_md.c
// Creates an UNC path from a single byte path. Return buffer is
// allocated in C heap and needs to be freed by the caller.
// Returns NULL on error.
static wchar_t* create_unc_path(const char* path, errno_t &err) {
  wchar_t* wpath = NULL;
  size_t converted_chars = 0;
  size_t path_len = strlen(path) + 1; // includes the terminating NULL
  if (path[0] == '\\' && path[1] == '\\') {
    if (path[2] == '?' && path[3] == '\\') {
      // if it already has a \\?\ don't do the prefix
      wpath = (wchar_t*)os::malloc(path_len * sizeof(wchar_t), mtInternal);
      if (wpath != NULL) {
        err = ::mbstowcs_s(&converted_chars, wpath, path_len, path, path_len);
      } else {
        err = ENOMEM;
      }
    } else {
      // only UNC pathname includes double slashes here
      // 7 extra wchar_ts hold the "\\?\UNC" prefix prepended below.
      wpath = (wchar_t*)os::malloc((path_len + 7) * sizeof(wchar_t), mtInternal);
      if (wpath != NULL) {
        ::wcscpy(wpath, L"\\\\?\\UNC\0");
        err = ::mbstowcs_s(&converted_chars, &wpath[7], path_len, path, path_len);
      } else {
        err = ENOMEM;
      }
    }
  } else {
    // Plain path: 4 extra wchar_ts hold the "\\?\" long-path prefix.
    wpath = (wchar_t*)os::malloc((path_len + 4) * sizeof(wchar_t), mtInternal);
    if (wpath != NULL) {
      ::wcscpy(wpath, L"\\\\?\\\0");
      err = ::mbstowcs_s(&converted_chars, &wpath[4], path_len, path, path_len);
    } else {
      err = ENOMEM;
    }
  }
  return wpath;
}

// Releases a buffer obtained from create_unc_path().
static void destroy_unc_path(wchar_t* wpath) {
  os::free(wpath);
}

// stat() replacement. Uses GetFileAttributesEx instead of the CRT stat()
// (see JDK-6539723); switches to the wide-character "\\?\" form for paths
// at or beyond MAX_PATH. Returns 0 on success, -1 (with errno set) on error.
int os::stat(const char *path, struct stat *sbuf) {
  char* pathbuf = (char*)os::strdup(path, mtInternal);
  if (pathbuf == NULL) {
    errno = ENOMEM;
    return -1;
  }
  os::native_path(pathbuf);
  int ret;
  WIN32_FILE_ATTRIBUTE_DATA file_data;
  // Not using stat() to avoid the problem described in JDK-6539723
  if (strlen(path) < MAX_PATH) {
    BOOL bret = ::GetFileAttributesExA(pathbuf, GetFileExInfoStandard, &file_data);
    if (!bret) {
      // NOTE(review): this stores a Windows error code (GetLastError) into
      // errno, not a CRT errno value — confirm callers tolerate that.
      errno = ::GetLastError();
      ret = -1;
    }
    else {
      file_attribute_data_to_stat(sbuf, file_data);
      ret = 0;
    }
  } else {
    errno_t err = ERROR_SUCCESS;
    wchar_t* wpath = create_unc_path(pathbuf, err);
    if (err != ERROR_SUCCESS) {
      if (wpath != NULL) {
        destroy_unc_path(wpath);
      }
      os::free(pathbuf);
      errno = err;
      return -1;
    }
    BOOL bret = ::GetFileAttributesExW(wpath, GetFileExInfoStandard, &file_data);
    if (!bret) {
      errno = ::GetLastError();
      ret = -1;
    } else {
      file_attribute_data_to_stat(sbuf, file_data);
      ret = 0;
    }
    destroy_unc_path(wpath);
  }
  os::free(pathbuf);
  return ret;
}


// Combine a FILETIME's two DWORDs into a signed 64-bit value.
#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))


// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.

// current_thread_cpu_time() is not optimized for Windows yet
jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
}

jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns.
  return os::thread_cpu_time(thread, true /* user+sys */);
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
}

jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
  // This code is copy from clasic VM -> hpi::sysThreadCPUTime
  // If this function changes, os::is_thread_cpu_time_supported() should too
  FILETIME CreationTime;
  FILETIME ExitTime;
  FILETIME KernelTime;
  FILETIME UserTime;

  if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
                     &ExitTime, &KernelTime, &UserTime) == 0) {
    return -1;
  } else if (user_sys_cpu_time) {
    // Scale from 100ns FILETIME units to nanoseconds.
    return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
  } else {
    return FT2INT64(UserTime) * 100;
  }
}

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}

bool os::is_thread_cpu_time_supported() {
  // see os::thread_cpu_time
  FILETIME CreationTime;
  FILETIME ExitTime;
  FILETIME KernelTime;
  FILETIME UserTime;

  // Probe with the current thread: supported iff GetThreadTimes succeeds.
  if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
                     &KernelTime, &UserTime) == 0) {
    return false;
  } else {
    return true;
  }
}
// Windows doesn't provide a loadavg primitive so this is stubbed out for now.
// It does have primitives (PDH API) to get CPU usage and run queue length.
// "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
// If we wanted to implement loadavg on Windows, we have a few options:
//
// a) Query CPU usage and run queue length and "fake" an answer by
//    returning the CPU usage if it's under 100%, and the run queue
//    length otherwise.  It turns out that querying is pretty slow
//    on Windows, on the order of 200 microseconds on a fast machine.
//    Note that on Windows the CPU usage value is the % usage
//    since the last time the API was called (and the first call
//    returns 100%), so we'd have to deal with that as well.
//
// b) Sample the "fake" answer using a sampling thread and store
//    the answer in a global variable.  The call to loadavg would
//    just return the value of the global, avoiding the slow query.
//
// c) Sample a better answer using exponential decay to smooth the
//    value.  This is basically the algorithm used by UNIX kernels.
//
// Note that sampling thread starvation could affect both (b) and (c).
4350 int os::loadavg(double loadavg[], int nelem) { 4351 return -1; 4352 } 4353 4354 4355 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield() 4356 bool os::dont_yield() { 4357 return DontYieldALot; 4358 } 4359 4360 // This method is a slightly reworked copy of JDK's sysOpen 4361 // from src/windows/hpi/src/sys_api_md.c 4362 4363 int os::open(const char *path, int oflag, int mode) { 4364 char* pathbuf = (char*)os::strdup(path, mtInternal); 4365 if (pathbuf == NULL) { 4366 errno = ENOMEM; 4367 return -1; 4368 } 4369 os::native_path(pathbuf); 4370 int ret; 4371 if (strlen(path) < MAX_PATH) { 4372 ret = ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); 4373 } else { 4374 errno_t err = ERROR_SUCCESS; 4375 wchar_t* wpath = create_unc_path(pathbuf, err); 4376 if (err != ERROR_SUCCESS) { 4377 if (wpath != NULL) { 4378 destroy_unc_path(wpath); 4379 } 4380 os::free(pathbuf); 4381 errno = err; 4382 return -1; 4383 } 4384 ret = ::_wopen(wpath, oflag | O_BINARY | O_NOINHERIT, mode); 4385 if (ret == -1) { 4386 errno = ::GetLastError(); 4387 } 4388 destroy_unc_path(wpath); 4389 } 4390 os::free(pathbuf); 4391 return ret; 4392 } 4393 4394 FILE* os::open(int fd, const char* mode) { 4395 return ::_fdopen(fd, mode); 4396 } 4397 4398 // Is a (classpath) directory empty? 
// Returns true iff the directory contains no entries other than "." and "..".
// Errors (allocation failure, bad path) also yield false.
bool os::dir_is_empty(const char* path) {
  // +3: room for an appended "\\*" and the terminating NUL.
  char* search_path = (char*)os::malloc(strlen(path) + 3, mtInternal);
  if (search_path == NULL) {
    errno = ENOMEM;
    return false;
  }
  strcpy(search_path, path);
  // Append "*", or possibly "\\*", to path
  if (path[1] == ':' &&
      (path[2] == '\0' ||
       (path[2] == '\\' && path[3] == '\0'))) {
    // No '\\' needed for cases like "Z:" or "Z:\"
    strcat(search_path, "*");
  }
  else {
    strcat(search_path, "\\*");
  }
  errno_t err = ERROR_SUCCESS;
  wchar_t* wpath = create_unc_path(search_path, err);
  if (err != ERROR_SUCCESS) {
    if (wpath != NULL) {
      destroy_unc_path(wpath);
    }
    os::free(search_path);
    errno = err;
    return false;
  }
  WIN32_FIND_DATAW fd;
  HANDLE f = ::FindFirstFileW(wpath, &fd);
  destroy_unc_path(wpath);
  bool is_empty = true;
  if (f != INVALID_HANDLE_VALUE) {
    while (is_empty && ::FindNextFileW(f, &fd)) {
      // An empty directory contains only the current directory file
      // and the previous directory file.
      if ((wcscmp(fd.cFileName, L".") != 0) &&
          (wcscmp(fd.cFileName, L"..") != 0)) {
        is_empty = false;
      }
    }
    FindClose(f);
  }
  os::free(search_path);
  return is_empty;
}

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
  if (!rewrite_existing) {
    // _O_EXCL makes the open fail if the file already exists.
    oflags |= _O_EXCL;
  }
  return ::open(path, oflags, _S_IREAD | _S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}


jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::_lseeki64(fd, offset, whence);
}

// Positioned read via overlapped ReadFile; does not move the fd's own file
// pointer semantics beyond what ReadFile does. Returns bytes read, 0 on error.
size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  OVERLAPPED ov;
  DWORD nread;
  BOOL result;

  ZeroMemory(&ov, sizeof(ov));
  // Split the 64-bit offset across the two OVERLAPPED offset fields.
  ov.Offset = (DWORD)offset;
  ov.OffsetHigh = (DWORD)(offset >> 32);

  HANDLE h = (HANDLE)::_get_osfhandle(fd);

  result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);

  return result ? nread : 0;
}


// This method is a slightly reworked copy of JDK's sysNativePath
// from src/windows/hpi/src/path_md.c

// Convert a pathname to native format.  On win32, this involves forcing all
// separators to be '\\' rather than '/' (both are legal inputs, but Win95
// sometimes rejects '/') and removing redundant separators.  The input path is
// assumed to have been converted into the character encoding used by the local
// system.  Because this might be a double-byte encoding, care is taken to
// treat double-byte lead characters correctly.
//
// This procedure modifies the given path in place, as the result is never
// longer than the original.  There is no error return; this operation always
// succeeds.
char * os::native_path(char *path) {
  char *src = path, *dst = path, *end = path;
  char *colon = NULL;  // If a drive specifier is found, this will
                       // point to the colon following the drive letter

  // Assumption: '/', '\\', ':', and drive letters are never lead bytes
  assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
          && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");

  // Check for leading separators
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    // Remove leading separators if followed by drive specifier.  This
    // hack is necessary to support file URLs containing drive
    // specifiers (e.g., "file://c:/path").  As a side effect,
    // "/c:/path" can be used as an alternative to "c:/path".
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      // UNC pathname: Retain first separator; leave src pointed at
      // second separator so that further separators will be collapsed
      // into the second separator.  The result will be a pathname
      // beginning with "\\\\" followed (most likely) by a host name.
      src = dst = path + 1;
      path[0] = '\\';  // Force first separator to '\\'
    }
  }

  end = dst;

  // Remove redundant separators from remainder of path, forcing all
  // separators to be '\\' rather than '/'. Also, single byte space
  // characters are removed from the end of the path because those
  // are not legal ending characters on this operating system.
  //
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      while (isfilesep(*src)) src++;
      if (*src == '\0') {
        // Check for trailing separator
        end = dst;
        if (colon == dst - 2) break;  // "z:\\"
        if (dst == path + 1) break;   // "\\"
        if (dst == path + 2 && isfilesep(path[0])) {
          // "\\\\" is not collapsed to "\\" because "\\\\" marks the
          // beginning of a UNC pathname.  Even though it is not, by
          // itself, a valid UNC pathname, we leave it as is in order
          // to be consistent with the path canonicalizer as well
          // as the win32 APIs, which treat this case as an invalid
          // UNC pathname rather than as an alias for the root
          // directory of the current drive.
          break;
        }
        end = --dst;  // Path does not denote a root directory, so
                      // remove trailing separator
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else {  // Copy a single-byte character
        char c = *src++;
        *dst++ = c;
        // Space is not a legal ending character
        if (c != ' ') end = dst;
      }
    }
  }

  *end = '\0';

  // For "z:", add "." to work around a bug in the C runtime library
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}

// This code is a copy of JDK's sysSetLength
// from src/windows/hpi/src/sys_api_md.c

int os::ftruncate(int fd, jlong length) {
  HANDLE h = (HANDLE)::_get_osfhandle(fd);
  long high = (long)(length >> 32);
  DWORD ret;

  if (h == (HANDLE)(-1)) {
    return -1;
  }

  // Move the file pointer to 'length', then truncate the file there.
  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
    return -1;
  }

  if (::SetEndOfFile(h) == FALSE) {
    return -1;
  }

  return 0;
}

int os::get_fileno(FILE* fp) {
  return _fileno(fp);
}

// This code is a copy of JDK's sysSync
// from src/windows/hpi/src/sys_api_md.c
// except for the legacy workaround for a bug in Win 98

int os::fsync(int fd) {
  HANDLE handle = (HANDLE)::_get_osfhandle(fd);

  // ERROR_ACCESS_DENIED is tolerated: it happens for handles (e.g. console)
  // that cannot be flushed but should not be treated as an fsync failure.
  if ((!::FlushFileBuffers(handle)) &&
      (GetLastError() != ERROR_ACCESS_DENIED)) {
    // from winerror.h
    return -1;
  }
  return 0;
}

static int nonSeekAvailable(int, long *);
static int stdinAvailable(int, long *);

#define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
#define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)

// This code is a copy of JDK's sysAvailable
// from src/windows/hpi/src/sys_api_md.c

int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      // Character devices and pipes are not seekable; use peek-based probes.
      int ret;
      long lpbytes;
      if (fd == 0) {
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    // Regular file: available = (end of file) - (current position),
    // computed by seeking to the end and back.
    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
      return FALSE;
    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
      return FALSE;
    }
    *bytes = end - cur;
    return TRUE;
  } else {
    return FALSE;
  }
}

void os::flockfile(FILE* fp) {
  _lock_file(fp);
}

void os::funlockfile(FILE* fp) {
  _unlock_file(fp);
}

// This code is a copy of JDK's nonSeekAvailable
// from src/windows/hpi/src/sys_api_md.c

static int nonSeekAvailable(int fd, long *pbytes) {
  // This is used for available on non-seekable devices
  // (like both named and anonymous pipes, such as pipes
  //  connected to an exec'd process).
  // Standard Input is a special case.
  HANDLE han;

  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
    return FALSE;
  }

  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
    // PeekNamedPipe fails when at EOF.  In that case we
    // simply make *pbytes = 0 which is consistent with the
    // behavior we get on Solaris when an fd is at EOF.
    // The only alternative is to raise an Exception,
    // which isn't really warranted.
    //
    if (::GetLastError() != ERROR_BROKEN_PIPE) {
      return FALSE;
    }
    *pbytes = 0;
  }
  return TRUE;
}

// lpBuffer must fit into 64K for PeekConsoleInput, hence this cap.
#define MAX_INPUT_EVENTS 2000

// This code is a copy of JDK's stdinAvailable
// from src/windows/hpi/src/sys_api_md.c
// (The tail of this function continues beyond this chunk.)

static int stdinAvailable(int fd, long *pbytes) {
  HANDLE han;
  DWORD numEventsRead = 0;  // Number of events read from buffer
  DWORD numEvents = 0;      // Number of events in buffer
  DWORD i = 0;              // Loop index
  DWORD curLength = 0;      // Position marker
  DWORD actualLength = 0;   // Number of bytes readable
  BOOL error = FALSE;       // Error holder
  INPUT_RECORD *lpBuffer;   // Pointer to records of input events

  if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
    return FALSE;
  }

  // Construct an array of input records in the console buffer
  error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
  if (error == 0) {
    // Not a console (e.g. redirected stdin) — fall back to the pipe probe.
    return nonSeekAvailable(fd, pbytes);
  }

  // lpBuffer must fit into 64K or else PeekConsoleInput fails
  if (numEvents > MAX_INPUT_EVENTS) {
    numEvents = MAX_INPUT_EVENTS;
  }

  lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
  if (lpBuffer == NULL) {
    return FALSE;
  }

  error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
  if (error == 0) {
    os::free(lpBuffer);
    return FALSE;
  }

  // Examine input records for the number of bytes available
  for (i=0; i<numEvents; i++) {
    if (lpBuffer[i].EventType == KEY_EVENT) {

      KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
                                    &(lpBuffer[i].Event);
      if (keyRecord->bKeyDown == TRUE) {
        CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
        curLength++;
        // Only characters up to (and including) the last carriage return
        // are counted as readable.
        if (*keyPressed == '\r') {
          actualLength = curLength;
        }
      }
    }
  }

  if (lpBuffer != NULL) {
    os::free(lpBuffer);
  }
4771 4772 *pbytes = (long) actualLength; 4773 return TRUE; 4774 } 4775 4776 // Map a block of memory. 4777 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 4778 char *addr, size_t bytes, bool read_only, 4779 bool allow_exec) { 4780 HANDLE hFile; 4781 char* base; 4782 4783 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL, 4784 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); 4785 if (hFile == NULL) { 4786 log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError()); 4787 return NULL; 4788 } 4789 4790 if (allow_exec) { 4791 // CreateFileMapping/MapViewOfFileEx can't map executable memory 4792 // unless it comes from a PE image (which the shared archive is not.) 4793 // Even VirtualProtect refuses to give execute access to mapped memory 4794 // that was not previously executable. 4795 // 4796 // Instead, stick the executable region in anonymous memory. Yuck. 4797 // Penalty is that ~4 pages will not be shareable - in the future 4798 // we might consider DLLizing the shared archive with a proper PE 4799 // header so that mapping executable + sharing is possible. 4800 4801 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE, 4802 PAGE_READWRITE); 4803 if (base == NULL) { 4804 log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError()); 4805 CloseHandle(hFile); 4806 return NULL; 4807 } 4808 4809 DWORD bytes_read; 4810 OVERLAPPED overlapped; 4811 overlapped.Offset = (DWORD)file_offset; 4812 overlapped.OffsetHigh = 0; 4813 overlapped.hEvent = NULL; 4814 // ReadFile guarantees that if the return value is true, the requested 4815 // number of bytes were read before returning. 
4816 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; 4817 if (!res) { 4818 log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError()); 4819 release_memory(base, bytes); 4820 CloseHandle(hFile); 4821 return NULL; 4822 } 4823 } else { 4824 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, 4825 NULL /* file_name */); 4826 if (hMap == NULL) { 4827 log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError()); 4828 CloseHandle(hFile); 4829 return NULL; 4830 } 4831 4832 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; 4833 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, 4834 (DWORD)bytes, addr); 4835 if (base == NULL) { 4836 log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError()); 4837 CloseHandle(hMap); 4838 CloseHandle(hFile); 4839 return NULL; 4840 } 4841 4842 if (CloseHandle(hMap) == 0) { 4843 log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError()); 4844 CloseHandle(hFile); 4845 return base; 4846 } 4847 } 4848 4849 if (allow_exec) { 4850 DWORD old_protect; 4851 DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; 4852 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; 4853 4854 if (!res) { 4855 log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError()); 4856 // Don't consider this a hard error, on IA32 even if the 4857 // VirtualProtect fails, we should still be able to execute 4858 CloseHandle(hFile); 4859 return base; 4860 } 4861 } 4862 4863 if (CloseHandle(hFile) == 0) { 4864 log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError()); 4865 return base; 4866 } 4867 4868 return base; 4869 } 4870 4871 4872 // Remap a block of memory. 
4873 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4874 char *addr, size_t bytes, bool read_only, 4875 bool allow_exec) { 4876 // This OS does not allow existing memory maps to be remapped so we 4877 // have to unmap the memory before we remap it. 4878 if (!os::unmap_memory(addr, bytes)) { 4879 return NULL; 4880 } 4881 4882 // There is a very small theoretical window between the unmap_memory() 4883 // call above and the map_memory() call below where a thread in native 4884 // code may be able to access an address that is no longer mapped. 4885 4886 return os::map_memory(fd, file_name, file_offset, addr, bytes, 4887 read_only, allow_exec); 4888 } 4889 4890 4891 // Unmap a block of memory. 4892 // Returns true=success, otherwise false. 4893 4894 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4895 MEMORY_BASIC_INFORMATION mem_info; 4896 if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) { 4897 log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError()); 4898 return false; 4899 } 4900 4901 // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx. 4902 // Instead, executable region was allocated using VirtualAlloc(). See 4903 // pd_map_memory() above. 4904 // 4905 // The following flags should match the 'exec_access' flages used for 4906 // VirtualProtect() in pd_map_memory(). 
4907 if (mem_info.Protect == PAGE_EXECUTE_READ || 4908 mem_info.Protect == PAGE_EXECUTE_READWRITE) { 4909 return pd_release_memory(addr, bytes); 4910 } 4911 4912 BOOL result = UnmapViewOfFile(addr); 4913 if (result == 0) { 4914 log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError()); 4915 return false; 4916 } 4917 return true; 4918 } 4919 4920 void os::pause() { 4921 char filename[MAX_PATH]; 4922 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4923 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4924 } else { 4925 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4926 } 4927 4928 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4929 if (fd != -1) { 4930 struct stat buf; 4931 ::close(fd); 4932 while (::stat(filename, &buf) == 0) { 4933 Sleep(100); 4934 } 4935 } else { 4936 jio_fprintf(stderr, 4937 "Could not open pause file '%s', continuing immediately.\n", filename); 4938 } 4939 } 4940 4941 Thread* os::ThreadCrashProtection::_protected_thread = NULL; 4942 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL; 4943 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0; 4944 4945 os::ThreadCrashProtection::ThreadCrashProtection() { 4946 } 4947 4948 // See the caveats for this class in os_windows.hpp 4949 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4950 // into this method and returns false. If no OS EXCEPTION was raised, returns 4951 // true. 4952 // The callback is supposed to provide the method that should be protected. 
//
bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {

  // Serialize crash protection: only one protected region at a time.
  Thread::muxAcquire(&_crash_mux, "CrashProtection");

  _protected_thread = Thread::current_or_null();
  assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");

  bool success = true;
  __try {
    // Publish the active protection before running the callback so the
    // fault handler can recognize a protected fault.
    _crash_protection = this;
    cb.call();
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // only for protection, nothing to do
    success = false;
  }
  _crash_protection = NULL;
  _protected_thread = NULL;
  Thread::muxRelease(&_crash_mux);
  return success;
}

// An Event wraps a win32 "CreateEvent" kernel handle.
//
// We have a number of choices regarding "CreateEvent" win32 handle leakage:
//
// 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
//     field, and call CloseHandle() on the win32 event handle.  Unpark() would
//     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
//     In addition, an unpark() operation might fetch the handle field, but the
//     event could recycle between the fetch and the SetEvent() operation.
//     SetEvent() would either fail because the handle was invalid, or inadvertently work,
//     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
//     on an stale but recycled handle would be harmless, but in practice this might
//     confuse other non-Sun code, so it's not a viable approach.
//
// 2:  Once a win32 event handle is associated with an Event, it remains associated
//     with the Event.  The event handle is never closed.  This could be construed
//     as handle leakage, but only up to the maximum # of threads that have been extant
//     at any one time.  This shouldn't be an issue, as windows platforms typically
//     permit a process to have hundreds of thousands of open handles.
//
// 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
//     and release unused handles.
//
// 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
//     It's not clear, however, that we wouldn't be trading one type of leak for another.
//
// 5.  Use an RCU-like mechanism (Read-Copy Update).
//     Or perhaps something similar to Maged Michael's "Hazard pointers".
//
// We use (2).
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
// 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
//     to recover from (or at least detect) the dreaded Windows 841176 bug.
// 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
//     into a single win32 CreateEvent() handle.
//
// Assumption:
//    Only one parker can exist on an event, which is why we allocate
//    them per-thread. Multiple unparkers can coexist.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block; then set _Event to 0 before returning
//
// _Event transitions in unpark()
//    0 => 1 : just return
//    1 => 1 : just return
//   -1 => either 0 or 1; must signal target thread
//         That is, we can safely transition _Event from -1 to either
//         0 or 1.
//
// _Event serves as a restricted-range semaphore.
//   -1 : thread is blocked, i.e. there is a waiter
//    0 : neutral: thread is running or ready,
//        could have been signaled after a wait started
//    1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
//

int os::PlatformEvent::park(jlong Millis) {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL , "Invariant");
  guarantee(Millis > 0          , "Invariant");

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.
  // Consider: use atomic decrement instead of CAS-loop

  // Atomically decrement _Event; 'v' is the value we observed beforehand.
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return OS_OK;  // was already signaled - consume and return

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;  // set prd = MIN(Millis, MAXTIMEOUT): each round
                         // waits at most MAXTIMEOUT ms
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
  }
  v = _Event;
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultanous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}

void os::PlatformEvent::park() {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Consider: use atomic decrement instead of CAS-loop
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return;  // was already signaled - consume and return

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  // Loop guards against spurious wakeups: only exit once unpark() has
  // raised _Event out of the parked (-1) state.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case can
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}

void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // Only signal the kernel event if a waiter was parked (previous
  // value < 0); otherwise the store of 1 alone suffices.
  if (Atomic::xchg(1, &_Event) >= 0) return;

  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

// The Windows implementation of Park is very straightforward: Basic
// operations on Win32 Events turn out to have the right semantics to
// use them directly. We opportunistically resuse the event inherited
// from Monitor.
// Park the current thread for at most 'time':
//   time <  0           : return immediately
//   time == 0, relative : wait indefinitely
//   isAbsolute          : 'time' is a deadline in millis since the epoch
//   otherwise           : 'time' is a relative timeout in nanos
void Parker::park(bool isAbsolute, jlong time) {
  guarantee(_ParkEvent != NULL, "invariant");
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  } else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  } else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) { // already elapsed
      return;
    }
  } else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0) { // Wait for the minimal time unit if zero
      time = 1;
    }
  }

  JavaThread* thread = JavaThread::current();

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    // Consume the permit/trigger and return without blocking.
    ResetEvent(_ParkEvent);
    return;
  } else {
    // Transition to a blocked state visible to the VM before waiting.
    ThreadBlockInVM tbivm(thread);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    thread->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (thread->handle_special_suspend_equivalent_condition()) {
      thread->java_suspend_self();
    }
  }
}

void Parker::unpark() {
  guarantee(_ParkEvent != NULL, "invariant");
  SetEvent(_ParkEvent);
}

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't create a new process).
5212 int os::fork_and_exec(char* cmd) { 5213 STARTUPINFO si; 5214 PROCESS_INFORMATION pi; 5215 DWORD exit_code; 5216 5217 char * cmd_string; 5218 char * cmd_prefix = "cmd /C "; 5219 size_t len = strlen(cmd) + strlen(cmd_prefix) + 1; 5220 cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal); 5221 if (cmd_string == NULL) { 5222 return -1; 5223 } 5224 cmd_string[0] = '\0'; 5225 strcat(cmd_string, cmd_prefix); 5226 strcat(cmd_string, cmd); 5227 5228 // now replace all '\n' with '&' 5229 char * substring = cmd_string; 5230 while ((substring = strchr(substring, '\n')) != NULL) { 5231 substring[0] = '&'; 5232 substring++; 5233 } 5234 memset(&si, 0, sizeof(si)); 5235 si.cb = sizeof(si); 5236 memset(&pi, 0, sizeof(pi)); 5237 BOOL rslt = CreateProcess(NULL, // executable name - use command line 5238 cmd_string, // command line 5239 NULL, // process security attribute 5240 NULL, // thread security attribute 5241 TRUE, // inherits system handles 5242 0, // no creation flags 5243 NULL, // use parent's environment block 5244 NULL, // use parent's starting directory 5245 &si, // (in) startup information 5246 &pi); // (out) process information 5247 5248 if (rslt) { 5249 // Wait until child process exits. 5250 WaitForSingleObject(pi.hProcess, INFINITE); 5251 5252 GetExitCodeProcess(pi.hProcess, &exit_code); 5253 5254 // Close process and thread handles. 
5255 CloseHandle(pi.hProcess); 5256 CloseHandle(pi.hThread); 5257 } else { 5258 exit_code = -1; 5259 } 5260 5261 FREE_C_HEAP_ARRAY(char, cmd_string); 5262 return (int)exit_code; 5263 } 5264 5265 bool os::find(address addr, outputStream* st) { 5266 int offset = -1; 5267 bool result = false; 5268 char buf[256]; 5269 if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) { 5270 st->print(PTR_FORMAT " ", addr); 5271 if (strlen(buf) < sizeof(buf) - 1) { 5272 char* p = strrchr(buf, '\\'); 5273 if (p) { 5274 st->print("%s", p + 1); 5275 } else { 5276 st->print("%s", buf); 5277 } 5278 } else { 5279 // The library name is probably truncated. Let's omit the library name. 5280 // See also JDK-8147512. 5281 } 5282 if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) { 5283 st->print("::%s + 0x%x", buf, offset); 5284 } 5285 st->cr(); 5286 result = true; 5287 } 5288 return result; 5289 } 5290 5291 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) { 5292 DWORD exception_code = e->ExceptionRecord->ExceptionCode; 5293 5294 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 5295 JavaThread* thread = JavaThread::current(); 5296 PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord; 5297 address addr = (address) exceptionRecord->ExceptionInformation[1]; 5298 5299 if (os::is_memory_serialize_page(thread, addr)) { 5300 return EXCEPTION_CONTINUE_EXECUTION; 5301 } 5302 } 5303 5304 return EXCEPTION_CONTINUE_SEARCH; 5305 } 5306 5307 static jint initSock() { 5308 WSADATA wsadata; 5309 5310 if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) { 5311 jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n", 5312 ::GetLastError()); 5313 return JNI_ERR; 5314 } 5315 return JNI_OK; 5316 } 5317 5318 struct hostent* os::get_host_by_name(char* name) { 5319 return (struct hostent*)gethostbyname(name); 5320 } 5321 5322 int os::socket_close(int fd) { 5323 return ::closesocket(fd); 5324 } 5325 5326 int os::socket(int domain, int type, int 
protocol) { 5327 return ::socket(domain, type, protocol); 5328 } 5329 5330 int os::connect(int fd, struct sockaddr* him, socklen_t len) { 5331 return ::connect(fd, him, len); 5332 } 5333 5334 int os::recv(int fd, char* buf, size_t nBytes, uint flags) { 5335 return ::recv(fd, buf, (int)nBytes, flags); 5336 } 5337 5338 int os::send(int fd, char* buf, size_t nBytes, uint flags) { 5339 return ::send(fd, buf, (int)nBytes, flags); 5340 } 5341 5342 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) { 5343 return ::send(fd, buf, (int)nBytes, flags); 5344 } 5345 5346 // WINDOWS CONTEXT Flags for THREAD_SAMPLING 5347 #if defined(IA32) 5348 #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS) 5349 #elif defined (AMD64) 5350 #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT) 5351 #endif 5352 5353 // returns true if thread could be suspended, 5354 // false otherwise 5355 static bool do_suspend(HANDLE* h) { 5356 if (h != NULL) { 5357 if (SuspendThread(*h) != ~0) { 5358 return true; 5359 } 5360 } 5361 return false; 5362 } 5363 5364 // resume the thread 5365 // calling resume on an active thread is a no-op 5366 static void do_resume(HANDLE* h) { 5367 if (h != NULL) { 5368 ResumeThread(*h); 5369 } 5370 } 5371 5372 // retrieve a suspend/resume context capable handle 5373 // from the tid. Caller validates handle return value. 
5374 void get_thread_handle_for_extended_context(HANDLE* h, 5375 OSThread::thread_id_t tid) { 5376 if (h != NULL) { 5377 *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid); 5378 } 5379 } 5380 5381 // Thread sampling implementation 5382 // 5383 void os::SuspendedThreadTask::internal_do_task() { 5384 CONTEXT ctxt; 5385 HANDLE h = NULL; 5386 5387 // get context capable handle for thread 5388 get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id()); 5389 5390 // sanity 5391 if (h == NULL || h == INVALID_HANDLE_VALUE) { 5392 return; 5393 } 5394 5395 // suspend the thread 5396 if (do_suspend(&h)) { 5397 ctxt.ContextFlags = sampling_context_flags; 5398 // get thread context 5399 GetThreadContext(h, &ctxt); 5400 SuspendedThreadTaskContext context(_thread, &ctxt); 5401 // pass context to Thread Sampling impl 5402 do_task(context); 5403 // resume thread 5404 do_resume(&h); 5405 } 5406 5407 // close handle 5408 CloseHandle(h); 5409 } 5410 5411 bool os::start_debugging(char *buf, int buflen) { 5412 int len = (int)strlen(buf); 5413 char *p = &buf[len]; 5414 5415 jio_snprintf(p, buflen-len, 5416 "\n\n" 5417 "Do you want to debug the problem?\n\n" 5418 "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n" 5419 "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n" 5420 "Otherwise, select 'No' to abort...", 5421 os::current_process_id(), os::current_thread_id()); 5422 5423 bool yes = os::message_box("Unexpected Error", buf); 5424 5425 if (yes) { 5426 // os::breakpoint() calls DebugBreak(), which causes a breakpoint 5427 // exception. If VM is running inside a debugger, the debugger will 5428 // catch the exception. Otherwise, the breakpoint exception will reach 5429 // the default windows exception handler, which can spawn a debugger and 5430 // automatically attach to the dying VM. 
5431 os::breakpoint(); 5432 yes = false; 5433 } 5434 return yes; 5435 } 5436 5437 void* os::get_default_process_handle() { 5438 return (void*)GetModuleHandle(NULL); 5439 } 5440 5441 // Builds a platform dependent Agent_OnLoad_<lib_name> function name 5442 // which is used to find statically linked in agents. 5443 // Additionally for windows, takes into account __stdcall names. 5444 // Parameters: 5445 // sym_name: Symbol in library we are looking for 5446 // lib_name: Name of library to look in, NULL for shared libs. 5447 // is_absolute_path == true if lib_name is absolute path to agent 5448 // such as "C:/a/b/L.dll" 5449 // == false if only the base name of the library is passed in 5450 // such as "L" 5451 char* os::build_agent_function_name(const char *sym_name, const char *lib_name, 5452 bool is_absolute_path) { 5453 char *agent_entry_name; 5454 size_t len; 5455 size_t name_len; 5456 size_t prefix_len = strlen(JNI_LIB_PREFIX); 5457 size_t suffix_len = strlen(JNI_LIB_SUFFIX); 5458 const char *start; 5459 5460 if (lib_name != NULL) { 5461 len = name_len = strlen(lib_name); 5462 if (is_absolute_path) { 5463 // Need to strip path, prefix and suffix 5464 if ((start = strrchr(lib_name, *os::file_separator())) != NULL) { 5465 lib_name = ++start; 5466 } else { 5467 // Need to check for drive prefix 5468 if ((start = strchr(lib_name, ':')) != NULL) { 5469 lib_name = ++start; 5470 } 5471 } 5472 if (len <= (prefix_len + suffix_len)) { 5473 return NULL; 5474 } 5475 lib_name += prefix_len; 5476 name_len = strlen(lib_name) - suffix_len; 5477 } 5478 } 5479 len = (lib_name != NULL ? 
name_len : 0) + strlen(sym_name) + 2; 5480 agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread); 5481 if (agent_entry_name == NULL) { 5482 return NULL; 5483 } 5484 if (lib_name != NULL) { 5485 const char *p = strrchr(sym_name, '@'); 5486 if (p != NULL && p != sym_name) { 5487 // sym_name == _Agent_OnLoad@XX 5488 strncpy(agent_entry_name, sym_name, (p - sym_name)); 5489 agent_entry_name[(p-sym_name)] = '\0'; 5490 // agent_entry_name == _Agent_OnLoad 5491 strcat(agent_entry_name, "_"); 5492 strncat(agent_entry_name, lib_name, name_len); 5493 strcat(agent_entry_name, p); 5494 // agent_entry_name == _Agent_OnLoad_lib_name@XX 5495 } else { 5496 strcpy(agent_entry_name, sym_name); 5497 strcat(agent_entry_name, "_"); 5498 strncat(agent_entry_name, lib_name, name_len); 5499 } 5500 } else { 5501 strcpy(agent_entry_name, sym_name); 5502 } 5503 return agent_entry_name; 5504 } 5505 5506 #ifndef PRODUCT 5507 5508 // test the code path in reserve_memory_special() that tries to allocate memory in a single 5509 // contiguous memory block at a particular address. 5510 // The test first tries to find a good approximate address to allocate at by using the same 5511 // method to allocate some memory at any address. The test then tries to allocate memory in 5512 // the vicinity (not directly after it to avoid possible by-chance use of that location) 5513 // This is of course only some dodgy assumption, there is no guarantee that the vicinity of 5514 // the previously allocated memory is available for allocation. The only actual failure 5515 // that is reported is when the test tries to allocate at a particular location but gets a 5516 // different valid one. A NULL return value at this point is not considered an error but may 5517 // be legitimate. 5518 // If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages. 
5519 void TestReserveMemorySpecial_test() { 5520 if (!UseLargePages) { 5521 if (VerboseInternalVMTests) { 5522 tty->print("Skipping test because large pages are disabled"); 5523 } 5524 return; 5525 } 5526 // save current value of globals 5527 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; 5528 bool old_use_numa_interleaving = UseNUMAInterleaving; 5529 5530 // set globals to make sure we hit the correct code path 5531 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; 5532 5533 // do an allocation at an address selected by the OS to get a good one. 5534 const size_t large_allocation_size = os::large_page_size() * 4; 5535 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); 5536 if (result == NULL) { 5537 if (VerboseInternalVMTests) { 5538 tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.", 5539 large_allocation_size); 5540 } 5541 } else { 5542 os::release_memory_special(result, large_allocation_size); 5543 5544 // allocate another page within the recently allocated memory area which seems to be a good location. At least 5545 // we managed to get it once. 5546 const size_t expected_allocation_size = os::large_page_size(); 5547 char* expected_location = result + os::large_page_size(); 5548 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); 5549 if (actual_location == NULL) { 5550 if (VerboseInternalVMTests) { 5551 tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.", 5552 expected_location, large_allocation_size); 5553 } 5554 } else { 5555 // release memory 5556 os::release_memory_special(actual_location, expected_allocation_size); 5557 // only now check, after releasing any memory to avoid any leaks. 
5558 assert(actual_location == expected_location, 5559 "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead", 5560 expected_location, expected_allocation_size, actual_location); 5561 } 5562 } 5563 5564 // restore globals 5565 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; 5566 UseNUMAInterleaving = old_use_numa_interleaving; 5567 } 5568 #endif // PRODUCT 5569 5570 /* 5571 All the defined signal names for Windows. 5572 5573 NOTE that not all of these names are accepted by FindSignal! 5574 5575 For various reasons some of these may be rejected at runtime. 5576 5577 Here are the names currently accepted by a user of sun.misc.Signal with 5578 1.4.1 (ignoring potential interaction with use of chaining, etc): 5579 5580 (LIST TBD) 5581 5582 */ 5583 int os::get_signal_number(const char* name) { 5584 static const struct { 5585 char* name; 5586 int number; 5587 } siglabels [] = 5588 // derived from version 6.0 VC98/include/signal.h 5589 {"ABRT", SIGABRT, // abnormal termination triggered by abort cl 5590 "FPE", SIGFPE, // floating point exception 5591 "SEGV", SIGSEGV, // segment violation 5592 "INT", SIGINT, // interrupt 5593 "TERM", SIGTERM, // software term signal from kill 5594 "BREAK", SIGBREAK, // Ctrl-Break sequence 5595 "ILL", SIGILL}; // illegal instruction 5596 for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) { 5597 if (strcmp(name, siglabels[i].name) == 0) { 5598 return siglabels[i].number; 5599 } 5600 } 5601 return -1; 5602 } 5603 5604 // Fast current thread access 5605 5606 int os::win32::_thread_ptr_offset = 0; 5607 5608 static void call_wrapper_dummy() {} 5609 5610 // We need to call the os_exception_wrapper once so that it sets 5611 // up the offset from FS of the thread pointer. 5612 void os::win32::initialize_thread_ptr_offset() { 5613 os::os_exception_wrapper((java_call_t)call_wrapper_dummy, 5614 NULL, NULL, NULL, NULL); 5615 }