1 /* 2 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce 26 #define _WIN32_WINNT 0x0600 27 28 // no precompiled headers 29 #include "classfile/classLoader.hpp" 30 #include "classfile/systemDictionary.hpp" 31 #include "classfile/vmSymbols.hpp" 32 #include "code/icBuffer.hpp" 33 #include "code/vtableStubs.hpp" 34 #include "compiler/compileBroker.hpp" 35 #include "compiler/disassembler.hpp" 36 #include "interpreter/interpreter.hpp" 37 #include "jvm_windows.h" 38 #include "logging/log.hpp" 39 #include "memory/allocation.inline.hpp" 40 #include "memory/filemap.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "os_share_windows.hpp" 43 #include "os_windows.inline.hpp" 44 #include "prims/jniFastGetField.hpp" 45 #include "prims/jvm.h" 46 #include "prims/jvm_misc.hpp" 47 #include "runtime/arguments.hpp" 48 #include "runtime/atomic.hpp" 49 #include "runtime/extendedPC.hpp" 50 #include "runtime/globals.hpp" 51 #include "runtime/interfaceSupport.hpp" 52 #include "runtime/java.hpp" 53 #include "runtime/javaCalls.hpp" 54 #include "runtime/mutexLocker.hpp" 55 #include "runtime/objectMonitor.hpp" 56 #include "runtime/orderAccess.inline.hpp" 57 #include "runtime/osThread.hpp" 58 #include "runtime/perfMemory.hpp" 59 #include "runtime/sharedRuntime.hpp" 60 #include "runtime/statSampler.hpp" 61 #include "runtime/stubRoutines.hpp" 62 #include "runtime/thread.inline.hpp" 63 #include "runtime/threadCritical.hpp" 64 #include "runtime/timer.hpp" 65 #include "runtime/vm_version.hpp" 66 #include "semaphore_windows.hpp" 67 #include "services/attachListener.hpp" 68 #include "services/memTracker.hpp" 69 #include "services/runtimeService.hpp" 70 #include "utilities/decoder.hpp" 71 #include "utilities/defaultStream.hpp" 72 #include "utilities/events.hpp" 73 #include "utilities/growableArray.hpp" 74 #include "utilities/macros.hpp" 75 #include "utilities/vmError.hpp" 76 77 #ifdef _DEBUG 78 #include <crtdbg.h> 79 #endif 80 81 82 #include <windows.h> 83 #include 
<sys/types.h> 84 #include <sys/stat.h> 85 #include <sys/timeb.h> 86 #include <objidl.h> 87 #include <shlobj.h> 88 89 #include <malloc.h> 90 #include <signal.h> 91 #include <direct.h> 92 #include <errno.h> 93 #include <fcntl.h> 94 #include <io.h> 95 #include <process.h> // For _beginthreadex(), _endthreadex() 96 #include <imagehlp.h> // For os::dll_address_to_function_name 97 // for enumerating dll libraries 98 #include <vdmdbg.h> 99 100 // for timer info max values which include all bits 101 #define ALL_64_BITS CONST64(-1) 102 103 // For DLL loading/load error detection 104 // Values of PE COFF 105 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c 106 #define IMAGE_FILE_SIGNATURE_LENGTH 4 107 108 static HANDLE main_process; 109 static HANDLE main_thread; 110 static int main_thread_id; 111 112 static FILETIME process_creation_time; 113 static FILETIME process_exit_time; 114 static FILETIME process_user_time; 115 static FILETIME process_kernel_time; 116 117 #ifdef _M_AMD64 118 #define __CPU__ amd64 119 #else 120 #define __CPU__ i486 121 #endif 122 123 // save DLL module handle, used by GetModuleFileName 124 125 HINSTANCE vm_lib_handle; 126 127 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) { 128 switch (reason) { 129 case DLL_PROCESS_ATTACH: 130 vm_lib_handle = hinst; 131 if (ForceTimeHighResolution) { 132 timeBeginPeriod(1L); 133 } 134 break; 135 case DLL_PROCESS_DETACH: 136 if (ForceTimeHighResolution) { 137 timeEndPeriod(1L); 138 } 139 break; 140 default: 141 break; 142 } 143 return true; 144 } 145 146 static inline double fileTimeAsDouble(FILETIME* time) { 147 const double high = (double) ((unsigned int) ~0); 148 const double split = 10000000.0; 149 double result = (time->dwLowDateTime / split) + 150 time->dwHighDateTime * (high/split); 151 return result; 152 } 153 154 // Implementation of os 155 156 bool os::unsetenv(const char* name) { 157 assert(name != NULL, "Null pointer"); 158 return (SetEnvironmentVariable(name, NULL) == TRUE); 159 } 160 161 // 
No setuid programs under Windows. 162 bool os::have_special_privileges() { 163 return false; 164 } 165 166 167 // This method is a periodic task to check for misbehaving JNI applications 168 // under CheckJNI, we can add any periodic checks here. 169 // For Windows at the moment does nothing 170 void os::run_periodic_checks() { 171 return; 172 } 173 174 // previous UnhandledExceptionFilter, if there is one 175 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL; 176 177 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo); 178 179 void os::init_system_properties_values() { 180 // sysclasspath, java_home, dll_dir 181 { 182 char *home_path; 183 char *dll_path; 184 char *pslash; 185 char *bin = "\\bin"; 186 char home_dir[MAX_PATH + 1]; 187 char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR"); 188 189 if (alt_home_dir != NULL) { 190 strncpy(home_dir, alt_home_dir, MAX_PATH + 1); 191 home_dir[MAX_PATH] = '\0'; 192 } else { 193 os::jvm_path(home_dir, sizeof(home_dir)); 194 // Found the full path to jvm.dll. 195 // Now cut the path to <java_home>/jre if we can. 
196 *(strrchr(home_dir, '\\')) = '\0'; // get rid of \jvm.dll 197 pslash = strrchr(home_dir, '\\'); 198 if (pslash != NULL) { 199 *pslash = '\0'; // get rid of \{client|server} 200 pslash = strrchr(home_dir, '\\'); 201 if (pslash != NULL) { 202 *pslash = '\0'; // get rid of \bin 203 } 204 } 205 } 206 207 home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal); 208 if (home_path == NULL) { 209 return; 210 } 211 strcpy(home_path, home_dir); 212 Arguments::set_java_home(home_path); 213 FREE_C_HEAP_ARRAY(char, home_path); 214 215 dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, 216 mtInternal); 217 if (dll_path == NULL) { 218 return; 219 } 220 strcpy(dll_path, home_dir); 221 strcat(dll_path, bin); 222 Arguments::set_dll_dir(dll_path); 223 FREE_C_HEAP_ARRAY(char, dll_path); 224 225 if (!set_boot_path('\\', ';')) { 226 return; 227 } 228 } 229 230 // library_path 231 #define EXT_DIR "\\lib\\ext" 232 #define BIN_DIR "\\bin" 233 #define PACKAGE_DIR "\\Sun\\Java" 234 { 235 // Win32 library search order (See the documentation for LoadLibrary): 236 // 237 // 1. The directory from which application is loaded. 238 // 2. The system wide Java Extensions directory (Java only) 239 // 3. System directory (GetSystemDirectory) 240 // 4. Windows directory (GetWindowsDirectory) 241 // 5. The PATH environment variable 242 // 6. The current directory 243 244 char *library_path; 245 char tmp[MAX_PATH]; 246 char *path_str = ::getenv("PATH"); 247 248 library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) + 249 sizeof(BIN_DIR) + (path_str ? 
strlen(path_str) : 0) + 10, mtInternal); 250 251 library_path[0] = '\0'; 252 253 GetModuleFileName(NULL, tmp, sizeof(tmp)); 254 *(strrchr(tmp, '\\')) = '\0'; 255 strcat(library_path, tmp); 256 257 GetWindowsDirectory(tmp, sizeof(tmp)); 258 strcat(library_path, ";"); 259 strcat(library_path, tmp); 260 strcat(library_path, PACKAGE_DIR BIN_DIR); 261 262 GetSystemDirectory(tmp, sizeof(tmp)); 263 strcat(library_path, ";"); 264 strcat(library_path, tmp); 265 266 GetWindowsDirectory(tmp, sizeof(tmp)); 267 strcat(library_path, ";"); 268 strcat(library_path, tmp); 269 270 if (path_str) { 271 strcat(library_path, ";"); 272 strcat(library_path, path_str); 273 } 274 275 strcat(library_path, ";."); 276 277 Arguments::set_library_path(library_path); 278 FREE_C_HEAP_ARRAY(char, library_path); 279 } 280 281 // Default extensions directory 282 { 283 char path[MAX_PATH]; 284 char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1]; 285 GetWindowsDirectory(path, MAX_PATH); 286 sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR, 287 path, PACKAGE_DIR, EXT_DIR); 288 Arguments::set_ext_dirs(buf); 289 } 290 #undef EXT_DIR 291 #undef BIN_DIR 292 #undef PACKAGE_DIR 293 294 #ifndef _WIN64 295 // set our UnhandledExceptionFilter and save any previous one 296 prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception); 297 #endif 298 299 // Done 300 return; 301 } 302 303 void os::breakpoint() { 304 DebugBreak(); 305 } 306 307 // Invoked from the BREAKPOINT Macro 308 extern "C" void breakpoint() { 309 os::breakpoint(); 310 } 311 312 // RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP. 313 // So far, this method is only used by Native Memory Tracking, which is 314 // only supported on Windows XP or later. 
315 // 316 int os::get_native_stack(address* stack, int frames, int toSkip) { 317 int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL); 318 for (int index = captured; index < frames; index ++) { 319 stack[index] = NULL; 320 } 321 return captured; 322 } 323 324 325 // os::current_stack_base() 326 // 327 // Returns the base of the stack, which is the stack's 328 // starting address. This function must be called 329 // while running on the stack of the thread being queried. 330 331 address os::current_stack_base() { 332 MEMORY_BASIC_INFORMATION minfo; 333 address stack_bottom; 334 size_t stack_size; 335 336 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 337 stack_bottom = (address)minfo.AllocationBase; 338 stack_size = minfo.RegionSize; 339 340 // Add up the sizes of all the regions with the same 341 // AllocationBase. 342 while (1) { 343 VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo)); 344 if (stack_bottom == (address)minfo.AllocationBase) { 345 stack_size += minfo.RegionSize; 346 } else { 347 break; 348 } 349 } 350 return stack_bottom + stack_size; 351 } 352 353 size_t os::current_stack_size() { 354 size_t sz; 355 MEMORY_BASIC_INFORMATION minfo; 356 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 357 sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase; 358 return sz; 359 } 360 361 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 362 const struct tm* time_struct_ptr = localtime(clock); 363 if (time_struct_ptr != NULL) { 364 *res = *time_struct_ptr; 365 return res; 366 } 367 return NULL; 368 } 369 370 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) { 371 const struct tm* time_struct_ptr = gmtime(clock); 372 if (time_struct_ptr != NULL) { 373 *res = *time_struct_ptr; 374 return res; 375 } 376 return NULL; 377 } 378 379 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo); 380 381 // Thread start routine for all newly created threads 382 static unsigned 
__stdcall thread_native_entry(Thread* thread) { 383 // Try to randomize the cache line index of hot stack frames. 384 // This helps when threads of the same stack traces evict each other's 385 // cache lines. The threads can be either from the same JVM instance, or 386 // from different JVM instances. The benefit is especially true for 387 // processors with hyperthreading technology. 388 static int counter = 0; 389 int pid = os::current_process_id(); 390 _alloca(((pid ^ counter++) & 7) * 128); 391 392 thread->initialize_thread_current(); 393 394 OSThread* osthr = thread->osthread(); 395 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 396 397 if (UseNUMA) { 398 int lgrp_id = os::numa_get_group_id(); 399 if (lgrp_id != -1) { 400 thread->set_lgrp_id(lgrp_id); 401 } 402 } 403 404 // Diagnostic code to investigate JDK-6573254 405 int res = 30115; // non-java thread 406 if (thread->is_Java_thread()) { 407 res = 20115; // java thread 408 } 409 410 log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id()); 411 412 // Install a win32 structured exception handler around every thread created 413 // by VM, so VM can generate error dump when an exception occurred in non- 414 // Java thread (e.g. VM thread). 415 __try { 416 thread->run(); 417 } __except(topLevelExceptionFilter( 418 (_EXCEPTION_POINTERS*)_exception_info())) { 419 // Nothing to do. 
420 } 421 422 log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id()); 423 424 // One less thread is executing 425 // When the VMThread gets here, the main thread may have already exited 426 // which frees the CodeHeap containing the Atomic::add code 427 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) { 428 Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count); 429 } 430 431 // If a thread has not deleted itself ("delete this") as part of its 432 // termination sequence, we have to ensure thread-local-storage is 433 // cleared before we actually terminate. No threads should ever be 434 // deleted asynchronously with respect to their termination. 435 if (Thread::current_or_null_safe() != NULL) { 436 assert(Thread::current_or_null_safe() == thread, "current thread is wrong"); 437 thread->clear_thread_current(); 438 } 439 440 // Thread must not return from exit_process_or_thread(), but if it does, 441 // let it proceed to exit normally 442 return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res); 443 } 444 445 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, 446 int thread_id) { 447 // Allocate the OSThread object 448 OSThread* osthread = new OSThread(NULL, NULL); 449 if (osthread == NULL) return NULL; 450 451 // Initialize support for Java interrupts 452 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 453 if (interrupt_event == NULL) { 454 delete osthread; 455 return NULL; 456 } 457 osthread->set_interrupt_event(interrupt_event); 458 459 // Store info on the Win32 thread into the OSThread 460 osthread->set_thread_handle(thread_handle); 461 osthread->set_thread_id(thread_id); 462 463 if (UseNUMA) { 464 int lgrp_id = os::numa_get_group_id(); 465 if (lgrp_id != -1) { 466 thread->set_lgrp_id(lgrp_id); 467 } 468 } 469 470 // Initial thread state is INITIALIZED, not SUSPENDED 471 osthread->set_state(INITIALIZED); 472 473 return osthread; 474 } 475 476 477 
bool os::create_attached_thread(JavaThread* thread) { 478 #ifdef ASSERT 479 thread->verify_not_published(); 480 #endif 481 HANDLE thread_h; 482 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(), 483 &thread_h, THREAD_ALL_ACCESS, false, 0)) { 484 fatal("DuplicateHandle failed\n"); 485 } 486 OSThread* osthread = create_os_thread(thread, thread_h, 487 (int)current_thread_id()); 488 if (osthread == NULL) { 489 return false; 490 } 491 492 // Initial thread state is RUNNABLE 493 osthread->set_state(RUNNABLE); 494 495 thread->set_osthread(osthread); 496 497 log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").", 498 os::current_thread_id()); 499 500 return true; 501 } 502 503 bool os::create_main_thread(JavaThread* thread) { 504 #ifdef ASSERT 505 thread->verify_not_published(); 506 #endif 507 if (_starting_thread == NULL) { 508 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 509 if (_starting_thread == NULL) { 510 return false; 511 } 512 } 513 514 // The primordial thread is runnable from the start) 515 _starting_thread->set_state(RUNNABLE); 516 517 thread->set_osthread(_starting_thread); 518 return true; 519 } 520 521 // Helper function to trace _beginthreadex attributes, 522 // similar to os::Posix::describe_pthread_attr() 523 static char* describe_beginthreadex_attributes(char* buf, size_t buflen, 524 size_t stacksize, unsigned initflag) { 525 stringStream ss(buf, buflen); 526 if (stacksize == 0) { 527 ss.print("stacksize: default, "); 528 } else { 529 ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024); 530 } 531 ss.print("flags: "); 532 #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " "); 533 #define ALL(X) \ 534 X(CREATE_SUSPENDED) \ 535 X(STACK_SIZE_PARAM_IS_A_RESERVATION) 536 ALL(PRINT_FLAG) 537 #undef ALL 538 #undef PRINT_FLAG 539 return buf; 540 } 541 542 // Allocate and initialize a new OSThread 543 bool os::create_thread(Thread* thread, ThreadType thr_type, 544 size_t stack_size) { 545 
unsigned thread_id; 546 547 // Allocate the OSThread object 548 OSThread* osthread = new OSThread(NULL, NULL); 549 if (osthread == NULL) { 550 return false; 551 } 552 553 // Initialize support for Java interrupts 554 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 555 if (interrupt_event == NULL) { 556 delete osthread; 557 return NULL; 558 } 559 osthread->set_interrupt_event(interrupt_event); 560 osthread->set_interrupted(false); 561 562 thread->set_osthread(osthread); 563 564 if (stack_size == 0) { 565 switch (thr_type) { 566 case os::java_thread: 567 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 568 if (JavaThread::stack_size_at_create() > 0) { 569 stack_size = JavaThread::stack_size_at_create(); 570 } 571 break; 572 case os::compiler_thread: 573 if (CompilerThreadStackSize > 0) { 574 stack_size = (size_t)(CompilerThreadStackSize * K); 575 break; 576 } // else fall through: 577 // use VMThreadStackSize if CompilerThreadStackSize is not defined 578 case os::vm_thread: 579 case os::pgc_thread: 580 case os::cgc_thread: 581 case os::watcher_thread: 582 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 583 break; 584 } 585 } 586 587 // Create the Win32 thread 588 // 589 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() 590 // does not specify stack size. Instead, it specifies the size of 591 // initially committed space. The stack size is determined by 592 // PE header in the executable. If the committed "stack_size" is larger 593 // than default value in the PE header, the stack is rounded up to the 594 // nearest multiple of 1MB. For example if the launcher has default 595 // stack size of 320k, specifying any size less than 320k does not 596 // affect the actual stack size at all, it only affects the initial 597 // commitment. 
On the other hand, specifying 'stack_size' larger than 598 // default value may cause significant increase in memory usage, because 599 // not only the stack space will be rounded up to MB, but also the 600 // entire space is committed upfront. 601 // 602 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' 603 // for CreateThread() that can treat 'stack_size' as stack size. However we 604 // are not supposed to call CreateThread() directly according to MSDN 605 // document because JVM uses C runtime library. The good news is that the 606 // flag appears to work with _beginthredex() as well. 607 608 const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION; 609 HANDLE thread_handle = 610 (HANDLE)_beginthreadex(NULL, 611 (unsigned)stack_size, 612 (unsigned (__stdcall *)(void*)) thread_native_entry, 613 thread, 614 initflag, 615 &thread_id); 616 617 char buf[64]; 618 if (thread_handle != NULL) { 619 log_info(os, thread)("Thread started (tid: %u, attributes: %s)", 620 thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag)); 621 } else { 622 log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.", 623 os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag)); 624 } 625 626 if (thread_handle == NULL) { 627 // Need to clean up stuff we've allocated so far 628 CloseHandle(osthread->interrupt_event()); 629 thread->set_osthread(NULL); 630 delete osthread; 631 return NULL; 632 } 633 634 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count); 635 636 // Store info on the Win32 thread into the OSThread 637 osthread->set_thread_handle(thread_handle); 638 osthread->set_thread_id(thread_id); 639 640 // Initial thread state is INITIALIZED, not SUSPENDED 641 osthread->set_state(INITIALIZED); 642 643 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 644 return true; 645 } 646 
647 648 // Free Win32 resources related to the OSThread 649 void os::free_thread(OSThread* osthread) { 650 assert(osthread != NULL, "osthread not set"); 651 652 // We are told to free resources of the argument thread, 653 // but we can only really operate on the current thread. 654 assert(Thread::current()->osthread() == osthread, 655 "os::free_thread but not current thread"); 656 657 CloseHandle(osthread->thread_handle()); 658 CloseHandle(osthread->interrupt_event()); 659 delete osthread; 660 } 661 662 static jlong first_filetime; 663 static jlong initial_performance_count; 664 static jlong performance_frequency; 665 666 667 jlong as_long(LARGE_INTEGER x) { 668 jlong result = 0; // initialization to avoid warning 669 set_high(&result, x.HighPart); 670 set_low(&result, x.LowPart); 671 return result; 672 } 673 674 675 jlong os::elapsed_counter() { 676 LARGE_INTEGER count; 677 QueryPerformanceCounter(&count); 678 return as_long(count) - initial_performance_count; 679 } 680 681 682 jlong os::elapsed_frequency() { 683 return performance_frequency; 684 } 685 686 687 julong os::available_memory() { 688 return win32::available_memory(); 689 } 690 691 julong os::win32::available_memory() { 692 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 693 // value if total memory is larger than 4GB 694 MEMORYSTATUSEX ms; 695 ms.dwLength = sizeof(ms); 696 GlobalMemoryStatusEx(&ms); 697 698 return (julong)ms.ullAvailPhys; 699 } 700 701 julong os::physical_memory() { 702 return win32::physical_memory(); 703 } 704 705 bool os::has_allocatable_memory_limit(julong* limit) { 706 MEMORYSTATUSEX ms; 707 ms.dwLength = sizeof(ms); 708 GlobalMemoryStatusEx(&ms); 709 #ifdef _LP64 710 *limit = (julong)ms.ullAvailVirtual; 711 return true; 712 #else 713 // Limit to 1400m because of the 2gb address space wall 714 *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual); 715 return true; 716 #endif 717 } 718 719 int os::active_processor_count() { 720 DWORD_PTR 
lpProcessAffinityMask = 0; 721 DWORD_PTR lpSystemAffinityMask = 0; 722 int proc_count = processor_count(); 723 if (proc_count <= sizeof(UINT_PTR) * BitsPerByte && 724 GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) { 725 // Nof active processors is number of bits in process affinity mask 726 int bitcount = 0; 727 while (lpProcessAffinityMask != 0) { 728 lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1); 729 bitcount++; 730 } 731 return bitcount; 732 } else { 733 return proc_count; 734 } 735 } 736 737 void os::set_native_thread_name(const char *name) { 738 739 // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx 740 // 741 // Note that unfortunately this only works if the process 742 // is already attached to a debugger; debugger must observe 743 // the exception below to show the correct name. 744 745 // If there is no debugger attached skip raising the exception 746 if (!IsDebuggerPresent()) { 747 return; 748 } 749 750 const DWORD MS_VC_EXCEPTION = 0x406D1388; 751 struct { 752 DWORD dwType; // must be 0x1000 753 LPCSTR szName; // pointer to name (in user addr space) 754 DWORD dwThreadID; // thread ID (-1=caller thread) 755 DWORD dwFlags; // reserved for future use, must be zero 756 } info; 757 758 info.dwType = 0x1000; 759 info.szName = name; 760 info.dwThreadID = -1; 761 info.dwFlags = 0; 762 763 __try { 764 RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info ); 765 } __except(EXCEPTION_EXECUTE_HANDLER) {} 766 } 767 768 bool os::distribute_processes(uint length, uint* distribution) { 769 // Not yet implemented. 770 return false; 771 } 772 773 bool os::bind_to_processor(uint processor_id) { 774 // Not yet implemented. 
775 return false; 776 } 777 778 void os::win32::initialize_performance_counter() { 779 LARGE_INTEGER count; 780 QueryPerformanceFrequency(&count); 781 performance_frequency = as_long(count); 782 QueryPerformanceCounter(&count); 783 initial_performance_count = as_long(count); 784 } 785 786 787 double os::elapsedTime() { 788 return (double) elapsed_counter() / (double) elapsed_frequency(); 789 } 790 791 792 // Windows format: 793 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601. 794 // Java format: 795 // Java standards require the number of milliseconds since 1/1/1970 796 797 // Constant offset - calculated using offset() 798 static jlong _offset = 116444736000000000; 799 // Fake time counter for reproducible results when debugging 800 static jlong fake_time = 0; 801 802 #ifdef ASSERT 803 // Just to be safe, recalculate the offset in debug mode 804 static jlong _calculated_offset = 0; 805 static int _has_calculated_offset = 0; 806 807 jlong offset() { 808 if (_has_calculated_offset) return _calculated_offset; 809 SYSTEMTIME java_origin; 810 java_origin.wYear = 1970; 811 java_origin.wMonth = 1; 812 java_origin.wDayOfWeek = 0; // ignored 813 java_origin.wDay = 1; 814 java_origin.wHour = 0; 815 java_origin.wMinute = 0; 816 java_origin.wSecond = 0; 817 java_origin.wMilliseconds = 0; 818 FILETIME jot; 819 if (!SystemTimeToFileTime(&java_origin, &jot)) { 820 fatal("Error = %d\nWindows error", GetLastError()); 821 } 822 _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime); 823 _has_calculated_offset = 1; 824 assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal"); 825 return _calculated_offset; 826 } 827 #else 828 jlong offset() { 829 return _offset; 830 } 831 #endif 832 833 jlong windows_to_java_time(FILETIME wt) { 834 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 835 return (a - offset()) / 10000; 836 } 837 838 // Returns time ticks in 
(10th of micro seconds) 839 jlong windows_to_time_ticks(FILETIME wt) { 840 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 841 return (a - offset()); 842 } 843 844 FILETIME java_to_windows_time(jlong l) { 845 jlong a = (l * 10000) + offset(); 846 FILETIME result; 847 result.dwHighDateTime = high(a); 848 result.dwLowDateTime = low(a); 849 return result; 850 } 851 852 bool os::supports_vtime() { return true; } 853 bool os::enable_vtime() { return false; } 854 bool os::vtime_enabled() { return false; } 855 856 double os::elapsedVTime() { 857 FILETIME created; 858 FILETIME exited; 859 FILETIME kernel; 860 FILETIME user; 861 if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) { 862 // the resolution of windows_to_java_time() should be sufficient (ms) 863 return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS; 864 } else { 865 return elapsedTime(); 866 } 867 } 868 869 jlong os::javaTimeMillis() { 870 if (UseFakeTimers) { 871 return fake_time++; 872 } else { 873 FILETIME wt; 874 GetSystemTimeAsFileTime(&wt); 875 return windows_to_java_time(wt); 876 } 877 } 878 879 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) { 880 FILETIME wt; 881 GetSystemTimeAsFileTime(&wt); 882 jlong ticks = windows_to_time_ticks(wt); // 10th of micros 883 jlong secs = jlong(ticks / 10000000); // 10000 * 1000 884 seconds = secs; 885 nanos = jlong(ticks - (secs*10000000)) * 100; 886 } 887 888 jlong os::javaTimeNanos() { 889 LARGE_INTEGER current_count; 890 QueryPerformanceCounter(¤t_count); 891 double current = as_long(current_count); 892 double freq = performance_frequency; 893 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC); 894 return time; 895 } 896 897 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 898 jlong freq = performance_frequency; 899 if (freq < NANOSECS_PER_SEC) { 900 // the performance counter is 64 bits and we will 901 // be multiplying it -- so no wrap in 64 bits 902 info_ptr->max_value 
= ALL_64_BITS; 903 } else if (freq > NANOSECS_PER_SEC) { 904 // use the max value the counter can reach to 905 // determine the max value which could be returned 906 julong max_counter = (julong)ALL_64_BITS; 907 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC)); 908 } else { 909 // the performance counter is 64 bits and we will 910 // be using it directly -- so no wrap in 64 bits 911 info_ptr->max_value = ALL_64_BITS; 912 } 913 914 // using a counter, so no skipping 915 info_ptr->may_skip_backward = false; 916 info_ptr->may_skip_forward = false; 917 918 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time 919 } 920 921 char* os::local_time_string(char *buf, size_t buflen) { 922 SYSTEMTIME st; 923 GetLocalTime(&st); 924 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 925 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); 926 return buf; 927 } 928 929 bool os::getTimesSecs(double* process_real_time, 930 double* process_user_time, 931 double* process_system_time) { 932 HANDLE h_process = GetCurrentProcess(); 933 FILETIME create_time, exit_time, kernel_time, user_time; 934 BOOL result = GetProcessTimes(h_process, 935 &create_time, 936 &exit_time, 937 &kernel_time, 938 &user_time); 939 if (result != 0) { 940 FILETIME wt; 941 GetSystemTimeAsFileTime(&wt); 942 jlong rtc_millis = windows_to_java_time(wt); 943 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS); 944 *process_user_time = 945 (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS); 946 *process_system_time = 947 (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS); 948 return true; 949 } else { 950 return false; 951 } 952 } 953 954 void os::shutdown() { 955 // allow PerfMemory to attempt cleanup of any persistent resources 956 perfMemory_exit(); 957 958 // flush buffered output, finish log files 959 ostream_abort(); 960 961 // Check for abort hook 962 abort_hook_t 
abort_hook = Arguments::abort_hook(); 963 if (abort_hook != NULL) { 964 abort_hook(); 965 } 966 } 967 968 969 static BOOL (WINAPI *_MiniDumpWriteDump)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, 970 PMINIDUMP_EXCEPTION_INFORMATION, 971 PMINIDUMP_USER_STREAM_INFORMATION, 972 PMINIDUMP_CALLBACK_INFORMATION); 973 974 static HANDLE dumpFile = NULL; 975 976 // Check if dump file can be created. 977 void os::check_dump_limit(char* buffer, size_t buffsz) { 978 bool status = true; 979 if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) { 980 jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line"); 981 status = false; 982 } 983 984 #ifndef ASSERT 985 if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) { 986 jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows"); 987 status = false; 988 } 989 #endif 990 991 if (status) { 992 const char* cwd = get_current_directory(NULL, 0); 993 int pid = current_process_id(); 994 if (cwd != NULL) { 995 jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid); 996 } else { 997 jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid); 998 } 999 1000 if (dumpFile == NULL && 1001 (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL)) 1002 == INVALID_HANDLE_VALUE) { 1003 jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError()); 1004 status = false; 1005 } 1006 } 1007 VMError::record_coredump_status(buffer, status); 1008 } 1009 1010 void os::abort(bool dump_core, void* siginfo, const void* context) { 1011 HINSTANCE dbghelp; 1012 EXCEPTION_POINTERS ep; 1013 MINIDUMP_EXCEPTION_INFORMATION mei; 1014 MINIDUMP_EXCEPTION_INFORMATION* pmei; 1015 1016 HANDLE hProcess = GetCurrentProcess(); 1017 DWORD processId = GetCurrentProcessId(); 1018 MINIDUMP_TYPE dumpType; 1019 1020 shutdown(); 1021 if (!dump_core || dumpFile == NULL) { 1022 if (dumpFile != NULL) { 1023 
CloseHandle(dumpFile); 1024 } 1025 win32::exit_process_or_thread(win32::EPT_PROCESS, 1); 1026 } 1027 1028 dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0); 1029 1030 if (dbghelp == NULL) { 1031 jio_fprintf(stderr, "Failed to load dbghelp.dll\n"); 1032 CloseHandle(dumpFile); 1033 win32::exit_process_or_thread(win32::EPT_PROCESS, 1); 1034 } 1035 1036 _MiniDumpWriteDump = 1037 CAST_TO_FN_PTR(BOOL(WINAPI *)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, 1038 PMINIDUMP_EXCEPTION_INFORMATION, 1039 PMINIDUMP_USER_STREAM_INFORMATION, 1040 PMINIDUMP_CALLBACK_INFORMATION), 1041 GetProcAddress(dbghelp, 1042 "MiniDumpWriteDump")); 1043 1044 if (_MiniDumpWriteDump == NULL) { 1045 jio_fprintf(stderr, "Failed to find MiniDumpWriteDump() in module dbghelp.dll.\n"); 1046 CloseHandle(dumpFile); 1047 win32::exit_process_or_thread(win32::EPT_PROCESS, 1); 1048 } 1049 1050 dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData | 1051 MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules); 1052 1053 if (siginfo != NULL && context != NULL) { 1054 ep.ContextRecord = (PCONTEXT) context; 1055 ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo; 1056 1057 mei.ThreadId = GetCurrentThreadId(); 1058 mei.ExceptionPointers = &ep; 1059 pmei = &mei; 1060 } else { 1061 pmei = NULL; 1062 } 1063 1064 // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all 1065 // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then. 
1066 if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false && 1067 _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) { 1068 jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError()); 1069 } 1070 CloseHandle(dumpFile); 1071 win32::exit_process_or_thread(win32::EPT_PROCESS, 1); 1072 } 1073 1074 // Die immediately, no exit hook, no abort hook, no cleanup. 1075 void os::die() { 1076 win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1); 1077 } 1078 1079 // Directory routines copied from src/win32/native/java/io/dirent_md.c 1080 // * dirent_md.c 1.15 00/02/02 1081 // 1082 // The declarations for DIR and struct dirent are in jvm_win32.h. 1083 1084 // Caller must have already run dirname through JVM_NativePath, which removes 1085 // duplicate slashes and converts all instances of '/' into '\\'. 1086 1087 DIR * os::opendir(const char *dirname) { 1088 assert(dirname != NULL, "just checking"); // hotspot change 1089 DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal); 1090 DWORD fattr; // hotspot change 1091 char alt_dirname[4] = { 0, 0, 0, 0 }; 1092 1093 if (dirp == 0) { 1094 errno = ENOMEM; 1095 return 0; 1096 } 1097 1098 // Win32 accepts "\" in its POSIX stat(), but refuses to treat it 1099 // as a directory in FindFirstFile(). We detect this case here and 1100 // prepend the current drive name. 
1101 // 1102 if (dirname[1] == '\0' && dirname[0] == '\\') { 1103 alt_dirname[0] = _getdrive() + 'A' - 1; 1104 alt_dirname[1] = ':'; 1105 alt_dirname[2] = '\\'; 1106 alt_dirname[3] = '\0'; 1107 dirname = alt_dirname; 1108 } 1109 1110 dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal); 1111 if (dirp->path == 0) { 1112 free(dirp); 1113 errno = ENOMEM; 1114 return 0; 1115 } 1116 strcpy(dirp->path, dirname); 1117 1118 fattr = GetFileAttributes(dirp->path); 1119 if (fattr == 0xffffffff) { 1120 free(dirp->path); 1121 free(dirp); 1122 errno = ENOENT; 1123 return 0; 1124 } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) { 1125 free(dirp->path); 1126 free(dirp); 1127 errno = ENOTDIR; 1128 return 0; 1129 } 1130 1131 // Append "*.*", or possibly "\\*.*", to path 1132 if (dirp->path[1] == ':' && 1133 (dirp->path[2] == '\0' || 1134 (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) { 1135 // No '\\' needed for cases like "Z:" or "Z:\" 1136 strcat(dirp->path, "*.*"); 1137 } else { 1138 strcat(dirp->path, "\\*.*"); 1139 } 1140 1141 dirp->handle = FindFirstFile(dirp->path, &dirp->find_data); 1142 if (dirp->handle == INVALID_HANDLE_VALUE) { 1143 if (GetLastError() != ERROR_FILE_NOT_FOUND) { 1144 free(dirp->path); 1145 free(dirp); 1146 errno = EACCES; 1147 return 0; 1148 } 1149 } 1150 return dirp; 1151 } 1152 1153 // parameter dbuf unused on Windows 1154 struct dirent * os::readdir(DIR *dirp, dirent *dbuf) { 1155 assert(dirp != NULL, "just checking"); // hotspot change 1156 if (dirp->handle == INVALID_HANDLE_VALUE) { 1157 return 0; 1158 } 1159 1160 strcpy(dirp->dirent.d_name, dirp->find_data.cFileName); 1161 1162 if (!FindNextFile(dirp->handle, &dirp->find_data)) { 1163 if (GetLastError() == ERROR_INVALID_HANDLE) { 1164 errno = EBADF; 1165 return 0; 1166 } 1167 FindClose(dirp->handle); 1168 dirp->handle = INVALID_HANDLE_VALUE; 1169 } 1170 1171 return &dirp->dirent; 1172 } 1173 1174 int os::closedir(DIR *dirp) { 1175 assert(dirp != NULL, "just checking"); // hotspot 
change 1176 if (dirp->handle != INVALID_HANDLE_VALUE) { 1177 if (!FindClose(dirp->handle)) { 1178 errno = EBADF; 1179 return -1; 1180 } 1181 dirp->handle = INVALID_HANDLE_VALUE; 1182 } 1183 free(dirp->path); 1184 free(dirp); 1185 return 0; 1186 } 1187 1188 // This must be hard coded because it's the system's temporary 1189 // directory not the java application's temp directory, ala java.io.tmpdir. 1190 const char* os::get_temp_directory() { 1191 static char path_buf[MAX_PATH]; 1192 if (GetTempPath(MAX_PATH, path_buf) > 0) { 1193 return path_buf; 1194 } else { 1195 path_buf[0] = '\0'; 1196 return path_buf; 1197 } 1198 } 1199 1200 static bool file_exists(const char* filename) { 1201 if (filename == NULL || strlen(filename) == 0) { 1202 return false; 1203 } 1204 return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES; 1205 } 1206 1207 bool os::dll_build_name(char *buffer, size_t buflen, 1208 const char* pname, const char* fname) { 1209 bool retval = false; 1210 const size_t pnamelen = pname ? strlen(pname) : 0; 1211 const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0; 1212 1213 // Return error on buffer overflow. 1214 if (pnamelen + strlen(fname) + 10 > buflen) { 1215 return retval; 1216 } 1217 1218 if (pnamelen == 0) { 1219 jio_snprintf(buffer, buflen, "%s.dll", fname); 1220 retval = true; 1221 } else if (c == ':' || c == '\\') { 1222 jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname); 1223 retval = true; 1224 } else if (strchr(pname, *os::path_separator()) != NULL) { 1225 int n; 1226 char** pelements = split_path(pname, &n); 1227 if (pelements == NULL) { 1228 return false; 1229 } 1230 for (int i = 0; i < n; i++) { 1231 char* path = pelements[i]; 1232 // Really shouldn't be NULL, but check can't hurt 1233 size_t plen = (path == NULL) ? 
0 : strlen(path); 1234 if (plen == 0) { 1235 continue; // skip the empty path values 1236 } 1237 const char lastchar = path[plen - 1]; 1238 if (lastchar == ':' || lastchar == '\\') { 1239 jio_snprintf(buffer, buflen, "%s%s.dll", path, fname); 1240 } else { 1241 jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname); 1242 } 1243 if (file_exists(buffer)) { 1244 retval = true; 1245 break; 1246 } 1247 } 1248 // release the storage 1249 for (int i = 0; i < n; i++) { 1250 if (pelements[i] != NULL) { 1251 FREE_C_HEAP_ARRAY(char, pelements[i]); 1252 } 1253 } 1254 if (pelements != NULL) { 1255 FREE_C_HEAP_ARRAY(char*, pelements); 1256 } 1257 } else { 1258 jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname); 1259 retval = true; 1260 } 1261 return retval; 1262 } 1263 1264 // Needs to be in os specific directory because windows requires another 1265 // header file <direct.h> 1266 const char* os::get_current_directory(char *buf, size_t buflen) { 1267 int n = static_cast<int>(buflen); 1268 if (buflen > INT_MAX) n = INT_MAX; 1269 return _getcwd(buf, n); 1270 } 1271 1272 //----------------------------------------------------------- 1273 // Helper functions for fatal error handler 1274 #ifdef _WIN64 1275 // Helper routine which returns true if address in 1276 // within the NTDLL address space. 
1277 // 1278 static bool _addr_in_ntdll(address addr) { 1279 HMODULE hmod; 1280 MODULEINFO minfo; 1281 1282 hmod = GetModuleHandle("NTDLL.DLL"); 1283 if (hmod == NULL) return false; 1284 if (!GetModuleInformation(GetCurrentProcess(), hmod, 1285 &minfo, sizeof(MODULEINFO))) { 1286 return false; 1287 } 1288 1289 if ((addr >= minfo.lpBaseOfDll) && 1290 (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) { 1291 return true; 1292 } else { 1293 return false; 1294 } 1295 } 1296 #endif 1297 1298 struct _modinfo { 1299 address addr; 1300 char* full_path; // point to a char buffer 1301 int buflen; // size of the buffer 1302 address base_addr; 1303 }; 1304 1305 static int _locate_module_by_addr(const char * mod_fname, address base_addr, 1306 address top_address, void * param) { 1307 struct _modinfo *pmod = (struct _modinfo *)param; 1308 if (!pmod) return -1; 1309 1310 if (base_addr <= pmod->addr && 1311 top_address > pmod->addr) { 1312 // if a buffer is provided, copy path name to the buffer 1313 if (pmod->full_path) { 1314 jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname); 1315 } 1316 pmod->base_addr = base_addr; 1317 return 1; 1318 } 1319 return 0; 1320 } 1321 1322 bool os::dll_address_to_library_name(address addr, char* buf, 1323 int buflen, int* offset) { 1324 // buf is not optional, but offset is optional 1325 assert(buf != NULL, "sanity check"); 1326 1327 // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always 1328 // return the full path to the DLL file, sometimes it returns path 1329 // to the corresponding PDB file (debug info); sometimes it only 1330 // returns partial path, which makes life painful. 
1331 1332 struct _modinfo mi; 1333 mi.addr = addr; 1334 mi.full_path = buf; 1335 mi.buflen = buflen; 1336 if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) { 1337 // buf already contains path name 1338 if (offset) *offset = addr - mi.base_addr; 1339 return true; 1340 } 1341 1342 buf[0] = '\0'; 1343 if (offset) *offset = -1; 1344 return false; 1345 } 1346 1347 bool os::dll_address_to_function_name(address addr, char *buf, 1348 int buflen, int *offset, 1349 bool demangle) { 1350 // buf is not optional, but offset is optional 1351 assert(buf != NULL, "sanity check"); 1352 1353 if (Decoder::decode(addr, buf, buflen, offset, demangle)) { 1354 return true; 1355 } 1356 if (offset != NULL) *offset = -1; 1357 buf[0] = '\0'; 1358 return false; 1359 } 1360 1361 // save the start and end address of jvm.dll into param[0] and param[1] 1362 static int _locate_jvm_dll(const char* mod_fname, address base_addr, 1363 address top_address, void * param) { 1364 if (!param) return -1; 1365 1366 if (base_addr <= (address)_locate_jvm_dll && 1367 top_address > (address)_locate_jvm_dll) { 1368 ((address*)param)[0] = base_addr; 1369 ((address*)param)[1] = top_address; 1370 return 1; 1371 } 1372 return 0; 1373 } 1374 1375 address vm_lib_location[2]; // start and end address of jvm.dll 1376 1377 // check if addr is inside jvm.dll 1378 bool os::address_is_in_vm(address addr) { 1379 if (!vm_lib_location[0] || !vm_lib_location[1]) { 1380 if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) { 1381 assert(false, "Can't find jvm module."); 1382 return false; 1383 } 1384 } 1385 1386 return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]); 1387 } 1388 1389 // print module info; param is outputStream* 1390 static int _print_module(const char* fname, address base_address, 1391 address top_address, void* param) { 1392 if (!param) return -1; 1393 1394 outputStream* st = (outputStream*)param; 1395 1396 st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", 
base_address, top_address, fname); 1397 return 0; 1398 } 1399 1400 // Loads .dll/.so and 1401 // in case of error it checks if .dll/.so was built for the 1402 // same architecture as Hotspot is running on 1403 void * os::dll_load(const char *name, char *ebuf, int ebuflen) { 1404 void * result = LoadLibrary(name); 1405 if (result != NULL) { 1406 return result; 1407 } 1408 1409 DWORD errcode = GetLastError(); 1410 if (errcode == ERROR_MOD_NOT_FOUND) { 1411 strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1); 1412 ebuf[ebuflen - 1] = '\0'; 1413 return NULL; 1414 } 1415 1416 // Parsing dll below 1417 // If we can read dll-info and find that dll was built 1418 // for an architecture other than Hotspot is running in 1419 // - then print to buffer "DLL was built for a different architecture" 1420 // else call os::lasterror to obtain system error message 1421 1422 // Read system error message into ebuf 1423 // It may or may not be overwritten below (in the for loop and just above) 1424 lasterror(ebuf, (size_t) ebuflen); 1425 ebuf[ebuflen - 1] = '\0'; 1426 int fd = ::open(name, O_RDONLY | O_BINARY, 0); 1427 if (fd < 0) { 1428 return NULL; 1429 } 1430 1431 uint32_t signature_offset; 1432 uint16_t lib_arch = 0; 1433 bool failed_to_get_lib_arch = 1434 ( // Go to position 3c in the dll 1435 (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0) 1436 || 1437 // Read location of signature 1438 (sizeof(signature_offset) != 1439 (os::read(fd, (void*)&signature_offset, sizeof(signature_offset)))) 1440 || 1441 // Go to COFF File Header in dll 1442 // that is located after "signature" (4 bytes long) 1443 (os::seek_to_file_offset(fd, 1444 signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0) 1445 || 1446 // Read field that contains code of architecture 1447 // that dll was built for 1448 (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch)))) 1449 ); 1450 1451 ::close(fd); 1452 if (failed_to_get_lib_arch) { 1453 // file i/o error - report 
os::lasterror(...) msg 1454 return NULL; 1455 } 1456 1457 typedef struct { 1458 uint16_t arch_code; 1459 char* arch_name; 1460 } arch_t; 1461 1462 static const arch_t arch_array[] = { 1463 {IMAGE_FILE_MACHINE_I386, (char*)"IA 32"}, 1464 {IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"} 1465 }; 1466 #if (defined _M_AMD64) 1467 static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64; 1468 #elif (defined _M_IX86) 1469 static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386; 1470 #else 1471 #error Method os::dll_load requires that one of following \ 1472 is defined :_M_AMD64 or _M_IX86 1473 #endif 1474 1475 1476 // Obtain a string for printf operation 1477 // lib_arch_str shall contain string what platform this .dll was built for 1478 // running_arch_str shall string contain what platform Hotspot was built for 1479 char *running_arch_str = NULL, *lib_arch_str = NULL; 1480 for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) { 1481 if (lib_arch == arch_array[i].arch_code) { 1482 lib_arch_str = arch_array[i].arch_name; 1483 } 1484 if (running_arch == arch_array[i].arch_code) { 1485 running_arch_str = arch_array[i].arch_name; 1486 } 1487 } 1488 1489 assert(running_arch_str, 1490 "Didn't find running architecture code in arch_array"); 1491 1492 // If the architecture is right 1493 // but some other error took place - report os::lasterror(...) 
msg 1494 if (lib_arch == running_arch) { 1495 return NULL; 1496 } 1497 1498 if (lib_arch_str != NULL) { 1499 ::_snprintf(ebuf, ebuflen - 1, 1500 "Can't load %s-bit .dll on a %s-bit platform", 1501 lib_arch_str, running_arch_str); 1502 } else { 1503 // don't know what architecture this dll was build for 1504 ::_snprintf(ebuf, ebuflen - 1, 1505 "Can't load this .dll (machine code=0x%x) on a %s-bit platform", 1506 lib_arch, running_arch_str); 1507 } 1508 1509 return NULL; 1510 } 1511 1512 void os::print_dll_info(outputStream *st) { 1513 st->print_cr("Dynamic libraries:"); 1514 get_loaded_modules_info(_print_module, (void *)st); 1515 } 1516 1517 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) { 1518 HANDLE hProcess; 1519 1520 # define MAX_NUM_MODULES 128 1521 HMODULE modules[MAX_NUM_MODULES]; 1522 static char filename[MAX_PATH]; 1523 int result = 0; 1524 1525 int pid = os::current_process_id(); 1526 hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, 1527 FALSE, pid); 1528 if (hProcess == NULL) return 0; 1529 1530 DWORD size_needed; 1531 if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) { 1532 CloseHandle(hProcess); 1533 return 0; 1534 } 1535 1536 // number of modules that are currently loaded 1537 int num_modules = size_needed / sizeof(HMODULE); 1538 1539 for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) { 1540 // Get Full pathname: 1541 if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) { 1542 filename[0] = '\0'; 1543 } 1544 1545 MODULEINFO modinfo; 1546 if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) { 1547 modinfo.lpBaseOfDll = NULL; 1548 modinfo.SizeOfImage = 0; 1549 } 1550 1551 // Invoke callback function 1552 result = callback(filename, (address)modinfo.lpBaseOfDll, 1553 (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param); 1554 if (result) break; 1555 } 1556 1557 CloseHandle(hProcess); 1558 return result; 
1559 } 1560 1561 bool os::get_host_name(char* buf, size_t buflen) { 1562 DWORD size = (DWORD)buflen; 1563 return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE); 1564 } 1565 1566 void os::get_summary_os_info(char* buf, size_t buflen) { 1567 stringStream sst(buf, buflen); 1568 os::win32::print_windows_version(&sst); 1569 // chop off newline character 1570 char* nl = strchr(buf, '\n'); 1571 if (nl != NULL) *nl = '\0'; 1572 } 1573 1574 int os::log_vsnprintf(char* buf, size_t len, const char* fmt, va_list args) { 1575 int ret = vsnprintf(buf, len, fmt, args); 1576 // Get the correct buffer size if buf is too small 1577 if (ret < 0) { 1578 return _vscprintf(fmt, args); 1579 } 1580 return ret; 1581 } 1582 1583 static inline time_t get_mtime(const char* filename) { 1584 struct stat st; 1585 int ret = os::stat(filename, &st); 1586 assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno)); 1587 return st.st_mtime; 1588 } 1589 1590 int os::compare_file_modified_times(const char* file1, const char* file2) { 1591 time_t t1 = get_mtime(file1); 1592 time_t t2 = get_mtime(file2); 1593 return t1 - t2; 1594 } 1595 1596 void os::print_os_info_brief(outputStream* st) { 1597 os::print_os_info(st); 1598 } 1599 1600 void os::print_os_info(outputStream* st) { 1601 #ifdef ASSERT 1602 char buffer[1024]; 1603 st->print("HostName: "); 1604 if (get_host_name(buffer, sizeof(buffer))) { 1605 st->print("%s ", buffer); 1606 } else { 1607 st->print("N/A "); 1608 } 1609 #endif 1610 st->print("OS:"); 1611 os::win32::print_windows_version(st); 1612 } 1613 1614 void os::win32::print_windows_version(outputStream* st) { 1615 OSVERSIONINFOEX osvi; 1616 VS_FIXEDFILEINFO *file_info; 1617 TCHAR kernel32_path[MAX_PATH]; 1618 UINT len, ret; 1619 1620 // Use the GetVersionEx information to see if we're on a server or 1621 // workstation edition of Windows. Starting with Windows 8.1 we can't 1622 // trust the OS version information returned by this API. 
1623 ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX)); 1624 osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); 1625 if (!GetVersionEx((OSVERSIONINFO *)&osvi)) { 1626 st->print_cr("Call to GetVersionEx failed"); 1627 return; 1628 } 1629 bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION); 1630 1631 // Get the full path to \Windows\System32\kernel32.dll and use that for 1632 // determining what version of Windows we're running on. 1633 len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1; 1634 ret = GetSystemDirectory(kernel32_path, len); 1635 if (ret == 0 || ret > len) { 1636 st->print_cr("Call to GetSystemDirectory failed"); 1637 return; 1638 } 1639 strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret); 1640 1641 DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL); 1642 if (version_size == 0) { 1643 st->print_cr("Call to GetFileVersionInfoSize failed"); 1644 return; 1645 } 1646 1647 LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal); 1648 if (version_info == NULL) { 1649 st->print_cr("Failed to allocate version_info"); 1650 return; 1651 } 1652 1653 if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) { 1654 os::free(version_info); 1655 st->print_cr("Call to GetFileVersionInfo failed"); 1656 return; 1657 } 1658 1659 if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) { 1660 os::free(version_info); 1661 st->print_cr("Call to VerQueryValue failed"); 1662 return; 1663 } 1664 1665 int major_version = HIWORD(file_info->dwProductVersionMS); 1666 int minor_version = LOWORD(file_info->dwProductVersionMS); 1667 int build_number = HIWORD(file_info->dwProductVersionLS); 1668 int build_minor = LOWORD(file_info->dwProductVersionLS); 1669 int os_vers = major_version * 1000 + minor_version; 1670 os::free(version_info); 1671 1672 st->print(" Windows "); 1673 switch (os_vers) { 1674 1675 case 6000: 1676 if (is_workstation) { 1677 st->print("Vista"); 1678 } else { 1679 st->print("Server 2008"); 1680 } 
1681 break; 1682 1683 case 6001: 1684 if (is_workstation) { 1685 st->print("7"); 1686 } else { 1687 st->print("Server 2008 R2"); 1688 } 1689 break; 1690 1691 case 6002: 1692 if (is_workstation) { 1693 st->print("8"); 1694 } else { 1695 st->print("Server 2012"); 1696 } 1697 break; 1698 1699 case 6003: 1700 if (is_workstation) { 1701 st->print("8.1"); 1702 } else { 1703 st->print("Server 2012 R2"); 1704 } 1705 break; 1706 1707 case 10000: 1708 if (is_workstation) { 1709 st->print("10"); 1710 } else { 1711 st->print("Server 2016"); 1712 } 1713 break; 1714 1715 default: 1716 // Unrecognized windows, print out its major and minor versions 1717 st->print("%d.%d", major_version, minor_version); 1718 break; 1719 } 1720 1721 // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could 1722 // find out whether we are running on 64 bit processor or not 1723 SYSTEM_INFO si; 1724 ZeroMemory(&si, sizeof(SYSTEM_INFO)); 1725 GetNativeSystemInfo(&si); 1726 if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) { 1727 st->print(" , 64 bit"); 1728 } 1729 1730 st->print(" Build %d", build_number); 1731 st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor); 1732 st->cr(); 1733 } 1734 1735 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) { 1736 // Nothing to do for now. 
1737 } 1738 1739 void os::get_summary_cpu_info(char* buf, size_t buflen) { 1740 HKEY key; 1741 DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE, 1742 "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key); 1743 if (status == ERROR_SUCCESS) { 1744 DWORD size = (DWORD)buflen; 1745 status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size); 1746 if (status != ERROR_SUCCESS) { 1747 strncpy(buf, "## __CPU__", buflen); 1748 } 1749 RegCloseKey(key); 1750 } else { 1751 // Put generic cpu info to return 1752 strncpy(buf, "## __CPU__", buflen); 1753 } 1754 } 1755 1756 void os::print_memory_info(outputStream* st) { 1757 st->print("Memory:"); 1758 st->print(" %dk page", os::vm_page_size()>>10); 1759 1760 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 1761 // value if total memory is larger than 4GB 1762 MEMORYSTATUSEX ms; 1763 ms.dwLength = sizeof(ms); 1764 GlobalMemoryStatusEx(&ms); 1765 1766 st->print(", physical %uk", os::physical_memory() >> 10); 1767 st->print("(%uk free)", os::available_memory() >> 10); 1768 1769 st->print(", swap %uk", ms.ullTotalPageFile >> 10); 1770 st->print("(%uk free)", ms.ullAvailPageFile >> 10); 1771 st->cr(); 1772 } 1773 1774 void os::print_siginfo(outputStream *st, const void* siginfo) { 1775 const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo; 1776 st->print("siginfo:"); 1777 1778 char tmp[64]; 1779 if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) { 1780 strcpy(tmp, "EXCEPTION_??"); 1781 } 1782 st->print(" %s (0x%x)", tmp, er->ExceptionCode); 1783 1784 if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION || 1785 er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) && 1786 er->NumberParameters >= 2) { 1787 switch (er->ExceptionInformation[0]) { 1788 case 0: st->print(", reading address"); break; 1789 case 1: st->print(", writing address"); break; 1790 case 8: st->print(", data execution prevention violation at address"); break; 1791 default: st->print(", 
ExceptionInformation=" INTPTR_FORMAT, 1792 er->ExceptionInformation[0]); 1793 } 1794 st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]); 1795 } else { 1796 int num = er->NumberParameters; 1797 if (num > 0) { 1798 st->print(", ExceptionInformation="); 1799 for (int i = 0; i < num; i++) { 1800 st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]); 1801 } 1802 } 1803 } 1804 st->cr(); 1805 } 1806 1807 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { 1808 // do nothing 1809 } 1810 1811 static char saved_jvm_path[MAX_PATH] = {0}; 1812 1813 // Find the full path to the current module, jvm.dll 1814 void os::jvm_path(char *buf, jint buflen) { 1815 // Error checking. 1816 if (buflen < MAX_PATH) { 1817 assert(false, "must use a large-enough buffer"); 1818 buf[0] = '\0'; 1819 return; 1820 } 1821 // Lazy resolve the path to current module. 1822 if (saved_jvm_path[0] != 0) { 1823 strcpy(buf, saved_jvm_path); 1824 return; 1825 } 1826 1827 buf[0] = '\0'; 1828 if (Arguments::sun_java_launcher_is_altjvm()) { 1829 // Support for the java launcher's '-XXaltjvm=<path>' option. Check 1830 // for a JAVA_HOME environment variable and fix up the path so it 1831 // looks like jvm.dll is installed there (append a fake suffix 1832 // hotspot/jvm.dll). 
1833 char* java_home_var = ::getenv("JAVA_HOME"); 1834 if (java_home_var != NULL && java_home_var[0] != 0 && 1835 strlen(java_home_var) < (size_t)buflen) { 1836 strncpy(buf, java_home_var, buflen); 1837 1838 // determine if this is a legacy image or modules image 1839 // modules image doesn't have "jre" subdirectory 1840 size_t len = strlen(buf); 1841 char* jrebin_p = buf + len; 1842 jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\"); 1843 if (0 != _access(buf, 0)) { 1844 jio_snprintf(jrebin_p, buflen-len, "\\bin\\"); 1845 } 1846 len = strlen(buf); 1847 jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll"); 1848 } 1849 } 1850 1851 if (buf[0] == '\0') { 1852 GetModuleFileName(vm_lib_handle, buf, buflen); 1853 } 1854 strncpy(saved_jvm_path, buf, MAX_PATH); 1855 saved_jvm_path[MAX_PATH - 1] = '\0'; 1856 } 1857 1858 1859 void os::print_jni_name_prefix_on(outputStream* st, int args_size) { 1860 #ifndef _WIN64 1861 st->print("_"); 1862 #endif 1863 } 1864 1865 1866 void os::print_jni_name_suffix_on(outputStream* st, int args_size) { 1867 #ifndef _WIN64 1868 st->print("@%d", args_size * sizeof(int)); 1869 #endif 1870 } 1871 1872 // This method is a copy of JDK's sysGetLastErrorString 1873 // from src/windows/hpi/src/system_md.c 1874 1875 size_t os::lasterror(char* buf, size_t len) { 1876 DWORD errval; 1877 1878 if ((errval = GetLastError()) != 0) { 1879 // DOS error 1880 size_t n = (size_t)FormatMessage( 1881 FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS, 1882 NULL, 1883 errval, 1884 0, 1885 buf, 1886 (DWORD)len, 1887 NULL); 1888 if (n > 3) { 1889 // Drop final '.', CR, LF 1890 if (buf[n - 1] == '\n') n--; 1891 if (buf[n - 1] == '\r') n--; 1892 if (buf[n - 1] == '.') n--; 1893 buf[n] = '\0'; 1894 } 1895 return n; 1896 } 1897 1898 if (errno != 0) { 1899 // C runtime error that has no corresponding DOS error code 1900 const char* s = os::strerror(errno); 1901 size_t n = strlen(s); 1902 if (n >= len) n = len - 1; 1903 strncpy(buf, s, n); 1904 buf[n] = '\0'; 1905 
return n;
  }

  return 0;
}

// Returns the last OS error: GetLastError() if set, otherwise errno.
int os::get_last_error() {
  DWORD error = GetLastError();
  if (error == 0) {
    error = errno;
  }
  return (int)error;
}

// Thin wrapper over a Win32 semaphore object; initial count is 'value',
// maximum count is LONG_MAX.
WindowsSemaphore::WindowsSemaphore(uint value) {
  _semaphore = ::CreateSemaphore(NULL, value, LONG_MAX, NULL);

  guarantee(_semaphore != NULL, "CreateSemaphore failed with error code: %lu", GetLastError());
}

WindowsSemaphore::~WindowsSemaphore() {
  ::CloseHandle(_semaphore);
}

// Increment the semaphore count by 'count' (no-op when count == 0).
void WindowsSemaphore::signal(uint count) {
  if (count > 0) {
    BOOL ret = ::ReleaseSemaphore(_semaphore, count, NULL);

    assert(ret != 0, "ReleaseSemaphore failed with error code: %lu", GetLastError());
  }
}

// Block until the semaphore count can be decremented.
void WindowsSemaphore::wait() {
  DWORD ret = ::WaitForSingleObject(_semaphore, INFINITE);
  assert(ret != WAIT_FAILED, "WaitForSingleObject failed with error code: %lu", GetLastError());
  assert(ret == WAIT_OBJECT_0, "WaitForSingleObject failed with return value: %lu", ret);
}

// Try to decrement without blocking; returns true on success.
bool WindowsSemaphore::trywait() {
  DWORD ret = ::WaitForSingleObject(_semaphore, 0);
  assert(ret != WAIT_FAILED, "WaitForSingleObject failed with error code: %lu", GetLastError());
  return ret == WAIT_OBJECT_0;
}

// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;

static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
1959 os::signal(sig, (void*)UserHandler); 1960 } 1961 1962 void* os::user_handler() { 1963 return (void*) UserHandler; 1964 } 1965 1966 void* os::signal(int signal_number, void* handler) { 1967 if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) { 1968 void (*oldHandler)(int) = sigbreakHandler; 1969 sigbreakHandler = (void (*)(int)) handler; 1970 return (void*) oldHandler; 1971 } else { 1972 return (void*)::signal(signal_number, (void (*)(int))handler); 1973 } 1974 } 1975 1976 void os::signal_raise(int signal_number) { 1977 raise(signal_number); 1978 } 1979 1980 // The Win32 C runtime library maps all console control events other than ^C 1981 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close, 1982 // logoff, and shutdown events. We therefore install our own console handler 1983 // that raises SIGTERM for the latter cases. 1984 // 1985 static BOOL WINAPI consoleHandler(DWORD event) { 1986 switch (event) { 1987 case CTRL_C_EVENT: 1988 if (VMError::is_error_reported()) { 1989 // Ctrl-C is pressed during error reporting, likely because the error 1990 // handler fails to abort. Let VM die immediately. 1991 os::die(); 1992 } 1993 1994 os::signal_raise(SIGINT); 1995 return TRUE; 1996 break; 1997 case CTRL_BREAK_EVENT: 1998 if (sigbreakHandler != NULL) { 1999 (*sigbreakHandler)(SIGBREAK); 2000 } 2001 return TRUE; 2002 break; 2003 case CTRL_LOGOFF_EVENT: { 2004 // Don't terminate JVM if it is running in a non-interactive session, 2005 // such as a service process. 2006 USEROBJECTFLAGS flags; 2007 HANDLE handle = GetProcessWindowStation(); 2008 if (handle != NULL && 2009 GetUserObjectInformation(handle, UOI_FLAGS, &flags, 2010 sizeof(USEROBJECTFLAGS), NULL)) { 2011 // If it is a non-interactive session, let next handler to deal 2012 // with it. 
if ((flags.dwFlags & WSF_VISIBLE) == 0) {
        return FALSE;
      }
    }
  }
  // Interactive logoff: fall through and treat it like close/shutdown.
  case CTRL_CLOSE_EVENT:
  case CTRL_SHUTDOWN_EVENT:
    os::signal_raise(SIGTERM);
    return TRUE;
    break;
  default:
    break;
  }
  return FALSE;
}

// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
static HANDLE sig_sem = NULL;

// Set up the data structures for sun.misc.Signal dispatch and install the
// console control handler (unless -Xrs/ReduceSignalUsage is specified).
void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.  For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified.  This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case.  See bugs 4323062, 4345157, and related bugs.
2061 2062 if (!ReduceSignalUsage) { 2063 // Add a CTRL-C handler 2064 SetConsoleCtrlHandler(consoleHandler, TRUE); 2065 } 2066 } 2067 2068 void os::signal_notify(int signal_number) { 2069 BOOL ret; 2070 if (sig_sem != NULL) { 2071 Atomic::inc(&pending_signals[signal_number]); 2072 ret = ::ReleaseSemaphore(sig_sem, 1, NULL); 2073 assert(ret != 0, "ReleaseSemaphore() failed"); 2074 } 2075 } 2076 2077 static int check_pending_signals(bool wait_for_signal) { 2078 DWORD ret; 2079 while (true) { 2080 for (int i = 0; i < NSIG + 1; i++) { 2081 jint n = pending_signals[i]; 2082 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) { 2083 return i; 2084 } 2085 } 2086 if (!wait_for_signal) { 2087 return -1; 2088 } 2089 2090 JavaThread *thread = JavaThread::current(); 2091 2092 ThreadBlockInVM tbivm(thread); 2093 2094 bool threadIsSuspended; 2095 do { 2096 thread->set_suspend_equivalent(); 2097 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self() 2098 ret = ::WaitForSingleObject(sig_sem, INFINITE); 2099 assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed"); 2100 2101 // were we externally suspended while we were waiting? 2102 threadIsSuspended = thread->handle_special_suspend_equivalent_condition(); 2103 if (threadIsSuspended) { 2104 // The semaphore has been incremented, but while we were waiting 2105 // another thread suspended us. We don't want to continue running 2106 // while suspended because that would surprise the thread that 2107 // suspended us. 
2108 ret = ::ReleaseSemaphore(sig_sem, 1, NULL); 2109 assert(ret != 0, "ReleaseSemaphore() failed"); 2110 2111 thread->java_suspend_self(); 2112 } 2113 } while (threadIsSuspended); 2114 } 2115 } 2116 2117 int os::signal_lookup() { 2118 return check_pending_signals(false); 2119 } 2120 2121 int os::signal_wait() { 2122 return check_pending_signals(true); 2123 } 2124 2125 // Implicit OS exception handling 2126 2127 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, 2128 address handler) { 2129 JavaThread* thread = (JavaThread*) Thread::current_or_null(); 2130 // Save pc in thread 2131 #ifdef _M_AMD64 2132 // Do not blow up if no thread info available. 2133 if (thread) { 2134 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip); 2135 } 2136 // Set pc to handler 2137 exceptionInfo->ContextRecord->Rip = (DWORD64)handler; 2138 #else 2139 // Do not blow up if no thread info available. 2140 if (thread) { 2141 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip); 2142 } 2143 // Set pc to handler 2144 exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler; 2145 #endif 2146 2147 // Continue the execution 2148 return EXCEPTION_CONTINUE_EXECUTION; 2149 } 2150 2151 2152 // Used for PostMortemDump 2153 extern "C" void safepoints(); 2154 extern "C" void find(int x); 2155 extern "C" void events(); 2156 2157 // According to Windows API documentation, an illegal instruction sequence should generate 2158 // the 0xC000001C exception code. However, real world experience shows that occasionnaly 2159 // the execution of an illegal instruction can generate the exception code 0xC000001E. This 2160 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems). 

#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E

// From "Execution Protection in the Windows Operating System" draft 0.35
// Once a system header becomes available, the "real" define should be
// included or copied here.
#define EXCEPTION_INFO_EXEC_VIOLATION 0x08

// Windows Vista/2008 heap corruption check
#define EXCEPTION_HEAP_CORRUPTION        0xC0000374

// All Visual C++ exceptions thrown from code generated by the Microsoft Visual
// C++ compiler contain this error code. Because this is a compiler-generated
// error, the code is not listed in the Win32 API header files.
// The code is actually a cryptic mnemonic device, with the initial "E"
// standing for "exception" and the final 3 bytes (0x6D7363) representing the
// ASCII values of "msc".
#define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363

// Expands to a { "NAME", code } initializer for the lookup table below.
#define def_excpt(val) { #val, (val) }

// Maps SEH exception codes to their symbolic names for error reporting.
static const struct { char* name; uint number; } exceptlabels[] = {
  def_excpt(EXCEPTION_ACCESS_VIOLATION),
  def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
  def_excpt(EXCEPTION_BREAKPOINT),
  def_excpt(EXCEPTION_SINGLE_STEP),
  def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
  def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
  def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
  def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
  def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
  def_excpt(EXCEPTION_FLT_OVERFLOW),
  def_excpt(EXCEPTION_FLT_STACK_CHECK),
  def_excpt(EXCEPTION_FLT_UNDERFLOW),
  def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
  def_excpt(EXCEPTION_INT_OVERFLOW),
  def_excpt(EXCEPTION_PRIV_INSTRUCTION),
  def_excpt(EXCEPTION_IN_PAGE_ERROR),
  def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
  def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
  def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
  def_excpt(EXCEPTION_STACK_OVERFLOW),
  def_excpt(EXCEPTION_INVALID_DISPOSITION),
  def_excpt(EXCEPTION_GUARD_PAGE),
  def_excpt(EXCEPTION_INVALID_HANDLE),
  def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
  def_excpt(EXCEPTION_HEAP_CORRUPTION)
};

#undef def_excpt

// Copy the symbolic name of 'exception_code' into 'buf' (truncating to
// 'size') and return 'buf'; return NULL if the code is not in the table.
const char* os::exception_name(int exception_code, char *buf, size_t size) {
  uint code = static_cast<uint>(exception_code);
  for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
    if (exceptlabels[i].number == code) {
      jio_snprintf(buf, size, "%s", exceptlabels[i].name);
      return buf;
    }
  }

  return NULL;
}

//-----------------------------------------------------------------------------
LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by idiv; should only happen for -MinInt/-1
  // (division by zero is handled explicitly)
#ifdef _M_AMD64
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Rip;
  // The faulting instruction must be idiv (0xF7 /7), optionally preceded by
  // a REX prefix on x64.
  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  if (pc[0] == 0xF7) {
    // set correct result values and continue after idiv instruction
    ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
  } else {
    ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
  }
  // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
  // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
  // idiv opcode (0xF7).
  ctx->Rdx = (DWORD)0;             // remainder
  // Continue the execution
#else
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Eip;
  assert(pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  assert(ctx->Eax == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
  ctx->Eax = (DWORD)min_jint;      // result
  ctx->Edx = (DWORD)0;             // remainder
  // Continue the execution
#endif
  return EXCEPTION_CONTINUE_EXECUTION;
}

//-----------------------------------------------------------------------------
// Handle FP-related exceptions: on 32-bit, repair an x87 control word that a
// native method changed; on 64-bit, restore MXCSR to the VM's initial value.
// Returns EXCEPTION_CONTINUE_EXECUTION if repaired, else continues the search
// (or, on 32-bit, defers to the previously installed filter).
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  PCONTEXT ctx = exceptionInfo->ContextRecord;
#ifndef _WIN64
  // handle exception caused by native method modifying control word
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;

  switch (exception_code) {
  case EXCEPTION_FLT_DENORMAL_OPERAND:
  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
  case EXCEPTION_FLT_INEXACT_RESULT:
  case EXCEPTION_FLT_INVALID_OPERATION:
  case EXCEPTION_FLT_OVERFLOW:
  case EXCEPTION_FLT_STACK_CHECK:
  case EXCEPTION_FLT_UNDERFLOW:
    jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
    if (fp_control_word != ctx->FloatSave.ControlWord) {
      // Restore FPCW and mask out FLT exceptions
      ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
      // Mask out pending FLT exceptions
      ctx->FloatSave.StatusWord &= 0xffffff00;
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if (prev_uef_handler != NULL) {
    // We didn't handle this exception so pass it to the previous
    // UnhandledExceptionFilter.
    return (prev_uef_handler)(exceptionInfo);
  }
#else // !_WIN64
  // On Windows, the mxcsr control bits are non-volatile across calls
  // See also CR 6192333
  //
  jint MxCsr = INITIAL_MXCSR;
  // we can't use StubRoutines::addr_mxcsr_std()
  // because in Win64 mxcsr is not saved there
  if (MxCsr != ctx->MxCsr) {
    ctx->MxCsr = MxCsr;
    return EXCEPTION_CONTINUE_EXECUTION;
  }
#endif // !_WIN64

  return EXCEPTION_CONTINUE_SEARCH;
}

// Forward a fatal exception to VM error reporting (hs_err file, etc.).
static inline void report_error(Thread* t, DWORD exception_code,
                                address addr, void* siginfo, void* context) {
  VMError::report_and_die(t, exception_code, addr, siginfo, context);

  // If UseOsErrorReporting, this will return here and save the error file
  // somewhere where we can find it in the minidump.
}

// Locate the Java frame whose stack banging triggered the fault at 'pc' and
// store it in '*fr'. Returns false if the pc cannot be attributed to a frame
// (caller then falls back to default stack-overflow handling).
bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
        struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
  PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
  address addr = (address) exceptionRecord->ExceptionInformation[1];
  if (Interpreter::contains(pc)) {
    *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
    if (!fr->is_first_java_frame()) {
      // get_frame_at_stack_banging_point() is only called when we
      // have well defined stacks so java_sender() calls do not need
      // to assert safe_for_sender() first.
      *fr = fr->java_sender();
    }
  } else {
    // more complex code with compiled code
    assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
    CodeBlob* cb = CodeCache::find_blob(pc);
    if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
      // Not sure where the pc points to, fallback to default
      // stack overflow handling
      return false;
    } else {
      *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
      // in compiled code, the stack banging is performed just after the return pc
      // has been pushed on the stack
      *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
      if (!fr->is_java_frame()) {
        // See java_sender() comment above.
        *fr = fr->java_sender();
      }
    }
  }
  assert(fr->is_java_frame(), "Safety check");
  return true;
}

//-----------------------------------------------------------------------------
// Top-level structured exception filter for the VM. Classifies hardware
// exceptions (access violations, integer/FP traps, stack overflow) and either
// redirects execution to a continuation stub via Handle_Exception() or hands
// the exception to error reporting / the next SEH filter.
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#ifdef _M_AMD64
  address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
  Thread* t = Thread::current_or_null_safe();

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      bool instr_spans_page_boundary =
        (align_down((intptr_t) pc ^ (intptr_t) addr,
                    (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start = align_down(addr, page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          log_debug(os)("Execution protection violation "
                        "at " INTPTR_FORMAT
                        ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
                        p2i(page_start), (res ? "success" : os::strerror(errno)));

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here.  Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM.  Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop.  This condition is probably even more unlikely
          // than the first.
          //
          // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are unnecessary complication:
          // this handler is a best-effort safety net, not a complete solution.
          // It is disabled by default and should only be used as a workaround
          // in case we missed any no-execute-unsafe VM code.

          last_addr = addr;

          return EXCEPTION_CONTINUE_EXECUTION;
        }
      }

      // Last unguard failed or not unguarding
      tty->print_raw_cr("Execution protection violation");
      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    }
  }
#endif // _WIN64

  // Check to see if we caught the safepoint code in the
  // process of write protecting the memory serialization page.
  // It write enables the page immediately after protecting it
  // so just return.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    if (t != NULL && t->is_Java_thread()) {
      JavaThread* thread = (JavaThread*) t;
      PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
      address addr = (address) exceptionRecord->ExceptionInformation[1];
      if (os::is_memory_serialize_page(thread, addr)) {
        // Block current thread until the memory serialize page permission restored.
        os::block_on_serialize_page_trap();
        return EXCEPTION_CONTINUE_EXECUTION;
      }
    }
  }

  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
      VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
  }

  if (t != NULL && t->is_Java_thread()) {
    JavaThread* thread = (JavaThread*) t;
    bool in_java = thread->thread_state() == _thread_in_Java;

    // Handle potential stack overflows up front.
    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
      if (thread->stack_guards_enabled()) {
        if (in_java) {
          frame fr;
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
            assert(fr.is_java_frame(), "Must be a Java frame");
            SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
          }
        }
        // Yellow zone violation. The o/s has unprotected the first yellow
        // zone page for us. Note: must call disable_stack_yellow_zone to
        // update the enabled status, even if the zone contains only one page.
        assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
        thread->disable_stack_yellow_reserved_zone();
        // If not in java code, return and hope for the best.
        return in_java
            ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
            : EXCEPTION_CONTINUE_EXECUTION;
      } else {
        // Fatal red zone violation.
        thread->disable_stack_red_zone();
        tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
        report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                     exceptionInfo->ContextRecord);
        return EXCEPTION_CONTINUE_SEARCH;
      }
    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
      // Either stack overflow or null pointer exception.
      if (in_java) {
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        address stack_end = thread->stack_end();
        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
          // Stack overflow.
          assert(!os::uses_stack_guard_pages(),
                 "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          if (os::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
        {
#ifdef _WIN64
          // If it's a legal stack address map the entire region in
          //
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
            // Round down to the page boundary before committing up to the
            // thread's stack base.
            addr = (address)((uintptr_t)addr &
                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
            os::commit_memory((char *)addr, thread->stack_base() - addr,
                              !ExecMem);
            return EXCEPTION_CONTINUE_EXECUTION;
          } else
#endif
          {
            // Null pointer exception.
            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) {
              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
            }
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;
          }
        }
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
        address addr = JNI_FastGetField::find_slowcase_pc(pc);
        if (addr != (address)-1) {
          return Handle_Exception(exceptionInfo, addr);
        }
      }
#endif

      // Stack overflow or null pointer exception in native code.
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    if (in_java) {
      switch (exception_code) {
      case EXCEPTION_INT_DIVIDE_BY_ZERO:
        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

      case EXCEPTION_INT_OVERFLOW:
        // On x86 -min_jint/-1 raises EXCEPTION_INT_OVERFLOW from idiv.
        return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }
    if (((thread->thread_state() == _thread_in_Java) ||
         (thread->thread_state() == _thread_in_native)) &&
        exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
  }

  if (exception_code != EXCEPTION_BREAKPOINT) {
    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                 exceptionInfo->ContextRecord);
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

#ifndef _WIN64
// Special care for fast JNI accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
// the heap gets shrunk before the field access.
// Need to install our own structured exception handler since native code may
// install its own.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Wraps each generated fast accessor in a __try/__except so a fault inside
// the accessor is redirected to the slow case by the filter above.
#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
  Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
                                                     jobject obj,           \
                                                     jfieldID fieldID) {    \
    __try {                                                                 \
      return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
                                                                 obj,       \
                                                                 fieldID);  \
    } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
                                              _exception_info())) {         \
    }                                                                       \
    return 0;                                                               \
  }

DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
DEFINE_FAST_GETFIELD(jchar,    char,   Char)
DEFINE_FAST_GETFIELD(jshort,   short,  Short)
DEFINE_FAST_GETFIELD(jint,     int,    Int)
DEFINE_FAST_GETFIELD(jlong,    long,   Long)
DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
DEFINE_FAST_GETFIELD(jdouble,  double, Double)

// Returns the SEH-wrapped fast accessor for 'type' (defined above).
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
  case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
  case T_CHAR:    return
(address)jni_fast_GetCharField_wrapper;
  case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
  case T_INT:     return (address)jni_fast_GetIntField_wrapper;
  case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
  case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
  case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
  default:        ShouldNotReachHere();
  }
  return (address)-1;
}
#endif

// Virtual Memory

int os::vm_page_size() { return os::win32::vm_page_size(); }
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}

// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
//   + select Control Panel -> Administrative Tools -> Local Security Policy
//   + select Local Policies -> User Rights Assignment
//   + double click "Lock pages in memory", add users and/or groups
//   + reboot
// Note the above steps are needed for administrator as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment it only uses large page if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future, if so the code below needs to be revisited.

#ifndef MEM_LARGE_PAGES
#define MEM_LARGE_PAGES 0x20000000
#endif

// Process and token handles used while acquiring the lock-pages-in-memory
// privilege; released by cleanup_after_large_page_init().
static HANDLE _hProcess;
static HANDLE _hToken;

// Container for NUMA node list info
class NUMANodeListHolder {
 private:
  int *_numa_used_node_list;  // allocated below
  int _numa_used_node_count;

  void free_node_list() {
    if (_numa_used_node_list != NULL) {
      FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
    }
  }

 public:
  NUMANodeListHolder() {
    _numa_used_node_count = 0;
    _numa_used_node_list = NULL;
    // do rest of initialization in build routine (after function pointers are set up)
  }

  ~NUMANodeListHolder() {
    free_node_list();
  }

  // Collect the NUMA nodes whose processors intersect this process's affinity
  // mask. Returns true only if more than one node is usable (interleaving
  // makes no sense on a single node).
  bool build() {
    DWORD_PTR proc_aff_mask;
    DWORD_PTR sys_aff_mask;
    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
    ULONG highest_node_number;
    if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
    free_node_list();
    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
    for (unsigned int i = 0; i <= highest_node_number; i++) {
      ULONGLONG proc_mask_numa_node;
      if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
      if ((proc_aff_mask & proc_mask_numa_node) != 0) {
        _numa_used_node_list[_numa_used_node_count++] = i;
      }
    }
    return (_numa_used_node_count > 1);
  }

  int get_count() { return _numa_used_node_count; }
  int get_node_list_entry(int n) {
    // for indexes out of range, returns -1
    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
  }

} numa_node_list_holder;



// Large page size detected by os::large_page_init(); 0 until initialized.
static size_t _large_page_size = 0;

// Try to enable the SeLockMemoryPrivilege on this process's token, which is
// required for MEM_LARGE_PAGES allocations. Leaves _hProcess/_hToken open for
// cleanup_after_large_page_init(). Returns true on success.
static bool request_lock_memory_privilege() {
  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
                          os::current_process_id());

  LUID luid;
  if (_hProcess != NULL &&
      OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
      LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

    TOKEN_PRIVILEGES tp;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Luid = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See MSDN document.
    if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
        (GetLastError() == ERROR_SUCCESS)) {
      return true;
    }
  }

  return false;
}

// Close the handles opened by request_lock_memory_privilege().
static void cleanup_after_large_page_init() {
  if (_hProcess) CloseHandle(_hProcess);
  _hProcess = NULL;
  if (_hToken) CloseHandle(_hToken);
  _hToken = NULL;
}

// Initialize NUMA interleaving support; returns true if multiple usable NUMA
// nodes were found. Warns only if UseNUMAInterleaving was set explicitly.
static bool numa_interleaving_init() {
  bool success = false;
  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

  // print a warning if UseNUMAInterleaving flag is specified on command line
  bool warn_on_failure = use_numa_interleaving_specified;
#define WARN(msg) if (warn_on_failure) { warning(msg); }

  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ?
_large_page_size : os::vm_allocation_granularity();
  NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);

  if (numa_node_list_holder.build()) {
    if (log_is_enabled(Debug, os, cpu)) {
      Log(os, cpu) log;
      log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
      for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
        log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
      }
    }
    success = true;
  } else {
    WARN("Process does not cover multiple NUMA nodes.");
  }
  if (!success) {
    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
  }
  return success;
#undef WARN
}

// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
//  * UseNUMAInterleaving requires a separate node for each piece
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
                                         DWORD prot,
                                         bool should_inject_error = false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) VirtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  // Give the probe reservation back; the chunk loop below re-allocates the
  // same range piece by piece. (Racy by nature: another thread could grab
  // the range in between — callers treat failure as retryable.)
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = align_up(p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t  bytes_remaining = bytes;
  // An overflow of align_up() would have been caught above
  // in the calculation of size_of_reserve.
  char * next_alloc_addr = p_buf;
  HANDLE hProc = GetCurrentProcess();

#ifdef ASSERT
  // Variable for the failure injection
  int ran_num = os::random();
  size_t fail_after = ran_num % bytes;
#endif

  int count=0;
  while (bytes_remaining) {
    // select bytes_to_rq to get to the next chunk_size boundary

    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
    // Note allocate and commit
    char * p_new;

#ifdef ASSERT
    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
#else
    const bool inject_error_now = false;
#endif

    if (inject_error_now) {
      p_new = NULL;
    } else {
      if (!UseNUMAInterleaving) {
        p_new = (char *) VirtualAlloc(next_alloc_addr,
                                      bytes_to_rq,
                                      flags,
                                      prot);
      } else {
        // get the next node to use from the used_node_list
        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
        p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
      }
    }

    if (p_new == NULL) {
      // Free any allocated pages
      if (next_alloc_addr > p_buf) {
        // Some memory was committed so release it.
        size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // need to create a dummy 'reserve' record to match
        // the release.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        log_develop_debug(pagesize)("Reserving pages individually failed.");
      }
#endif
      return NULL;
    }

    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}



// Detect large page support: requires the SeLockMemoryPrivilege and a
// non-zero GetLargePageMinimum(). Sets _large_page_size on success.
void os::large_page_init() {
  if (!UseLargePages) return;

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  bool success = false;

#define WARN(msg) if (warn_on_failure) { warning(msg); }
  if (request_lock_memory_privilege()) {
    size_t s = GetLargePageMinimum();
    if (s) {
#if defined(IA32) || defined(AMD64)
      if (s > 4*M || LargePageSizeInBytes > 4*M) {
        WARN("JVM cannot use large pages bigger than 4mb.");
      } else {
#endif
        // Honor an explicit LargePageSizeInBytes only if it is a multiple of
        // the OS minimum large page size.
        if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
          _large_page_size = LargePageSizeInBytes;
        } else {
          _large_page_size = s;
        }
        success = true;
#if defined(IA32) || defined(AMD64)
      }
#endif
    } else {
      WARN("Large page is not supported by the processor.");
    }
  } else {
    WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
  }
#undef WARN

  const size_t default_page_size =
(size_t) vm_page_size(); 2954 if (success && _large_page_size > default_page_size) { 2955 _page_sizes[0] = _large_page_size; 2956 _page_sizes[1] = default_page_size; 2957 _page_sizes[2] = 0; 2958 } 2959 2960 cleanup_after_large_page_init(); 2961 UseLargePages = success; 2962 } 2963 2964 // On win32, one cannot release just a part of reserved memory, it's an 2965 // all or nothing deal. When we split a reservation, we must break the 2966 // reservation into two reservations. 2967 void os::pd_split_reserved_memory(char *base, size_t size, size_t split, 2968 bool realloc) { 2969 if (size > 0) { 2970 release_memory(base, size); 2971 if (realloc) { 2972 reserve_memory(split, base); 2973 } 2974 if (size != split) { 2975 reserve_memory(size - split, base + split); 2976 } 2977 } 2978 } 2979 2980 // Multiple threads can race in this code but it's not possible to unmap small sections of 2981 // virtual space to get requested alignment, like posix-like os's. 2982 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe. 
2983 char* os::reserve_memory_aligned(size_t size, size_t alignment) { 2984 assert((alignment & (os::vm_allocation_granularity() - 1)) == 0, 2985 "Alignment must be a multiple of allocation granularity (page size)"); 2986 assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned"); 2987 2988 size_t extra_size = size + alignment; 2989 assert(extra_size >= size, "overflow, size is too large to allow alignment"); 2990 2991 char* aligned_base = NULL; 2992 2993 do { 2994 char* extra_base = os::reserve_memory(extra_size, NULL, alignment); 2995 if (extra_base == NULL) { 2996 return NULL; 2997 } 2998 // Do manual alignment 2999 aligned_base = align_up(extra_base, alignment); 3000 3001 os::release_memory(extra_base, extra_size); 3002 3003 aligned_base = os::reserve_memory(size, aligned_base); 3004 3005 } while (aligned_base == NULL); 3006 3007 return aligned_base; 3008 } 3009 3010 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) { 3011 assert((size_t)addr % os::vm_allocation_granularity() == 0, 3012 "reserve alignment"); 3013 assert(bytes % os::vm_page_size() == 0, "reserve page size"); 3014 char* res; 3015 // note that if UseLargePages is on, all the areas that require interleaving 3016 // will go thru reserve_memory_special rather than thru here. 
3017 bool use_individual = (UseNUMAInterleaving && !UseLargePages); 3018 if (!use_individual) { 3019 res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE); 3020 } else { 3021 elapsedTimer reserveTimer; 3022 if (Verbose && PrintMiscellaneous) reserveTimer.start(); 3023 // in numa interleaving, we have to allocate pages individually 3024 // (well really chunks of NUMAInterleaveGranularity size) 3025 res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE); 3026 if (res == NULL) { 3027 warning("NUMA page allocation failed"); 3028 } 3029 if (Verbose && PrintMiscellaneous) { 3030 reserveTimer.stop(); 3031 tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes, 3032 reserveTimer.milliseconds(), reserveTimer.ticks()); 3033 } 3034 } 3035 assert(res == NULL || addr == NULL || addr == res, 3036 "Unexpected address from reserve."); 3037 3038 return res; 3039 } 3040 3041 // Reserve memory at an arbitrary address, only if that area is 3042 // available (and not reserved for something else). 3043 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) { 3044 // Windows os::reserve_memory() fails of the requested address range is 3045 // not avilable. 3046 return reserve_memory(bytes, requested_addr); 3047 } 3048 3049 size_t os::large_page_size() { 3050 return _large_page_size; 3051 } 3052 3053 bool os::can_commit_large_page_memory() { 3054 // Windows only uses large page memory when the entire region is reserved 3055 // and committed in a single VirtualAlloc() call. This may change in the 3056 // future, but with Windows 2003 it's not possible to commit on demand. 
3057 return false; 3058 } 3059 3060 bool os::can_execute_large_page_memory() { 3061 return true; 3062 } 3063 3064 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, 3065 bool exec) { 3066 assert(UseLargePages, "only for large pages"); 3067 3068 if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) { 3069 return NULL; // Fallback to small pages. 3070 } 3071 3072 const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; 3073 const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 3074 3075 // with large pages, there are two cases where we need to use Individual Allocation 3076 // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003) 3077 // 2) NUMA Interleaving is enabled, in which case we use a different node for each page 3078 if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) { 3079 log_debug(pagesize)("Reserving large pages individually."); 3080 3081 char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError); 3082 if (p_buf == NULL) { 3083 // give an appropriate warning message 3084 if (UseNUMAInterleaving) { 3085 warning("NUMA large page allocation failed, UseLargePages flag ignored"); 3086 } 3087 if (UseLargePagesIndividualAllocation) { 3088 warning("Individually allocated large pages failed, " 3089 "use -XX:-UseLargePagesIndividualAllocation to turn off"); 3090 } 3091 return NULL; 3092 } 3093 3094 return p_buf; 3095 3096 } else { 3097 log_debug(pagesize)("Reserving large pages in a single large chunk."); 3098 3099 // normal policy just allocate it all at once 3100 DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 3101 char * res = (char *)VirtualAlloc(addr, bytes, flag, prot); 3102 if (res != NULL) { 3103 MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC); 3104 } 3105 3106 return res; 3107 } 3108 } 3109 3110 bool os::release_memory_special(char* base, size_t 
bytes) { 3111 assert(base != NULL, "Sanity check"); 3112 return release_memory(base, bytes); 3113 } 3114 3115 void os::print_statistics() { 3116 } 3117 3118 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) { 3119 int err = os::get_last_error(); 3120 char buf[256]; 3121 size_t buf_len = os::lasterror(buf, sizeof(buf)); 3122 warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT 3123 ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes, 3124 exec, buf_len != 0 ? buf : "<no_error_string>", err); 3125 } 3126 3127 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) { 3128 if (bytes == 0) { 3129 // Don't bother the OS with noops. 3130 return true; 3131 } 3132 assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries"); 3133 assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks"); 3134 // Don't attempt to print anything if the OS call fails. We're 3135 // probably low on resources, so the print itself may cause crashes. 3136 3137 // unless we have NUMAInterleaving enabled, the range of a commit 3138 // is always within a reserve covered by a single VirtualAlloc 3139 // in that case we can just do a single commit for the requested size 3140 if (!UseNUMAInterleaving) { 3141 if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) { 3142 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 3143 return false; 3144 } 3145 if (exec) { 3146 DWORD oldprot; 3147 // Windows doc says to use VirtualProtect to get execute permissions 3148 if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) { 3149 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 3150 return false; 3151 } 3152 } 3153 return true; 3154 } else { 3155 3156 // when NUMAInterleaving is enabled, the commit might cover a range that 3157 // came from multiple VirtualAlloc reserves (using allocate_pages_individually). 3158 // VirtualQuery can help us determine that. 
The RegionSize that VirtualQuery 3159 // returns represents the number of bytes that can be committed in one step. 3160 size_t bytes_remaining = bytes; 3161 char * next_alloc_addr = addr; 3162 while (bytes_remaining > 0) { 3163 MEMORY_BASIC_INFORMATION alloc_info; 3164 VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info)); 3165 size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3166 if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, 3167 PAGE_READWRITE) == NULL) { 3168 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 3169 exec);) 3170 return false; 3171 } 3172 if (exec) { 3173 DWORD oldprot; 3174 if (!VirtualProtect(next_alloc_addr, bytes_to_rq, 3175 PAGE_EXECUTE_READWRITE, &oldprot)) { 3176 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 3177 exec);) 3178 return false; 3179 } 3180 } 3181 bytes_remaining -= bytes_to_rq; 3182 next_alloc_addr += bytes_to_rq; 3183 } 3184 } 3185 // if we made it this far, return true 3186 return true; 3187 } 3188 3189 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, 3190 bool exec) { 3191 // alignment_hint is ignored on this OS 3192 return pd_commit_memory(addr, size, exec); 3193 } 3194 3195 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec, 3196 const char* mesg) { 3197 assert(mesg != NULL, "mesg must be specified"); 3198 if (!pd_commit_memory(addr, size, exec)) { 3199 warn_fail_commit_memory(addr, size, exec); 3200 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg); 3201 } 3202 } 3203 3204 void os::pd_commit_memory_or_exit(char* addr, size_t size, 3205 size_t alignment_hint, bool exec, 3206 const char* mesg) { 3207 // alignment_hint is ignored on this OS 3208 pd_commit_memory_or_exit(addr, size, exec, mesg); 3209 } 3210 3211 bool os::pd_uncommit_memory(char* addr, size_t bytes) { 3212 if (bytes == 0) { 3213 // Don't bother the OS with noops. 
3214 return true; 3215 } 3216 assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries"); 3217 assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks"); 3218 return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0); 3219 } 3220 3221 bool os::pd_release_memory(char* addr, size_t bytes) { 3222 return VirtualFree(addr, 0, MEM_RELEASE) != 0; 3223 } 3224 3225 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 3226 return os::commit_memory(addr, size, !ExecMem); 3227 } 3228 3229 bool os::remove_stack_guard_pages(char* addr, size_t size) { 3230 return os::uncommit_memory(addr, size); 3231 } 3232 3233 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) { 3234 uint count = 0; 3235 bool ret = false; 3236 size_t bytes_remaining = bytes; 3237 char * next_protect_addr = addr; 3238 3239 // Use VirtualQuery() to get the chunk size. 3240 while (bytes_remaining) { 3241 MEMORY_BASIC_INFORMATION alloc_info; 3242 if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) { 3243 return false; 3244 } 3245 3246 size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3247 // We used different API at allocate_pages_individually() based on UseNUMAInterleaving, 3248 // but we don't distinguish here as both cases are protected by same API. 
3249 ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0; 3250 warning("Failed protecting pages individually for chunk #%u", count); 3251 if (!ret) { 3252 return false; 3253 } 3254 3255 bytes_remaining -= bytes_to_protect; 3256 next_protect_addr += bytes_to_protect; 3257 count++; 3258 } 3259 return ret; 3260 } 3261 3262 // Set protections specified 3263 bool os::protect_memory(char* addr, size_t bytes, ProtType prot, 3264 bool is_committed) { 3265 unsigned int p = 0; 3266 switch (prot) { 3267 case MEM_PROT_NONE: p = PAGE_NOACCESS; break; 3268 case MEM_PROT_READ: p = PAGE_READONLY; break; 3269 case MEM_PROT_RW: p = PAGE_READWRITE; break; 3270 case MEM_PROT_RWX: p = PAGE_EXECUTE_READWRITE; break; 3271 default: 3272 ShouldNotReachHere(); 3273 } 3274 3275 DWORD old_status; 3276 3277 // Strange enough, but on Win32 one can change protection only for committed 3278 // memory, not a big deal anyway, as bytes less or equal than 64K 3279 if (!is_committed) { 3280 commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX, 3281 "cannot commit protection page"); 3282 } 3283 // One cannot use os::guard_memory() here, as on Win32 guard page 3284 // have different (one-shot) semantics, from MSDN on PAGE_GUARD: 3285 // 3286 // Pages in the region become guard pages. Any attempt to access a guard page 3287 // causes the system to raise a STATUS_GUARD_PAGE exception and turn off 3288 // the guard page status. Guard pages thus act as a one-time access alarm. 3289 bool ret; 3290 if (UseNUMAInterleaving) { 3291 // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time, 3292 // so we must protect the chunks individually. 
3293 ret = protect_pages_individually(addr, bytes, p, &old_status); 3294 } else { 3295 ret = VirtualProtect(addr, bytes, p, &old_status) != 0; 3296 } 3297 #ifdef ASSERT 3298 if (!ret) { 3299 int err = os::get_last_error(); 3300 char buf[256]; 3301 size_t buf_len = os::lasterror(buf, sizeof(buf)); 3302 warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT 3303 ") failed; error='%s' (DOS error/errno=%d)", addr, bytes, 3304 buf_len != 0 ? buf : "<no_error_string>", err); 3305 } 3306 #endif 3307 return ret; 3308 } 3309 3310 bool os::guard_memory(char* addr, size_t bytes) { 3311 DWORD old_status; 3312 return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0; 3313 } 3314 3315 bool os::unguard_memory(char* addr, size_t bytes) { 3316 DWORD old_status; 3317 return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0; 3318 } 3319 3320 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { } 3321 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { } 3322 void os::numa_make_global(char *addr, size_t bytes) { } 3323 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { } 3324 bool os::numa_topology_changed() { return false; } 3325 size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); } 3326 int os::numa_get_group_id() { return 0; } 3327 size_t os::numa_get_leaf_groups(int *ids, size_t size) { 3328 if (numa_node_list_holder.get_count() == 0 && size > 0) { 3329 // Provide an answer for UMA systems 3330 ids[0] = 0; 3331 return 1; 3332 } else { 3333 // check for size bigger than actual groups_num 3334 size = MIN2(size, numa_get_groups_num()); 3335 for (int i = 0; i < (int)size; i++) { 3336 ids[i] = numa_node_list_holder.get_node_list_entry(i); 3337 } 3338 return size; 3339 } 3340 } 3341 3342 bool os::get_page_info(char *start, page_info* info) { 3343 return false; 3344 } 3345 3346 char *os::scan_pages(char *start, char* end, page_info* 
page_expected, 3347 page_info* page_found) { 3348 return end; 3349 } 3350 3351 char* os::non_memory_address_word() { 3352 // Must never look like an address returned by reserve_memory, 3353 // even in its subfields (as defined by the CPU immediate fields, 3354 // if the CPU splits constants across multiple instructions). 3355 return (char*)-1; 3356 } 3357 3358 #define MAX_ERROR_COUNT 100 3359 #define SYS_THREAD_ERROR 0xffffffffUL 3360 3361 void os::pd_start_thread(Thread* thread) { 3362 DWORD ret = ResumeThread(thread->osthread()->thread_handle()); 3363 // Returns previous suspend state: 3364 // 0: Thread was not suspended 3365 // 1: Thread is running now 3366 // >1: Thread is still suspended. 3367 assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back 3368 } 3369 3370 class HighResolutionInterval : public CHeapObj<mtThread> { 3371 // The default timer resolution seems to be 10 milliseconds. 3372 // (Where is this written down?) 3373 // If someone wants to sleep for only a fraction of the default, 3374 // then we set the timer resolution down to 1 millisecond for 3375 // the duration of their interval. 3376 // We carefully set the resolution back, since otherwise we 3377 // seem to incur an overhead (3%?) that we don't need. 3378 // CONSIDER: if ms is small, say 3, then we should run with a high resolution time. 3379 // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod(). 3380 // Alternatively, we could compute the relative error (503/500 = .6%) and only use 3381 // timeBeginPeriod() if the relative error exceeded some threshold. 3382 // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and 3383 // to decreased efficiency related to increased timer "tick" rates. We want to minimize 3384 // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high 3385 // resolution timers running. 
3386 private: 3387 jlong resolution; 3388 public: 3389 HighResolutionInterval(jlong ms) { 3390 resolution = ms % 10L; 3391 if (resolution != 0) { 3392 MMRESULT result = timeBeginPeriod(1L); 3393 } 3394 } 3395 ~HighResolutionInterval() { 3396 if (resolution != 0) { 3397 MMRESULT result = timeEndPeriod(1L); 3398 } 3399 resolution = 0L; 3400 } 3401 }; 3402 3403 int os::sleep(Thread* thread, jlong ms, bool interruptable) { 3404 jlong limit = (jlong) MAXDWORD; 3405 3406 while (ms > limit) { 3407 int res; 3408 if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) { 3409 return res; 3410 } 3411 ms -= limit; 3412 } 3413 3414 assert(thread == Thread::current(), "thread consistency check"); 3415 OSThread* osthread = thread->osthread(); 3416 OSThreadWaitState osts(osthread, false /* not Object.wait() */); 3417 int result; 3418 if (interruptable) { 3419 assert(thread->is_Java_thread(), "must be java thread"); 3420 JavaThread *jt = (JavaThread *) thread; 3421 ThreadBlockInVM tbivm(jt); 3422 3423 jt->set_suspend_equivalent(); 3424 // cleared by handle_special_suspend_equivalent_condition() or 3425 // java_suspend_self() via check_and_wait_while_suspended() 3426 3427 HANDLE events[1]; 3428 events[0] = osthread->interrupt_event(); 3429 HighResolutionInterval *phri=NULL; 3430 if (!ForceTimeHighResolution) { 3431 phri = new HighResolutionInterval(ms); 3432 } 3433 if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) { 3434 result = OS_TIMEOUT; 3435 } else { 3436 ResetEvent(osthread->interrupt_event()); 3437 osthread->set_interrupted(false); 3438 result = OS_INTRPT; 3439 } 3440 delete phri; //if it is NULL, harmless 3441 3442 // were we externally suspended while we were waiting? 3443 jt->check_and_wait_while_suspended(); 3444 } else { 3445 assert(!thread->is_Java_thread(), "must not be java thread"); 3446 Sleep((long) ms); 3447 result = OS_TIMEOUT; 3448 } 3449 return result; 3450 } 3451 3452 // Short sleep, direct OS call. 
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {  // sleep forever ...
    Sleep(100000);  // ... 100 seconds at a time
  }
}

typedef BOOL (WINAPI * STTSignature)(void);

void os::naked_yield() {
  // Consider passing back the return value from SwitchToThread().
  SwitchToThread();
}

// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.

// Default mapping from Java priorities (0..CriticalPriority) to
// Win32 thread priorities; note adjacent Java priorities share entries.
int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};

// Alternate, more aggressive mapping selected by -XX:ThreadPriorityPolicy=1.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};

// One-time initialization of the priority table; installs the policy-1
// table if requested and applies UseCriticalJavaThreadPriority.
static int prio_init() {
  // If ThreadPriorityPolicy is 1, switch tables
  if (ThreadPriorityPolicy == 1) {
    int i;
    for (i = 0; i < CriticalPriority + 1; i++) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  if (UseCriticalJavaThreadPriority) {
    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
  }
  return 0;
}

// Apply an (already-mapped) OS priority to the thread's Win32 handle.
OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) return OS_OK;
  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
  return ret ? OS_OK : OS_ERR;
}

// Read the thread's current OS priority into *priority_ptr.
OSReturn os::get_native_priority(const Thread* const thread,
                                 int* priority_ptr) {
  if (!UseThreadPriorities) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

// Post an interrupt to 'thread': set the interrupted flag, signal the
// interrupt event, and unpark both the JSR-166 parker and the ParkEvent.
void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread ||
         Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread()) {
    ((JavaThread*)thread)->parker()->unpark();
  }

  ParkEvent * ev = thread->_ParkEvent;
  if (ev != NULL) ev->unpark();
}

// Query (and optionally clear) the thread's interrupted state.
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. By checking thread interrupt event to see
  // if the thread gets real interrupt thus prevent spurious wakeup.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}

// Gets a pc (hint) for a running thread. Currently used only for profiling.
3594 ExtendedPC os::get_thread_pc(Thread* thread) { 3595 CONTEXT context; 3596 context.ContextFlags = CONTEXT_CONTROL; 3597 HANDLE handle = thread->osthread()->thread_handle(); 3598 if (GetThreadContext(handle, &context)) { 3599 #ifdef _M_AMD64 3600 return ExtendedPC((address) context.Rip); 3601 #else 3602 return ExtendedPC((address) context.Eip); 3603 #endif 3604 } else { 3605 return ExtendedPC(NULL); 3606 } 3607 } 3608 3609 // GetCurrentThreadId() returns DWORD 3610 intx os::current_thread_id() { return GetCurrentThreadId(); } 3611 3612 static int _initial_pid = 0; 3613 3614 int os::current_process_id() { 3615 return (_initial_pid ? _initial_pid : _getpid()); 3616 } 3617 3618 int os::win32::_vm_page_size = 0; 3619 int os::win32::_vm_allocation_granularity = 0; 3620 int os::win32::_processor_type = 0; 3621 // Processor level is not available on non-NT systems, use vm_version instead 3622 int os::win32::_processor_level = 0; 3623 julong os::win32::_physical_memory = 0; 3624 size_t os::win32::_default_stack_size = 0; 3625 3626 intx os::win32::_os_thread_limit = 0; 3627 volatile intx os::win32::_os_thread_count = 0; 3628 3629 bool os::win32::_is_windows_server = false; 3630 3631 // 6573254 3632 // Currently, the bug is observed across all the supported Windows releases, 3633 // including the latest one (as of this writing - Windows Server 2012 R2) 3634 bool os::win32::_has_exit_bug = true; 3635 3636 void os::win32::initialize_system_info() { 3637 SYSTEM_INFO si; 3638 GetSystemInfo(&si); 3639 _vm_page_size = si.dwPageSize; 3640 _vm_allocation_granularity = si.dwAllocationGranularity; 3641 _processor_type = si.dwProcessorType; 3642 _processor_level = si.wProcessorLevel; 3643 set_processor_count(si.dwNumberOfProcessors); 3644 3645 MEMORYSTATUSEX ms; 3646 ms.dwLength = sizeof(ms); 3647 3648 // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual, 3649 // dwMemoryLoad (% of memory in use) 3650 GlobalMemoryStatusEx(&ms); 3651 
_physical_memory = ms.ullTotalPhys; 3652 3653 if (FLAG_IS_DEFAULT(MaxRAM)) { 3654 // Adjust MaxRAM according to the maximum virtual address space available. 3655 FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual)); 3656 } 3657 3658 OSVERSIONINFOEX oi; 3659 oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); 3660 GetVersionEx((OSVERSIONINFO*)&oi); 3661 switch (oi.dwPlatformId) { 3662 case VER_PLATFORM_WIN32_NT: 3663 { 3664 int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion; 3665 if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER || 3666 oi.wProductType == VER_NT_SERVER) { 3667 _is_windows_server = true; 3668 } 3669 } 3670 break; 3671 default: fatal("Unknown platform"); 3672 } 3673 3674 _default_stack_size = os::current_stack_size(); 3675 assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size"); 3676 assert((_default_stack_size & (_vm_page_size - 1)) == 0, 3677 "stack size not a multiple of page size"); 3678 3679 initialize_performance_counter(); 3680 } 3681 3682 3683 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, 3684 int ebuflen) { 3685 char path[MAX_PATH]; 3686 DWORD size; 3687 DWORD pathLen = (DWORD)sizeof(path); 3688 HINSTANCE result = NULL; 3689 3690 // only allow library name without path component 3691 assert(strchr(name, '\\') == NULL, "path not allowed"); 3692 assert(strchr(name, ':') == NULL, "path not allowed"); 3693 if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) { 3694 jio_snprintf(ebuf, ebuflen, 3695 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name); 3696 return NULL; 3697 } 3698 3699 // search system directory 3700 if ((size = GetSystemDirectory(path, pathLen)) > 0) { 3701 if (size >= pathLen) { 3702 return NULL; // truncated 3703 } 3704 if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) { 3705 return NULL; // truncated 3706 } 3707 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3708 return result; 
3709 } 3710 } 3711 3712 // try Windows directory 3713 if ((size = GetWindowsDirectory(path, pathLen)) > 0) { 3714 if (size >= pathLen) { 3715 return NULL; // truncated 3716 } 3717 if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) { 3718 return NULL; // truncated 3719 } 3720 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3721 return result; 3722 } 3723 } 3724 3725 jio_snprintf(ebuf, ebuflen, 3726 "os::win32::load_windows_dll() cannot load %s from system directories.", name); 3727 return NULL; 3728 } 3729 3730 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS) 3731 #define EXIT_TIMEOUT 300000 /* 5 minutes */ 3732 3733 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) { 3734 InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect); 3735 return TRUE; 3736 } 3737 3738 int os::win32::exit_process_or_thread(Ept what, int exit_code) { 3739 // Basic approach: 3740 // - Each exiting thread registers its intent to exit and then does so. 3741 // - A thread trying to terminate the process must wait for all 3742 // threads currently exiting to complete their exit. 3743 3744 if (os::win32::has_exit_bug()) { 3745 // The array holds handles of the threads that have started exiting by calling 3746 // _endthreadex(). 3747 // Should be large enough to avoid blocking the exiting thread due to lack of 3748 // a free slot. 3749 static HANDLE handles[MAXIMUM_THREADS_TO_KEEP]; 3750 static int handle_count = 0; 3751 3752 static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT; 3753 static CRITICAL_SECTION crit_sect; 3754 static volatile jint process_exiting = 0; 3755 int i, j; 3756 DWORD res; 3757 HANDLE hproc, hthr; 3758 3759 // We only attempt to register threads until a process exiting 3760 // thread manages to set the process_exiting flag. 
Any threads 3761 // that come through here after the process_exiting flag is set 3762 // are unregistered and will be caught in the SuspendThread() 3763 // infinite loop below. 3764 bool registered = false; 3765 3766 // The first thread that reached this point, initializes the critical section. 3767 if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) { 3768 warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__); 3769 } else if (OrderAccess::load_acquire(&process_exiting) == 0) { 3770 if (what != EPT_THREAD) { 3771 // Atomically set process_exiting before the critical section 3772 // to increase the visibility between racing threads. 3773 Atomic::cmpxchg((jint)GetCurrentThreadId(), &process_exiting, 0); 3774 } 3775 EnterCriticalSection(&crit_sect); 3776 3777 if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) { 3778 // Remove from the array those handles of the threads that have completed exiting. 3779 for (i = 0, j = 0; i < handle_count; ++i) { 3780 res = WaitForSingleObject(handles[i], 0 /* don't wait */); 3781 if (res == WAIT_TIMEOUT) { 3782 handles[j++] = handles[i]; 3783 } else { 3784 if (res == WAIT_FAILED) { 3785 warning("WaitForSingleObject failed (%u) in %s: %d\n", 3786 GetLastError(), __FILE__, __LINE__); 3787 } 3788 // Don't keep the handle, if we failed waiting for it. 3789 CloseHandle(handles[i]); 3790 } 3791 } 3792 3793 // If there's no free slot in the array of the kept handles, we'll have to 3794 // wait until at least one thread completes exiting. 3795 if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) { 3796 // Raise the priority of the oldest exiting thread to increase its chances 3797 // to complete sooner. 
3798 SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL); 3799 res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT); 3800 if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) { 3801 i = (res - WAIT_OBJECT_0); 3802 handle_count = MAXIMUM_THREADS_TO_KEEP - 1; 3803 for (; i < handle_count; ++i) { 3804 handles[i] = handles[i + 1]; 3805 } 3806 } else { 3807 warning("WaitForMultipleObjects %s (%u) in %s: %d\n", 3808 (res == WAIT_FAILED ? "failed" : "timed out"), 3809 GetLastError(), __FILE__, __LINE__); 3810 // Don't keep handles, if we failed waiting for them. 3811 for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) { 3812 CloseHandle(handles[i]); 3813 } 3814 handle_count = 0; 3815 } 3816 } 3817 3818 // Store a duplicate of the current thread handle in the array of handles. 3819 hproc = GetCurrentProcess(); 3820 hthr = GetCurrentThread(); 3821 if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count], 3822 0, FALSE, DUPLICATE_SAME_ACCESS)) { 3823 warning("DuplicateHandle failed (%u) in %s: %d\n", 3824 GetLastError(), __FILE__, __LINE__); 3825 3826 // We can't register this thread (no more handles) so this thread 3827 // may be racing with a thread that is calling exit(). If the thread 3828 // that is calling exit() has managed to set the process_exiting 3829 // flag, then this thread will be caught in the SuspendThread() 3830 // infinite loop below which closes that race. A small timing 3831 // window remains before the process_exiting flag is set, but it 3832 // is only exposed when we are out of handles. 3833 } else { 3834 ++handle_count; 3835 registered = true; 3836 3837 // The current exiting thread has stored its handle in the array, and now 3838 // should leave the critical section before calling _endthreadex(). 
3839 } 3840 3841 } else if (what != EPT_THREAD && handle_count > 0) { 3842 jlong start_time, finish_time, timeout_left; 3843 // Before ending the process, make sure all the threads that had called 3844 // _endthreadex() completed. 3845 3846 // Set the priority level of the current thread to the same value as 3847 // the priority level of exiting threads. 3848 // This is to ensure it will be given a fair chance to execute if 3849 // the timeout expires. 3850 hthr = GetCurrentThread(); 3851 SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL); 3852 start_time = os::javaTimeNanos(); 3853 finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L); 3854 for (i = 0; ; ) { 3855 int portion_count = handle_count - i; 3856 if (portion_count > MAXIMUM_WAIT_OBJECTS) { 3857 portion_count = MAXIMUM_WAIT_OBJECTS; 3858 } 3859 for (j = 0; j < portion_count; ++j) { 3860 SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL); 3861 } 3862 timeout_left = (finish_time - start_time) / 1000000L; 3863 if (timeout_left < 0) { 3864 timeout_left = 0; 3865 } 3866 res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left); 3867 if (res == WAIT_FAILED || res == WAIT_TIMEOUT) { 3868 warning("WaitForMultipleObjects %s (%u) in %s: %d\n", 3869 (res == WAIT_FAILED ? "failed" : "timed out"), 3870 GetLastError(), __FILE__, __LINE__); 3871 // Reset portion_count so we close the remaining 3872 // handles due to this error. 
3873 portion_count = handle_count - i; 3874 } 3875 for (j = 0; j < portion_count; ++j) { 3876 CloseHandle(handles[i + j]); 3877 } 3878 if ((i += portion_count) >= handle_count) { 3879 break; 3880 } 3881 start_time = os::javaTimeNanos(); 3882 } 3883 handle_count = 0; 3884 } 3885 3886 LeaveCriticalSection(&crit_sect); 3887 } 3888 3889 if (!registered && 3890 OrderAccess::load_acquire(&process_exiting) != 0 && 3891 process_exiting != (jint)GetCurrentThreadId()) { 3892 // Some other thread is about to call exit(), so we don't let 3893 // the current unregistered thread proceed to exit() or _endthreadex() 3894 while (true) { 3895 SuspendThread(GetCurrentThread()); 3896 // Avoid busy-wait loop, if SuspendThread() failed. 3897 Sleep(EXIT_TIMEOUT); 3898 } 3899 } 3900 } 3901 3902 // We are here if either 3903 // - there's no 'race at exit' bug on this OS release; 3904 // - initialization of the critical section failed (unlikely); 3905 // - the current thread has registered itself and left the critical section; 3906 // - the process-exiting thread has raised the flag and left the critical section. 3907 if (what == EPT_THREAD) { 3908 _endthreadex((unsigned)exit_code); 3909 } else if (what == EPT_PROCESS) { 3910 ::exit(exit_code); 3911 } else { 3912 _exit(exit_code); 3913 } 3914 3915 // Should not reach here 3916 return exit_code; 3917 } 3918 3919 #undef EXIT_TIMEOUT 3920 3921 void os::win32::setmode_streams() { 3922 _setmode(_fileno(stdin), _O_BINARY); 3923 _setmode(_fileno(stdout), _O_BINARY); 3924 _setmode(_fileno(stderr), _O_BINARY); 3925 } 3926 3927 3928 bool os::is_debugger_attached() { 3929 return IsDebuggerPresent() ? 
true : false; 3930 } 3931 3932 3933 void os::wait_for_keypress_at_exit(void) { 3934 if (PauseAtExit) { 3935 fprintf(stderr, "Press any key to continue...\n"); 3936 fgetc(stdin); 3937 } 3938 } 3939 3940 3941 bool os::message_box(const char* title, const char* message) { 3942 int result = MessageBox(NULL, message, title, 3943 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY); 3944 return result == IDYES; 3945 } 3946 3947 #ifndef PRODUCT 3948 #ifndef _WIN64 3949 // Helpers to check whether NX protection is enabled 3950 int nx_exception_filter(_EXCEPTION_POINTERS *pex) { 3951 if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 3952 pex->ExceptionRecord->NumberParameters > 0 && 3953 pex->ExceptionRecord->ExceptionInformation[0] == 3954 EXCEPTION_INFO_EXEC_VIOLATION) { 3955 return EXCEPTION_EXECUTE_HANDLER; 3956 } 3957 return EXCEPTION_CONTINUE_SEARCH; 3958 } 3959 3960 void nx_check_protection() { 3961 // If NX is enabled we'll get an exception calling into code on the stack 3962 char code[] = { (char)0xC3 }; // ret 3963 void *code_ptr = (void *)code; 3964 __try { 3965 __asm call code_ptr 3966 } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) { 3967 tty->print_raw_cr("NX protection detected."); 3968 } 3969 } 3970 #endif // _WIN64 3971 #endif // PRODUCT 3972 3973 // This is called _before_ the global arguments have been parsed 3974 void os::init(void) { 3975 _initial_pid = _getpid(); 3976 3977 init_random(1234567); 3978 3979 win32::initialize_system_info(); 3980 win32::setmode_streams(); 3981 init_page_sizes((size_t) win32::vm_page_size()); 3982 3983 // This may be overridden later when argument processing is done. 
3984 FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false); 3985 3986 // Initialize main_process and main_thread 3987 main_process = GetCurrentProcess(); // Remember main_process is a pseudo handle 3988 if (!DuplicateHandle(main_process, GetCurrentThread(), main_process, 3989 &main_thread, THREAD_ALL_ACCESS, false, 0)) { 3990 fatal("DuplicateHandle failed\n"); 3991 } 3992 main_thread_id = (int) GetCurrentThreadId(); 3993 3994 // initialize fast thread access - only used for 32-bit 3995 win32::initialize_thread_ptr_offset(); 3996 } 3997 3998 // To install functions for atexit processing 3999 extern "C" { 4000 static void perfMemory_exit_helper() { 4001 perfMemory_exit(); 4002 } 4003 } 4004 4005 static jint initSock(); 4006 4007 // this is called _after_ the global arguments have been parsed 4008 jint os::init_2(void) { 4009 // Allocate a single page and mark it as readable for safepoint polling 4010 address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY); 4011 guarantee(polling_page != NULL, "Reserve Failed for polling page"); 4012 4013 address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY); 4014 guarantee(return_page != NULL, "Commit Failed for polling page"); 4015 4016 os::set_polling_page(polling_page); 4017 log_info(os)("SafePoint Polling address: " INTPTR_FORMAT, p2i(polling_page)); 4018 4019 if (!UseMembar) { 4020 address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE); 4021 guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page"); 4022 4023 return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE); 4024 guarantee(return_page != NULL, "Commit Failed for memory serialize page"); 4025 4026 os::set_memory_serialize_page(mem_serialize_page); 4027 log_info(os)("Memory Serialize Page address: " INTPTR_FORMAT, p2i(mem_serialize_page)); 4028 } 4029 
4030 // Setup Windows Exceptions 4031 4032 // for debugging float code generation bugs 4033 if (ForceFloatExceptions) { 4034 #ifndef _WIN64 4035 static long fp_control_word = 0; 4036 __asm { fstcw fp_control_word } 4037 // see Intel PPro Manual, Vol. 2, p 7-16 4038 const long precision = 0x20; 4039 const long underflow = 0x10; 4040 const long overflow = 0x08; 4041 const long zero_div = 0x04; 4042 const long denorm = 0x02; 4043 const long invalid = 0x01; 4044 fp_control_word |= invalid; 4045 __asm { fldcw fp_control_word } 4046 #endif 4047 } 4048 4049 // If stack_commit_size is 0, windows will reserve the default size, 4050 // but only commit a small portion of it. 4051 size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size()); 4052 size_t default_reserve_size = os::win32::default_stack_size(); 4053 size_t actual_reserve_size = stack_commit_size; 4054 if (stack_commit_size < default_reserve_size) { 4055 // If stack_commit_size == 0, we want this too 4056 actual_reserve_size = default_reserve_size; 4057 } 4058 4059 // Check minimum allowable stack size for thread creation and to initialize 4060 // the java system classes, including StackOverflowError - depends on page 4061 // size. Add two 4K pages for compiler2 recursion in main thread. 4062 // Add in 4*BytesPerWord 4K pages to account for VM stack during 4063 // class initialization depending on 32 or 64 bit VM. 4064 size_t min_stack_allowed = 4065 (size_t)(JavaThread::stack_guard_zone_size() + 4066 JavaThread::stack_shadow_zone_size() + 4067 (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K); 4068 4069 min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size()); 4070 4071 if (actual_reserve_size < min_stack_allowed) { 4072 tty->print_cr("\nThe Java thread stack size specified is too small. " 4073 "Specify at least %dk", 4074 min_stack_allowed / K); 4075 return JNI_ERR; 4076 } 4077 4078 JavaThread::set_stack_size_at_create(stack_commit_size); 4079 4080 // Calculate theoretical max. 
size of Threads to guard gainst artifical 4081 // out-of-memory situations, where all available address-space has been 4082 // reserved by thread stacks. 4083 assert(actual_reserve_size != 0, "Must have a stack"); 4084 4085 // Calculate the thread limit when we should start doing Virtual Memory 4086 // banging. Currently when the threads will have used all but 200Mb of space. 4087 // 4088 // TODO: consider performing a similar calculation for commit size instead 4089 // as reserve size, since on a 64-bit platform we'll run into that more 4090 // often than running out of virtual memory space. We can use the 4091 // lower value of the two calculations as the os_thread_limit. 4092 size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K); 4093 win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size); 4094 4095 // at exit methods are called in the reverse order of their registration. 4096 // there is no limit to the number of functions registered. atexit does 4097 // not set errno. 4098 4099 if (PerfAllowAtExitRegistration) { 4100 // only register atexit functions if PerfAllowAtExitRegistration is set. 4101 // atexit functions can be delayed until process exit time, which 4102 // can be problematic for embedded VM situations. Embedded VMs should 4103 // call DestroyJavaVM() to assure that VM resources are released. 4104 4105 // note: perfMemory_exit_helper atexit function may be removed in 4106 // the future if the appropriate cleanup code can be added to the 4107 // VM_Exit VMOperation's doit method. 
4108 if (atexit(perfMemory_exit_helper) != 0) { 4109 warning("os::init_2 atexit(perfMemory_exit_helper) failed"); 4110 } 4111 } 4112 4113 #ifndef _WIN64 4114 // Print something if NX is enabled (win32 on AMD64) 4115 NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection()); 4116 #endif 4117 4118 // initialize thread priority policy 4119 prio_init(); 4120 4121 if (UseNUMA && !ForceNUMA) { 4122 UseNUMA = false; // We don't fully support this yet 4123 } 4124 4125 if (UseNUMAInterleaving) { 4126 // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag 4127 bool success = numa_interleaving_init(); 4128 if (!success) UseNUMAInterleaving = false; 4129 } 4130 4131 if (initSock() != JNI_OK) { 4132 return JNI_ERR; 4133 } 4134 4135 return JNI_OK; 4136 } 4137 4138 // Mark the polling page as unreadable 4139 void os::make_polling_page_unreadable(void) { 4140 DWORD old_status; 4141 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), 4142 PAGE_NOACCESS, &old_status)) { 4143 fatal("Could not disable polling page"); 4144 } 4145 } 4146 4147 // Mark the polling page as readable 4148 void os::make_polling_page_readable(void) { 4149 DWORD old_status; 4150 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), 4151 PAGE_READONLY, &old_status)) { 4152 fatal("Could not enable polling page"); 4153 } 4154 } 4155 4156 4157 int os::stat(const char *path, struct stat *sbuf) { 4158 char pathbuf[MAX_PATH]; 4159 if (strlen(path) > MAX_PATH - 1) { 4160 errno = ENAMETOOLONG; 4161 return -1; 4162 } 4163 os::native_path(strcpy(pathbuf, path)); 4164 int ret = ::stat(pathbuf, sbuf); 4165 if (sbuf != NULL && UseUTCFileTimestamp) { 4166 // Fix for 6539723. st_mtime returned from stat() is dependent on 4167 // the system timezone and so can return different values for the 4168 // same file if/when daylight savings time changes. This adjustment 4169 // makes sure the same timestamp is returned regardless of the TZ. 
4170 // 4171 // See: 4172 // http://msdn.microsoft.com/library/ 4173 // default.asp?url=/library/en-us/sysinfo/base/ 4174 // time_zone_information_str.asp 4175 // and 4176 // http://msdn.microsoft.com/library/default.asp?url= 4177 // /library/en-us/sysinfo/base/settimezoneinformation.asp 4178 // 4179 // NOTE: there is a insidious bug here: If the timezone is changed 4180 // after the call to stat() but before 'GetTimeZoneInformation()', then 4181 // the adjustment we do here will be wrong and we'll return the wrong 4182 // value (which will likely end up creating an invalid class data 4183 // archive). Absent a better API for this, or some time zone locking 4184 // mechanism, we'll have to live with this risk. 4185 TIME_ZONE_INFORMATION tz; 4186 DWORD tzid = GetTimeZoneInformation(&tz); 4187 int daylightBias = 4188 (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias; 4189 sbuf->st_mtime += (tz.Bias + daylightBias) * 60; 4190 } 4191 return ret; 4192 } 4193 4194 4195 #define FT2INT64(ft) \ 4196 ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime)) 4197 4198 4199 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) 4200 // are used by JVM M&M and JVMTI to get user+sys or user CPU time 4201 // of a thread. 4202 // 4203 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns 4204 // the fast estimate available on the platform. 4205 4206 // current_thread_cpu_time() is not optimized for Windows yet 4207 jlong os::current_thread_cpu_time() { 4208 // return user + sys since the cost is the same 4209 return os::thread_cpu_time(Thread::current(), true /* user+sys */); 4210 } 4211 4212 jlong os::thread_cpu_time(Thread* thread) { 4213 // consistent with what current_thread_cpu_time() returns. 
4214 return os::thread_cpu_time(thread, true /* user+sys */); 4215 } 4216 4217 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 4218 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 4219 } 4220 4221 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) { 4222 // This code is copy from clasic VM -> hpi::sysThreadCPUTime 4223 // If this function changes, os::is_thread_cpu_time_supported() should too 4224 FILETIME CreationTime; 4225 FILETIME ExitTime; 4226 FILETIME KernelTime; 4227 FILETIME UserTime; 4228 4229 if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime, 4230 &ExitTime, &KernelTime, &UserTime) == 0) { 4231 return -1; 4232 } else if (user_sys_cpu_time) { 4233 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100; 4234 } else { 4235 return FT2INT64(UserTime) * 100; 4236 } 4237 } 4238 4239 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4240 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4241 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4242 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4243 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4244 } 4245 4246 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4247 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4248 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4249 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4250 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4251 } 4252 4253 bool os::is_thread_cpu_time_supported() { 4254 // see os::thread_cpu_time 4255 FILETIME CreationTime; 4256 FILETIME ExitTime; 4257 FILETIME KernelTime; 4258 FILETIME UserTime; 4259 4260 if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime, 4261 &KernelTime, &UserTime) == 0) { 4262 return false; 4263 } else { 4264 return true; 4265 } 4266 } 
// Windows doesn't provide a loadavg primitive so this is stubbed out for now.
// It does have primitives (PDH API) to get CPU usage and run queue length.
// "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
// If we wanted to implement loadavg on Windows, we have a few options:
//
// a) Query CPU usage and run queue length and "fake" an answer by
//    returning the CPU usage if it's under 100%, and the run queue
//    length otherwise.  It turns out that querying is pretty slow
//    on Windows, on the order of 200 microseconds on a fast machine.
//    Note that on Windows the CPU usage value is the % usage
//    since the last time the API was called (and the first call
//    returns 100%), so we'd have to deal with that as well.
//
// b) Sample the "fake" answer using a sampling thread and store
//    the answer in a global variable.  The call to loadavg would
//    just return the value of the global, avoiding the slow query.
//
// c) Sample a better answer using exponential decay to smooth the
//    value.  This is basically the algorithm used by UNIX kernels.
//
// Note that sampling thread starvation could affect both (b) and (c).
4289 int os::loadavg(double loadavg[], int nelem) { 4290 return -1; 4291 } 4292 4293 4294 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield() 4295 bool os::dont_yield() { 4296 return DontYieldALot; 4297 } 4298 4299 // This method is a slightly reworked copy of JDK's sysOpen 4300 // from src/windows/hpi/src/sys_api_md.c 4301 4302 int os::open(const char *path, int oflag, int mode) { 4303 char pathbuf[MAX_PATH]; 4304 4305 if (strlen(path) > MAX_PATH - 1) { 4306 errno = ENAMETOOLONG; 4307 return -1; 4308 } 4309 os::native_path(strcpy(pathbuf, path)); 4310 return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); 4311 } 4312 4313 FILE* os::open(int fd, const char* mode) { 4314 return ::_fdopen(fd, mode); 4315 } 4316 4317 // Is a (classpath) directory empty? 4318 bool os::dir_is_empty(const char* path) { 4319 WIN32_FIND_DATA fd; 4320 HANDLE f = FindFirstFile(path, &fd); 4321 if (f == INVALID_HANDLE_VALUE) { 4322 return true; 4323 } 4324 FindClose(f); 4325 return false; 4326 } 4327 4328 // create binary file, rewriting existing file if required 4329 int os::create_binary_file(const char* path, bool rewrite_existing) { 4330 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY; 4331 if (!rewrite_existing) { 4332 oflags |= _O_EXCL; 4333 } 4334 return ::open(path, oflags, _S_IREAD | _S_IWRITE); 4335 } 4336 4337 // return current position of file pointer 4338 jlong os::current_file_offset(int fd) { 4339 return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR); 4340 } 4341 4342 // move file pointer to the specified offset 4343 jlong os::seek_to_file_offset(int fd, jlong offset) { 4344 return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET); 4345 } 4346 4347 4348 jlong os::lseek(int fd, jlong offset, int whence) { 4349 return (jlong) ::_lseeki64(fd, offset, whence); 4350 } 4351 4352 size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) { 4353 OVERLAPPED ov; 4354 DWORD nread; 4355 BOOL result; 4356 4357 ZeroMemory(&ov, 
sizeof(ov)); 4358 ov.Offset = (DWORD)offset; 4359 ov.OffsetHigh = (DWORD)(offset >> 32); 4360 4361 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4362 4363 result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov); 4364 4365 return result ? nread : 0; 4366 } 4367 4368 4369 // This method is a slightly reworked copy of JDK's sysNativePath 4370 // from src/windows/hpi/src/path_md.c 4371 4372 // Convert a pathname to native format. On win32, this involves forcing all 4373 // separators to be '\\' rather than '/' (both are legal inputs, but Win95 4374 // sometimes rejects '/') and removing redundant separators. The input path is 4375 // assumed to have been converted into the character encoding used by the local 4376 // system. Because this might be a double-byte encoding, care is taken to 4377 // treat double-byte lead characters correctly. 4378 // 4379 // This procedure modifies the given path in place, as the result is never 4380 // longer than the original. There is no error return; this operation always 4381 // succeeds. 4382 char * os::native_path(char *path) { 4383 char *src = path, *dst = path, *end = path; 4384 char *colon = NULL; // If a drive specifier is found, this will 4385 // point to the colon following the drive letter 4386 4387 // Assumption: '/', '\\', ':', and drive letters are never lead bytes 4388 assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\')) 4389 && (!::IsDBCSLeadByte(':'))), "Illegal lead byte"); 4390 4391 // Check for leading separators 4392 #define isfilesep(c) ((c) == '/' || (c) == '\\') 4393 while (isfilesep(*src)) { 4394 src++; 4395 } 4396 4397 if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') { 4398 // Remove leading separators if followed by drive specifier. This 4399 // hack is necessary to support file URLs containing drive 4400 // specifiers (e.g., "file://c:/path"). As a side effect, 4401 // "/c:/path" can be used as an alternative to "c:/path". 
4402 *dst++ = *src++; 4403 colon = dst; 4404 *dst++ = ':'; 4405 src++; 4406 } else { 4407 src = path; 4408 if (isfilesep(src[0]) && isfilesep(src[1])) { 4409 // UNC pathname: Retain first separator; leave src pointed at 4410 // second separator so that further separators will be collapsed 4411 // into the second separator. The result will be a pathname 4412 // beginning with "\\\\" followed (most likely) by a host name. 4413 src = dst = path + 1; 4414 path[0] = '\\'; // Force first separator to '\\' 4415 } 4416 } 4417 4418 end = dst; 4419 4420 // Remove redundant separators from remainder of path, forcing all 4421 // separators to be '\\' rather than '/'. Also, single byte space 4422 // characters are removed from the end of the path because those 4423 // are not legal ending characters on this operating system. 4424 // 4425 while (*src != '\0') { 4426 if (isfilesep(*src)) { 4427 *dst++ = '\\'; src++; 4428 while (isfilesep(*src)) src++; 4429 if (*src == '\0') { 4430 // Check for trailing separator 4431 end = dst; 4432 if (colon == dst - 2) break; // "z:\\" 4433 if (dst == path + 1) break; // "\\" 4434 if (dst == path + 2 && isfilesep(path[0])) { 4435 // "\\\\" is not collapsed to "\\" because "\\\\" marks the 4436 // beginning of a UNC pathname. Even though it is not, by 4437 // itself, a valid UNC pathname, we leave it as is in order 4438 // to be consistent with the path canonicalizer as well 4439 // as the win32 APIs, which treat this case as an invalid 4440 // UNC pathname rather than as an alias for the root 4441 // directory of the current drive. 
4442 break; 4443 } 4444 end = --dst; // Path does not denote a root directory, so 4445 // remove trailing separator 4446 break; 4447 } 4448 end = dst; 4449 } else { 4450 if (::IsDBCSLeadByte(*src)) { // Copy a double-byte character 4451 *dst++ = *src++; 4452 if (*src) *dst++ = *src++; 4453 end = dst; 4454 } else { // Copy a single-byte character 4455 char c = *src++; 4456 *dst++ = c; 4457 // Space is not a legal ending character 4458 if (c != ' ') end = dst; 4459 } 4460 } 4461 } 4462 4463 *end = '\0'; 4464 4465 // For "z:", add "." to work around a bug in the C runtime library 4466 if (colon == dst - 1) { 4467 path[2] = '.'; 4468 path[3] = '\0'; 4469 } 4470 4471 return path; 4472 } 4473 4474 // This code is a copy of JDK's sysSetLength 4475 // from src/windows/hpi/src/sys_api_md.c 4476 4477 int os::ftruncate(int fd, jlong length) { 4478 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4479 long high = (long)(length >> 32); 4480 DWORD ret; 4481 4482 if (h == (HANDLE)(-1)) { 4483 return -1; 4484 } 4485 4486 ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN); 4487 if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) { 4488 return -1; 4489 } 4490 4491 if (::SetEndOfFile(h) == FALSE) { 4492 return -1; 4493 } 4494 4495 return 0; 4496 } 4497 4498 int os::get_fileno(FILE* fp) { 4499 return _fileno(fp); 4500 } 4501 4502 // This code is a copy of JDK's sysSync 4503 // from src/windows/hpi/src/sys_api_md.c 4504 // except for the legacy workaround for a bug in Win 98 4505 4506 int os::fsync(int fd) { 4507 HANDLE handle = (HANDLE)::_get_osfhandle(fd); 4508 4509 if ((!::FlushFileBuffers(handle)) && 4510 (GetLastError() != ERROR_ACCESS_DENIED)) { 4511 // from winerror.h 4512 return -1; 4513 } 4514 return 0; 4515 } 4516 4517 static int nonSeekAvailable(int, long *); 4518 static int stdinAvailable(int, long *); 4519 4520 #define S_ISCHR(mode) (((mode) & _S_IFCHR) == _S_IFCHR) 4521 #define S_ISFIFO(mode) (((mode) & _S_IFIFO) == _S_IFIFO) 4522 4523 // This code is a copy of 
JDK's sysAvailable 4524 // from src/windows/hpi/src/sys_api_md.c 4525 4526 int os::available(int fd, jlong *bytes) { 4527 jlong cur, end; 4528 struct _stati64 stbuf64; 4529 4530 if (::_fstati64(fd, &stbuf64) >= 0) { 4531 int mode = stbuf64.st_mode; 4532 if (S_ISCHR(mode) || S_ISFIFO(mode)) { 4533 int ret; 4534 long lpbytes; 4535 if (fd == 0) { 4536 ret = stdinAvailable(fd, &lpbytes); 4537 } else { 4538 ret = nonSeekAvailable(fd, &lpbytes); 4539 } 4540 (*bytes) = (jlong)(lpbytes); 4541 return ret; 4542 } 4543 if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) { 4544 return FALSE; 4545 } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) { 4546 return FALSE; 4547 } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) { 4548 return FALSE; 4549 } 4550 *bytes = end - cur; 4551 return TRUE; 4552 } else { 4553 return FALSE; 4554 } 4555 } 4556 4557 void os::flockfile(FILE* fp) { 4558 _lock_file(fp); 4559 } 4560 4561 void os::funlockfile(FILE* fp) { 4562 _unlock_file(fp); 4563 } 4564 4565 // This code is a copy of JDK's nonSeekAvailable 4566 // from src/windows/hpi/src/sys_api_md.c 4567 4568 static int nonSeekAvailable(int fd, long *pbytes) { 4569 // This is used for available on non-seekable devices 4570 // (like both named and anonymous pipes, such as pipes 4571 // connected to an exec'd process). 4572 // Standard Input is a special case. 4573 HANDLE han; 4574 4575 if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) { 4576 return FALSE; 4577 } 4578 4579 if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) { 4580 // PeekNamedPipe fails when at EOF. In that case we 4581 // simply make *pbytes = 0 which is consistent with the 4582 // behavior we get on Solaris when an fd is at EOF. 4583 // The only alternative is to raise an Exception, 4584 // which isn't really warranted. 
4585 // 4586 if (::GetLastError() != ERROR_BROKEN_PIPE) { 4587 return FALSE; 4588 } 4589 *pbytes = 0; 4590 } 4591 return TRUE; 4592 } 4593 4594 #define MAX_INPUT_EVENTS 2000 4595 4596 // This code is a copy of JDK's stdinAvailable 4597 // from src/windows/hpi/src/sys_api_md.c 4598 4599 static int stdinAvailable(int fd, long *pbytes) { 4600 HANDLE han; 4601 DWORD numEventsRead = 0; // Number of events read from buffer 4602 DWORD numEvents = 0; // Number of events in buffer 4603 DWORD i = 0; // Loop index 4604 DWORD curLength = 0; // Position marker 4605 DWORD actualLength = 0; // Number of bytes readable 4606 BOOL error = FALSE; // Error holder 4607 INPUT_RECORD *lpBuffer; // Pointer to records of input events 4608 4609 if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) { 4610 return FALSE; 4611 } 4612 4613 // Construct an array of input records in the console buffer 4614 error = ::GetNumberOfConsoleInputEvents(han, &numEvents); 4615 if (error == 0) { 4616 return nonSeekAvailable(fd, pbytes); 4617 } 4618 4619 // lpBuffer must fit into 64K or else PeekConsoleInput fails 4620 if (numEvents > MAX_INPUT_EVENTS) { 4621 numEvents = MAX_INPUT_EVENTS; 4622 } 4623 4624 lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal); 4625 if (lpBuffer == NULL) { 4626 return FALSE; 4627 } 4628 4629 error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead); 4630 if (error == 0) { 4631 os::free(lpBuffer); 4632 return FALSE; 4633 } 4634 4635 // Examine input records for the number of bytes available 4636 for (i=0; i<numEvents; i++) { 4637 if (lpBuffer[i].EventType == KEY_EVENT) { 4638 4639 KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *) 4640 &(lpBuffer[i].Event); 4641 if (keyRecord->bKeyDown == TRUE) { 4642 CHAR *keyPressed = (CHAR *) &(keyRecord->uChar); 4643 curLength++; 4644 if (*keyPressed == '\r') { 4645 actualLength = curLength; 4646 } 4647 } 4648 } 4649 } 4650 4651 if (lpBuffer != NULL) { 4652 os::free(lpBuffer); 4653 } 
4654 4655 *pbytes = (long) actualLength; 4656 return TRUE; 4657 } 4658 4659 // Map a block of memory. 4660 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 4661 char *addr, size_t bytes, bool read_only, 4662 bool allow_exec) { 4663 HANDLE hFile; 4664 char* base; 4665 4666 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL, 4667 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); 4668 if (hFile == NULL) { 4669 log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError()); 4670 return NULL; 4671 } 4672 4673 if (allow_exec) { 4674 // CreateFileMapping/MapViewOfFileEx can't map executable memory 4675 // unless it comes from a PE image (which the shared archive is not.) 4676 // Even VirtualProtect refuses to give execute access to mapped memory 4677 // that was not previously executable. 4678 // 4679 // Instead, stick the executable region in anonymous memory. Yuck. 4680 // Penalty is that ~4 pages will not be shareable - in the future 4681 // we might consider DLLizing the shared archive with a proper PE 4682 // header so that mapping executable + sharing is possible. 4683 4684 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE, 4685 PAGE_READWRITE); 4686 if (base == NULL) { 4687 log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError()); 4688 CloseHandle(hFile); 4689 return NULL; 4690 } 4691 4692 DWORD bytes_read; 4693 OVERLAPPED overlapped; 4694 overlapped.Offset = (DWORD)file_offset; 4695 overlapped.OffsetHigh = 0; 4696 overlapped.hEvent = NULL; 4697 // ReadFile guarantees that if the return value is true, the requested 4698 // number of bytes were read before returning. 
4699 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; 4700 if (!res) { 4701 log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError()); 4702 release_memory(base, bytes); 4703 CloseHandle(hFile); 4704 return NULL; 4705 } 4706 } else { 4707 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, 4708 NULL /* file_name */); 4709 if (hMap == NULL) { 4710 log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError()); 4711 CloseHandle(hFile); 4712 return NULL; 4713 } 4714 4715 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; 4716 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, 4717 (DWORD)bytes, addr); 4718 if (base == NULL) { 4719 log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError()); 4720 CloseHandle(hMap); 4721 CloseHandle(hFile); 4722 return NULL; 4723 } 4724 4725 if (CloseHandle(hMap) == 0) { 4726 log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError()); 4727 CloseHandle(hFile); 4728 return base; 4729 } 4730 } 4731 4732 if (allow_exec) { 4733 DWORD old_protect; 4734 DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; 4735 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; 4736 4737 if (!res) { 4738 log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError()); 4739 // Don't consider this a hard error, on IA32 even if the 4740 // VirtualProtect fails, we should still be able to execute 4741 CloseHandle(hFile); 4742 return base; 4743 } 4744 } 4745 4746 if (CloseHandle(hFile) == 0) { 4747 log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError()); 4748 return base; 4749 } 4750 4751 return base; 4752 } 4753 4754 4755 // Remap a block of memory. 
4756 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4757 char *addr, size_t bytes, bool read_only, 4758 bool allow_exec) { 4759 // This OS does not allow existing memory maps to be remapped so we 4760 // have to unmap the memory before we remap it. 4761 if (!os::unmap_memory(addr, bytes)) { 4762 return NULL; 4763 } 4764 4765 // There is a very small theoretical window between the unmap_memory() 4766 // call above and the map_memory() call below where a thread in native 4767 // code may be able to access an address that is no longer mapped. 4768 4769 return os::map_memory(fd, file_name, file_offset, addr, bytes, 4770 read_only, allow_exec); 4771 } 4772 4773 4774 // Unmap a block of memory. 4775 // Returns true=success, otherwise false. 4776 4777 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4778 MEMORY_BASIC_INFORMATION mem_info; 4779 if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) { 4780 log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError()); 4781 return false; 4782 } 4783 4784 // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx. 4785 // Instead, executable region was allocated using VirtualAlloc(). See 4786 // pd_map_memory() above. 4787 // 4788 // The following flags should match the 'exec_access' flages used for 4789 // VirtualProtect() in pd_map_memory(). 
4790 if (mem_info.Protect == PAGE_EXECUTE_READ || 4791 mem_info.Protect == PAGE_EXECUTE_READWRITE) { 4792 return pd_release_memory(addr, bytes); 4793 } 4794 4795 BOOL result = UnmapViewOfFile(addr); 4796 if (result == 0) { 4797 log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError()); 4798 return false; 4799 } 4800 return true; 4801 } 4802 4803 void os::pause() { 4804 char filename[MAX_PATH]; 4805 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4806 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4807 } else { 4808 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4809 } 4810 4811 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4812 if (fd != -1) { 4813 struct stat buf; 4814 ::close(fd); 4815 while (::stat(filename, &buf) == 0) { 4816 Sleep(100); 4817 } 4818 } else { 4819 jio_fprintf(stderr, 4820 "Could not open pause file '%s', continuing immediately.\n", filename); 4821 } 4822 } 4823 4824 Thread* os::ThreadCrashProtection::_protected_thread = NULL; 4825 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL; 4826 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0; 4827 4828 os::ThreadCrashProtection::ThreadCrashProtection() { 4829 } 4830 4831 // See the caveats for this class in os_windows.hpp 4832 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4833 // into this method and returns false. If no OS EXCEPTION was raised, returns 4834 // true. 4835 // The callback is supposed to provide the method that should be protected. 
//
// Runs the callback under a Structured Exception Handling guard: any OS
// exception raised while cb.call() executes transfers control to the
// __except handler here instead of tearing down the process.
bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {

  // Serialize crash protection: only one protected region at a time.
  Thread::muxAcquire(&_crash_mux, "CrashProtection");

  _protected_thread = Thread::current_or_null();
  assert(_protected_thread != NULL, "Cannot crash protect a none Thread");

  bool success = true;
  __try {
    _crash_protection = this;
    cb.call();
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // only for protection, nothing to do
    success = false;
  }
  // Clear the protection state on both the normal and the exception path
  // before releasing the mux.
  _crash_protection = NULL;
  _protected_thread = NULL;
  Thread::muxRelease(&_crash_mux);
  return success;
}

// An Event wraps a win32 "CreateEvent" kernel handle.
//
// We have a number of choices regarding "CreateEvent" win32 handle leakage:
//
// 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
//     field, and call CloseHandle() on the win32 event handle.  Unpark() would
//     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
//     In addition, an unpark() operation might fetch the handle field, but the
//     event could recycle between the fetch and the SetEvent() operation.
//     SetEvent() would either fail because the handle was invalid, or inadvertently work,
//     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
//     on a stale but recycled handle would be harmless, but in practice this might
//     confuse other non-Sun code, so it's not a viable approach.
//
// 2:  Once a win32 event handle is associated with an Event, it remains associated
//     with the Event.  The event handle is never closed.  This could be construed
//     as handle leakage, but only up to the maximum # of threads that have been extant
//     at any one time.  This shouldn't be an issue, as windows platforms typically
//     permit a process to have hundreds of thousands of open handles.
//
// 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
//     and release unused handles.
//
// 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
//     It's not clear, however, that we wouldn't be trading one type of leak for another.
//
// 5.  Use an RCU-like mechanism (Read-Copy Update).
//     Or perhaps something similar to Maged Michael's "Hazard pointers".
//
// We use (2).
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
// 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
//     to recover from (or at least detect) the dreaded Windows 841176 bug.
// 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
//     into a single win32 CreateEvent() handle.
//
// Assumption:
//    Only one parker can exist on an event, which is why we allocate
//    them per-thread.  Multiple unparkers can coexist.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block; then set _Event to 0 before returning
//
// _Event transitions in unpark()
//    0 => 1 : just return
//    1 => 1 : just return
//   -1 => either 0 or 1; must signal target thread
//         That is, we can safely transition _Event from -1 to either
//         0 or 1.
//
// _Event serves as a restricted-range semaphore.
//   -1 : thread is blocked, i.e. there is a waiter
//    0 : neutral: thread is running or ready,
//        could have been signaled after a wait started
//    1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
//

// Timed park: block the owning thread for at most Millis milliseconds,
// or until unpark()ed. Returns OS_OK if (possibly) unparked, OS_TIMEOUT
// if the wait expired.
int os::PlatformEvent::park(jlong Millis) {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL , "Invariant");
  guarantee(Millis > 0          , "Invariant");

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.
  // Consider: use atomic decrement instead of CAS-loop

  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  // A pending signal (v == 1) lets us return without blocking.
  if (v != 0) return OS_OK;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
  }
  v = _Event;
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultaneous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}

// Untimed park: block the owning thread until unpark()ed.
void os::PlatformEvent::park() {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Consider: use atomic decrement instead of CAS-loop
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  // Re-check _Event after every wakeup to filter spurious returns.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}

void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // Only signal the kernel event when a waiter was present (old value -1).
  if (Atomic::xchg(1, &_Event) >= 0) return;

  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

// The Windows implementation of Park is very straightforward: Basic
// operations on Win32 Events turn out to have the right semantics to
// use them directly.  We opportunistically reuse the event inherited
// from Monitor.
// JSR166 park: block the current JavaThread for 'time' (nanos if relative,
// absolute millis deadline if isAbsolute) or until unpark()/interrupt.
// After the decode step below, 'time' is always a relative wait in millis,
// or INFINITE.
void Parker::park(bool isAbsolute, jlong time) {
  guarantee(_ParkEvent != NULL, "invariant");
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  } else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  } else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) { // already elapsed
      return;
    }
  } else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0) { // Wait for the minimal time unit if zero
      time = 1;
    }
  }

  JavaThread* thread = JavaThread::current();

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    // Consume a pending permit and return immediately.
    ResetEvent(_ParkEvent);
    return;
  } else {
    // Transition to _thread_blocked for the duration of the wait so the
    // VM (e.g. safepoints) treats this thread as blocked.
    ThreadBlockInVM tbivm(thread);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    thread->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (thread->handle_special_suspend_equivalent_condition()) {
      thread->java_suspend_self();
    }
  }
}

// Make a permit available; a subsequent (or concurrent) park() will
// consume it and return.
void Parker::unpark() {
  guarantee(_ParkEvent != NULL, "invariant");
  SetEvent(_ParkEvent);
}

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't create a new process).
5095 int os::fork_and_exec(char* cmd) { 5096 STARTUPINFO si; 5097 PROCESS_INFORMATION pi; 5098 DWORD exit_code; 5099 5100 char * cmd_string; 5101 char * cmd_prefix = "cmd /C "; 5102 size_t len = strlen(cmd) + strlen(cmd_prefix) + 1; 5103 cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal); 5104 if (cmd_string == NULL) { 5105 return -1; 5106 } 5107 cmd_string[0] = '\0'; 5108 strcat(cmd_string, cmd_prefix); 5109 strcat(cmd_string, cmd); 5110 5111 // now replace all '\n' with '&' 5112 char * substring = cmd_string; 5113 while ((substring = strchr(substring, '\n')) != NULL) { 5114 substring[0] = '&'; 5115 substring++; 5116 } 5117 memset(&si, 0, sizeof(si)); 5118 si.cb = sizeof(si); 5119 memset(&pi, 0, sizeof(pi)); 5120 BOOL rslt = CreateProcess(NULL, // executable name - use command line 5121 cmd_string, // command line 5122 NULL, // process security attribute 5123 NULL, // thread security attribute 5124 TRUE, // inherits system handles 5125 0, // no creation flags 5126 NULL, // use parent's environment block 5127 NULL, // use parent's starting directory 5128 &si, // (in) startup information 5129 &pi); // (out) process information 5130 5131 if (rslt) { 5132 // Wait until child process exits. 5133 WaitForSingleObject(pi.hProcess, INFINITE); 5134 5135 GetExitCodeProcess(pi.hProcess, &exit_code); 5136 5137 // Close process and thread handles. 
5138 CloseHandle(pi.hProcess); 5139 CloseHandle(pi.hThread); 5140 } else { 5141 exit_code = -1; 5142 } 5143 5144 FREE_C_HEAP_ARRAY(char, cmd_string); 5145 return (int)exit_code; 5146 } 5147 5148 bool os::find(address addr, outputStream* st) { 5149 int offset = -1; 5150 bool result = false; 5151 char buf[256]; 5152 if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) { 5153 st->print(PTR_FORMAT " ", addr); 5154 if (strlen(buf) < sizeof(buf) - 1) { 5155 char* p = strrchr(buf, '\\'); 5156 if (p) { 5157 st->print("%s", p + 1); 5158 } else { 5159 st->print("%s", buf); 5160 } 5161 } else { 5162 // The library name is probably truncated. Let's omit the library name. 5163 // See also JDK-8147512. 5164 } 5165 if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) { 5166 st->print("::%s + 0x%x", buf, offset); 5167 } 5168 st->cr(); 5169 result = true; 5170 } 5171 return result; 5172 } 5173 5174 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) { 5175 DWORD exception_code = e->ExceptionRecord->ExceptionCode; 5176 5177 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 5178 JavaThread* thread = JavaThread::current(); 5179 PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord; 5180 address addr = (address) exceptionRecord->ExceptionInformation[1]; 5181 5182 if (os::is_memory_serialize_page(thread, addr)) { 5183 return EXCEPTION_CONTINUE_EXECUTION; 5184 } 5185 } 5186 5187 return EXCEPTION_CONTINUE_SEARCH; 5188 } 5189 5190 // We don't build a headless jre for Windows 5191 bool os::is_headless_jre() { return false; } 5192 5193 static jint initSock() { 5194 WSADATA wsadata; 5195 5196 if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) { 5197 jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n", 5198 ::GetLastError()); 5199 return JNI_ERR; 5200 } 5201 return JNI_OK; 5202 } 5203 5204 struct hostent* os::get_host_by_name(char* name) { 5205 return (struct hostent*)gethostbyname(name); 5206 } 5207 5208 int 
os::socket_close(int fd) { 5209 return ::closesocket(fd); 5210 } 5211 5212 int os::socket(int domain, int type, int protocol) { 5213 return ::socket(domain, type, protocol); 5214 } 5215 5216 int os::connect(int fd, struct sockaddr* him, socklen_t len) { 5217 return ::connect(fd, him, len); 5218 } 5219 5220 int os::recv(int fd, char* buf, size_t nBytes, uint flags) { 5221 return ::recv(fd, buf, (int)nBytes, flags); 5222 } 5223 5224 int os::send(int fd, char* buf, size_t nBytes, uint flags) { 5225 return ::send(fd, buf, (int)nBytes, flags); 5226 } 5227 5228 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) { 5229 return ::send(fd, buf, (int)nBytes, flags); 5230 } 5231 5232 // WINDOWS CONTEXT Flags for THREAD_SAMPLING 5233 #if defined(IA32) 5234 #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS) 5235 #elif defined (AMD64) 5236 #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT) 5237 #endif 5238 5239 // returns true if thread could be suspended, 5240 // false otherwise 5241 static bool do_suspend(HANDLE* h) { 5242 if (h != NULL) { 5243 if (SuspendThread(*h) != ~0) { 5244 return true; 5245 } 5246 } 5247 return false; 5248 } 5249 5250 // resume the thread 5251 // calling resume on an active thread is a no-op 5252 static void do_resume(HANDLE* h) { 5253 if (h != NULL) { 5254 ResumeThread(*h); 5255 } 5256 } 5257 5258 // retrieve a suspend/resume context capable handle 5259 // from the tid. Caller validates handle return value. 
// Open a handle to thread 'tid' with the access rights needed for
// suspend/resume and context capture. *h receives the handle (may be NULL
// on failure — caller validates).
void get_thread_handle_for_extended_context(HANDLE* h,
                                            OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

// Thread sampling implementation
//
// Suspend the target thread, capture its register context, hand it to
// do_task(), then resume the thread and close the handle.
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}

// Append a "do you want to debug?" prompt to 'buf', show a message box,
// and if the user accepts, trigger a breakpoint so a debugger can attach.
// Returns false after breaking (or when the user declines).
bool os::start_debugging(char *buf, int buflen) {
  int len = (int)strlen(buf);
  char *p = &buf[len];

  jio_snprintf(p, buflen-len,
               "\n\n"
               "Do you want to debug the problem?\n\n"
               "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
               "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
               "Otherwise, select 'No' to abort...",
               os::current_process_id(), os::current_thread_id());

  bool yes = os::message_box("Unexpected Error", buf);

  if (yes) {
    // os::breakpoint() calls DebugBreak(), which causes a breakpoint
    // exception. If VM is running inside a debugger, the debugger will
    // catch the exception. Otherwise, the breakpoint exception will reach
    // the default windows exception handler, which can spawn a debugger and
    // automatically attach to the dying VM.
    os::breakpoint();
    yes = false;
  }
  return yes;
}

void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//                            == false if only the base name of the library is passed in
//                                     such as "L"
// Returns a C-heap-allocated name (NULL on failure); presumably the caller
// frees it with FREE_C_HEAP_ARRAY — confirm at call sites.
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      // Too short to contain prefix + suffix around a real name.
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      // Skip the JNI_LIB_PREFIX; name_len excludes the JNI_LIB_SUFFIX.
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // +2: one for the '_' separator, one for the trailing NUL.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}

#ifndef PRODUCT

// test the code path in reserve_memory_special() that tries to allocate memory in a single
// contiguous memory block at a particular address.
// The test first tries to find a good approximate address to allocate at by using the same
// method to allocate some memory at any address. The test then tries to allocate memory in
// the vicinity (not directly after it to avoid possible by-chance use of that location)
// This is of course only some dodgy assumption, there is no guarantee that the vicinity of
// the previously allocated memory is available for allocation. The only actual failure
// that is reported is when the test tries to allocate at a particular location but gets a
// different valid one. A NULL return value at this point is not considered an error but may
// be legitimate.
// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5405 void TestReserveMemorySpecial_test() { 5406 if (!UseLargePages) { 5407 if (VerboseInternalVMTests) { 5408 tty->print("Skipping test because large pages are disabled"); 5409 } 5410 return; 5411 } 5412 // save current value of globals 5413 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; 5414 bool old_use_numa_interleaving = UseNUMAInterleaving; 5415 5416 // set globals to make sure we hit the correct code path 5417 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; 5418 5419 // do an allocation at an address selected by the OS to get a good one. 5420 const size_t large_allocation_size = os::large_page_size() * 4; 5421 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); 5422 if (result == NULL) { 5423 if (VerboseInternalVMTests) { 5424 tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.", 5425 large_allocation_size); 5426 } 5427 } else { 5428 os::release_memory_special(result, large_allocation_size); 5429 5430 // allocate another page within the recently allocated memory area which seems to be a good location. At least 5431 // we managed to get it once. 5432 const size_t expected_allocation_size = os::large_page_size(); 5433 char* expected_location = result + os::large_page_size(); 5434 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); 5435 if (actual_location == NULL) { 5436 if (VerboseInternalVMTests) { 5437 tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.", 5438 expected_location, large_allocation_size); 5439 } 5440 } else { 5441 // release memory 5442 os::release_memory_special(actual_location, expected_allocation_size); 5443 // only now check, after releasing any memory to avoid any leaks. 
5444 assert(actual_location == expected_location, 5445 "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead", 5446 expected_location, expected_allocation_size, actual_location); 5447 } 5448 } 5449 5450 // restore globals 5451 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; 5452 UseNUMAInterleaving = old_use_numa_interleaving; 5453 } 5454 #endif // PRODUCT 5455 5456 /* 5457 All the defined signal names for Windows. 5458 5459 NOTE that not all of these names are accepted by FindSignal! 5460 5461 For various reasons some of these may be rejected at runtime. 5462 5463 Here are the names currently accepted by a user of sun.misc.Signal with 5464 1.4.1 (ignoring potential interaction with use of chaining, etc): 5465 5466 (LIST TBD) 5467 5468 */ 5469 int os::get_signal_number(const char* name) { 5470 static const struct { 5471 char* name; 5472 int number; 5473 } siglabels [] = 5474 // derived from version 6.0 VC98/include/signal.h 5475 {"ABRT", SIGABRT, // abnormal termination triggered by abort cl 5476 "FPE", SIGFPE, // floating point exception 5477 "SEGV", SIGSEGV, // segment violation 5478 "INT", SIGINT, // interrupt 5479 "TERM", SIGTERM, // software term signal from kill 5480 "BREAK", SIGBREAK, // Ctrl-Break sequence 5481 "ILL", SIGILL}; // illegal instruction 5482 for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) { 5483 if (strcmp(name, siglabels[i].name) == 0) { 5484 return siglabels[i].number; 5485 } 5486 } 5487 return -1; 5488 } 5489 5490 // Fast current thread access 5491 5492 int os::win32::_thread_ptr_offset = 0; 5493 5494 static void call_wrapper_dummy() {} 5495 5496 // We need to call the os_exception_wrapper once so that it sets 5497 // up the offset from FS of the thread pointer. 5498 void os::win32::initialize_thread_ptr_offset() { 5499 os::os_exception_wrapper((java_call_t)call_wrapper_dummy, 5500 NULL, NULL, NULL, NULL); 5501 }