1 /* 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // Must be at least Windows 2000 or XP to use IsDebuggerPresent 26 #define _WIN32_WINNT 0x500 27 28 // no precompiled headers 29 #include "classfile/classLoader.hpp" 30 #include "classfile/systemDictionary.hpp" 31 #include "classfile/vmSymbols.hpp" 32 #include "code/icBuffer.hpp" 33 #include "code/vtableStubs.hpp" 34 #include "compiler/compileBroker.hpp" 35 #include "compiler/disassembler.hpp" 36 #include "interpreter/interpreter.hpp" 37 #include "jvm_windows.h" 38 #include "memory/allocation.inline.hpp" 39 #include "memory/filemap.hpp" 40 #include "mutex_windows.inline.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "os_share_windows.hpp" 43 #include "os_windows.inline.hpp" 44 #include "prims/jniFastGetField.hpp" 45 #include "prims/jvm.h" 46 #include "prims/jvm_misc.hpp" 47 #include "runtime/arguments.hpp" 48 #include "runtime/atomic.inline.hpp" 49 #include "runtime/extendedPC.hpp" 50 #include "runtime/globals.hpp" 51 #include "runtime/interfaceSupport.hpp" 52 #include "runtime/java.hpp" 53 #include "runtime/javaCalls.hpp" 54 #include "runtime/mutexLocker.hpp" 55 #include "runtime/objectMonitor.hpp" 56 #include "runtime/orderAccess.inline.hpp" 57 #include "runtime/osThread.hpp" 58 #include "runtime/perfMemory.hpp" 59 #include "runtime/sharedRuntime.hpp" 60 #include "runtime/statSampler.hpp" 61 #include "runtime/stubRoutines.hpp" 62 #include "runtime/thread.inline.hpp" 63 #include "runtime/threadCritical.hpp" 64 #include "runtime/timer.hpp" 65 #include "runtime/vm_version.hpp" 66 #include "services/attachListener.hpp" 67 #include "services/memTracker.hpp" 68 #include "services/runtimeService.hpp" 69 #include "utilities/decoder.hpp" 70 #include "utilities/defaultStream.hpp" 71 #include "utilities/events.hpp" 72 #include "utilities/growableArray.hpp" 73 #include "utilities/vmError.hpp" 74 75 #ifdef _DEBUG 76 #include <crtdbg.h> 77 #endif 78 79 80 #include <windows.h> 81 #include <sys/types.h> 82 #include <sys/stat.h> 83 #include <sys/timeb.h> 84 
#include <objidl.h>
#include <shlobj.h>

#include <malloc.h>
#include <signal.h>
#include <direct.h>
#include <errno.h>
#include <fcntl.h>
#include <io.h>
#include <process.h>              // For _beginthreadex(), _endthreadex()
#include <imagehlp.h>             // For os::dll_address_to_function_name
/* for enumerating dll libraries */
#include <vdmdbg.h>

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)

// For DLL loading/load error detection
// Values of PE COFF
#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
#define IMAGE_FILE_SIGNATURE_LENGTH 4

static HANDLE main_process;
static HANDLE main_thread;
static int    main_thread_id;

static FILETIME process_creation_time;
static FILETIME process_exit_time;
static FILETIME process_user_time;
static FILETIME process_kernel_time;

#ifdef _M_IA64
  #define __CPU__ ia64
#elif _M_AMD64
  #define __CPU__ amd64
#else
  #define __CPU__ i486
#endif

// save DLL module handle, used by GetModuleFileName

HINSTANCE vm_lib_handle;

// DLL entry point: remembers the module handle and, when ForceTimeHighResolution
// is set, raises/restores the OS timer resolution for the life of the process.
BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
  switch (reason) {
  case DLL_PROCESS_ATTACH:
    vm_lib_handle = hinst;
    if (ForceTimeHighResolution)
      timeBeginPeriod(1L);
    break;
  case DLL_PROCESS_DETACH:
    if (ForceTimeHighResolution)
      timeEndPeriod(1L);
    break;
  default:
    break;
  }
  return true;
}

// Convert a FILETIME (100ns units split across two 32-bit halves) to
// seconds expressed as a double.
static inline double fileTimeAsDouble(FILETIME* time) {
  const double high  = (double) ((unsigned int) ~0);
  const double split = 10000000.0;
  double result = (time->dwLowDateTime / split) +
                   time->dwHighDateTime * (high/split);
  return result;
}

// Implementation of os

// Read environment variable 'name' into 'buffer' (capacity 'len').
// Returns true only if the variable exists and fits in the buffer.
bool os::getenv(const char* name, char* buffer, int len) {
  int result = GetEnvironmentVariable(name, buffer, len);
  return result > 0 && result < len;
}
bool os::unsetenv(const char* name) { 161 assert(name != NULL, "Null pointer"); 162 return (SetEnvironmentVariable(name, NULL) == TRUE); 163 } 164 165 // No setuid programs under Windows. 166 bool os::have_special_privileges() { 167 return false; 168 } 169 170 171 // This method is a periodic task to check for misbehaving JNI applications 172 // under CheckJNI, we can add any periodic checks here. 173 // For Windows at the moment does nothing 174 void os::run_periodic_checks() { 175 return; 176 } 177 178 // previous UnhandledExceptionFilter, if there is one 179 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL; 180 181 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo); 182 void os::init_system_properties_values() { 183 /* sysclasspath, java_home, dll_dir */ 184 { 185 char *home_path; 186 char *dll_path; 187 char *pslash; 188 char *bin = "\\bin"; 189 char home_dir[MAX_PATH]; 190 191 if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) { 192 os::jvm_path(home_dir, sizeof(home_dir)); 193 // Found the full path to jvm.dll. 194 // Now cut the path to <java_home>/jre if we can. 
195 *(strrchr(home_dir, '\\')) = '\0'; /* get rid of \jvm.dll */ 196 pslash = strrchr(home_dir, '\\'); 197 if (pslash != NULL) { 198 *pslash = '\0'; /* get rid of \{client|server} */ 199 pslash = strrchr(home_dir, '\\'); 200 if (pslash != NULL) 201 *pslash = '\0'; /* get rid of \bin */ 202 } 203 } 204 205 home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal); 206 if (home_path == NULL) 207 return; 208 strcpy(home_path, home_dir); 209 Arguments::set_java_home(home_path); 210 211 dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal); 212 if (dll_path == NULL) 213 return; 214 strcpy(dll_path, home_dir); 215 strcat(dll_path, bin); 216 Arguments::set_dll_dir(dll_path); 217 218 if (!set_boot_path('\\', ';')) 219 return; 220 } 221 222 /* library_path */ 223 #define EXT_DIR "\\lib\\ext" 224 #define BIN_DIR "\\bin" 225 #define PACKAGE_DIR "\\Sun\\Java" 226 { 227 /* Win32 library search order (See the documentation for LoadLibrary): 228 * 229 * 1. The directory from which application is loaded. 230 * 2. The system wide Java Extensions directory (Java only) 231 * 3. System directory (GetSystemDirectory) 232 * 4. Windows directory (GetWindowsDirectory) 233 * 5. The PATH environment variable 234 * 6. The current directory 235 */ 236 237 char *library_path; 238 char tmp[MAX_PATH]; 239 char *path_str = ::getenv("PATH"); 240 241 library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) + 242 sizeof(BIN_DIR) + (path_str ? 
strlen(path_str) : 0) + 10, mtInternal); 243 244 library_path[0] = '\0'; 245 246 GetModuleFileName(NULL, tmp, sizeof(tmp)); 247 *(strrchr(tmp, '\\')) = '\0'; 248 strcat(library_path, tmp); 249 250 GetWindowsDirectory(tmp, sizeof(tmp)); 251 strcat(library_path, ";"); 252 strcat(library_path, tmp); 253 strcat(library_path, PACKAGE_DIR BIN_DIR); 254 255 GetSystemDirectory(tmp, sizeof(tmp)); 256 strcat(library_path, ";"); 257 strcat(library_path, tmp); 258 259 GetWindowsDirectory(tmp, sizeof(tmp)); 260 strcat(library_path, ";"); 261 strcat(library_path, tmp); 262 263 if (path_str) { 264 strcat(library_path, ";"); 265 strcat(library_path, path_str); 266 } 267 268 strcat(library_path, ";."); 269 270 Arguments::set_library_path(library_path); 271 FREE_C_HEAP_ARRAY(char, library_path, mtInternal); 272 } 273 274 /* Default extensions directory */ 275 { 276 char path[MAX_PATH]; 277 char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1]; 278 GetWindowsDirectory(path, MAX_PATH); 279 sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR, 280 path, PACKAGE_DIR, EXT_DIR); 281 Arguments::set_ext_dirs(buf); 282 } 283 #undef EXT_DIR 284 #undef BIN_DIR 285 #undef PACKAGE_DIR 286 287 /* Default endorsed standards directory. 
*/ 288 { 289 #define ENDORSED_DIR "\\lib\\endorsed" 290 size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR); 291 char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal); 292 sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR); 293 Arguments::set_endorsed_dirs(buf); 294 #undef ENDORSED_DIR 295 } 296 297 #ifndef _WIN64 298 // set our UnhandledExceptionFilter and save any previous one 299 prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception); 300 #endif 301 302 // Done 303 return; 304 } 305 306 void os::breakpoint() { 307 DebugBreak(); 308 } 309 310 // Invoked from the BREAKPOINT Macro 311 extern "C" void breakpoint() { 312 os::breakpoint(); 313 } 314 315 /* 316 * RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP. 317 * So far, this method is only used by Native Memory Tracking, which is 318 * only supported on Windows XP or later. 319 */ 320 int os::get_native_stack(address* stack, int frames, int toSkip) { 321 #ifdef _NMT_NOINLINE_ 322 toSkip ++; 323 #endif 324 int captured = Kernel32Dll::RtlCaptureStackBackTrace(toSkip + 1, frames, 325 (PVOID*)stack, NULL); 326 for (int index = captured; index < frames; index ++) { 327 stack[index] = NULL; 328 } 329 return captured; 330 } 331 332 333 // os::current_stack_base() 334 // 335 // Returns the base of the stack, which is the stack's 336 // starting address. This function must be called 337 // while running on the stack of the thread being queried. 338 339 address os::current_stack_base() { 340 MEMORY_BASIC_INFORMATION minfo; 341 address stack_bottom; 342 size_t stack_size; 343 344 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 345 stack_bottom = (address)minfo.AllocationBase; 346 stack_size = minfo.RegionSize; 347 348 // Add up the sizes of all the regions with the same 349 // AllocationBase. 
350 while (1) 351 { 352 VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo)); 353 if (stack_bottom == (address)minfo.AllocationBase) 354 stack_size += minfo.RegionSize; 355 else 356 break; 357 } 358 359 #ifdef _M_IA64 360 // IA64 has memory and register stacks 361 // 362 // This is the stack layout you get on NT/IA64 if you specify 1MB stack limit 363 // at thread creation (1MB backing store growing upwards, 1MB memory stack 364 // growing downwards, 2MB summed up) 365 // 366 // ... 367 // ------- top of stack (high address) ----- 368 // | 369 // | 1MB 370 // | Backing Store (Register Stack) 371 // | 372 // | / \ 373 // | | 374 // | | 375 // | | 376 // ------------------------ stack base ----- 377 // | 1MB 378 // | Memory Stack 379 // | 380 // | | 381 // | | 382 // | | 383 // | \ / 384 // | 385 // ----- bottom of stack (low address) ----- 386 // ... 387 388 stack_size = stack_size / 2; 389 #endif 390 return stack_bottom + stack_size; 391 } 392 393 size_t os::current_stack_size() { 394 size_t sz; 395 MEMORY_BASIC_INFORMATION minfo; 396 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 397 sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase; 398 return sz; 399 } 400 401 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 402 const struct tm* time_struct_ptr = localtime(clock); 403 if (time_struct_ptr != NULL) { 404 *res = *time_struct_ptr; 405 return res; 406 } 407 return NULL; 408 } 409 410 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo); 411 412 extern jint volatile vm_getting_terminated; 413 414 // Thread start routine for all new Java threads 415 static unsigned __stdcall java_start(Thread* thread) { 416 // Try to randomize the cache line index of hot stack frames. 417 // This helps when threads of the same stack traces evict each other's 418 // cache lines. The threads can be either from the same JVM instance, or 419 // from different JVM instances. 
The benefit is especially true for 420 // processors with hyperthreading technology. 421 static int counter = 0; 422 int pid = os::current_process_id(); 423 _alloca(((pid ^ counter++) & 7) * 128); 424 425 OSThread* osthr = thread->osthread(); 426 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 427 428 if (UseNUMA) { 429 int lgrp_id = os::numa_get_group_id(); 430 if (lgrp_id != -1) { 431 thread->set_lgrp_id(lgrp_id); 432 } 433 } 434 435 // Diagnostic code to investigate JDK-6573254 (Part I) 436 unsigned res = 90115; // non-java thread 437 if (thread->is_Java_thread()) { 438 JavaThread* java_thread = (JavaThread*)thread; 439 res = java_lang_Thread::is_daemon(java_thread->threadObj()) 440 ? 70115 // java daemon thread 441 : 80115; // java non-daemon thread 442 } 443 444 // Install a win32 structured exception handler around every thread created 445 // by VM, so VM can generate error dump when an exception occurred in non- 446 // Java thread (e.g. VM thread). 447 __try { 448 thread->run(); 449 } __except(topLevelExceptionFilter( 450 (_EXCEPTION_POINTERS*)_exception_info())) { 451 // Nothing to do. 
452 } 453 454 // One less thread is executing 455 // When the VMThread gets here, the main thread may have already exited 456 // which frees the CodeHeap containing the Atomic::add code 457 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) { 458 Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count); 459 } 460 461 // Diagnostic code to investigate JDK-6573254 (Part II) 462 if (OrderAccess::load_acquire(&vm_getting_terminated)) { 463 return res; 464 } 465 466 return 0; 467 } 468 469 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) { 470 // Allocate the OSThread object 471 OSThread* osthread = new OSThread(NULL, NULL); 472 if (osthread == NULL) return NULL; 473 474 // Initialize support for Java interrupts 475 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 476 if (interrupt_event == NULL) { 477 delete osthread; 478 return NULL; 479 } 480 osthread->set_interrupt_event(interrupt_event); 481 482 // Store info on the Win32 thread into the OSThread 483 osthread->set_thread_handle(thread_handle); 484 osthread->set_thread_id(thread_id); 485 486 if (UseNUMA) { 487 int lgrp_id = os::numa_get_group_id(); 488 if (lgrp_id != -1) { 489 thread->set_lgrp_id(lgrp_id); 490 } 491 } 492 493 // Initial thread state is INITIALIZED, not SUSPENDED 494 osthread->set_state(INITIALIZED); 495 496 return osthread; 497 } 498 499 500 bool os::create_attached_thread(JavaThread* thread) { 501 #ifdef ASSERT 502 thread->verify_not_published(); 503 #endif 504 HANDLE thread_h; 505 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(), 506 &thread_h, THREAD_ALL_ACCESS, false, 0)) { 507 fatal("DuplicateHandle failed\n"); 508 } 509 OSThread* osthread = create_os_thread(thread, thread_h, 510 (int)current_thread_id()); 511 if (osthread == NULL) { 512 return false; 513 } 514 515 // Initial thread state is RUNNABLE 516 osthread->set_state(RUNNABLE); 517 518 thread->set_osthread(osthread); 519 return true; 520 } 
521 522 bool os::create_main_thread(JavaThread* thread) { 523 #ifdef ASSERT 524 thread->verify_not_published(); 525 #endif 526 if (_starting_thread == NULL) { 527 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 528 if (_starting_thread == NULL) { 529 return false; 530 } 531 } 532 533 // The primordial thread is runnable from the start) 534 _starting_thread->set_state(RUNNABLE); 535 536 thread->set_osthread(_starting_thread); 537 return true; 538 } 539 540 // Allocate and initialize a new OSThread 541 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) { 542 unsigned thread_id; 543 544 // Allocate the OSThread object 545 OSThread* osthread = new OSThread(NULL, NULL); 546 if (osthread == NULL) { 547 return false; 548 } 549 550 // Initialize support for Java interrupts 551 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 552 if (interrupt_event == NULL) { 553 delete osthread; 554 return NULL; 555 } 556 osthread->set_interrupt_event(interrupt_event); 557 osthread->set_interrupted(false); 558 559 thread->set_osthread(osthread); 560 561 if (stack_size == 0) { 562 switch (thr_type) { 563 case os::java_thread: 564 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 565 if (JavaThread::stack_size_at_create() > 0) 566 stack_size = JavaThread::stack_size_at_create(); 567 break; 568 case os::compiler_thread: 569 if (CompilerThreadStackSize > 0) { 570 stack_size = (size_t)(CompilerThreadStackSize * K); 571 break; 572 } // else fall through: 573 // use VMThreadStackSize if CompilerThreadStackSize is not defined 574 case os::vm_thread: 575 case os::pgc_thread: 576 case os::cgc_thread: 577 case os::watcher_thread: 578 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 579 break; 580 } 581 } 582 583 // Create the Win32 thread 584 // 585 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() 586 // does not specify stack size. 
Instead, it specifies the size of 587 // initially committed space. The stack size is determined by 588 // PE header in the executable. If the committed "stack_size" is larger 589 // than default value in the PE header, the stack is rounded up to the 590 // nearest multiple of 1MB. For example if the launcher has default 591 // stack size of 320k, specifying any size less than 320k does not 592 // affect the actual stack size at all, it only affects the initial 593 // commitment. On the other hand, specifying 'stack_size' larger than 594 // default value may cause significant increase in memory usage, because 595 // not only the stack space will be rounded up to MB, but also the 596 // entire space is committed upfront. 597 // 598 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' 599 // for CreateThread() that can treat 'stack_size' as stack size. However we 600 // are not supposed to call CreateThread() directly according to MSDN 601 // document because JVM uses C runtime library. The good news is that the 602 // flag appears to work with _beginthredex() as well. 603 604 #ifndef STACK_SIZE_PARAM_IS_A_RESERVATION 605 #define STACK_SIZE_PARAM_IS_A_RESERVATION (0x10000) 606 #endif 607 608 HANDLE thread_handle = 609 (HANDLE)_beginthreadex(NULL, 610 (unsigned)stack_size, 611 (unsigned (__stdcall *)(void*)) java_start, 612 thread, 613 CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 614 &thread_id); 615 if (thread_handle == NULL) { 616 // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again 617 // without the flag. 
618 thread_handle = 619 (HANDLE)_beginthreadex(NULL, 620 (unsigned)stack_size, 621 (unsigned (__stdcall *)(void*)) java_start, 622 thread, 623 CREATE_SUSPENDED, 624 &thread_id); 625 } 626 if (thread_handle == NULL) { 627 // Need to clean up stuff we've allocated so far 628 CloseHandle(osthread->interrupt_event()); 629 thread->set_osthread(NULL); 630 delete osthread; 631 return NULL; 632 } 633 634 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count); 635 636 // Store info on the Win32 thread into the OSThread 637 osthread->set_thread_handle(thread_handle); 638 osthread->set_thread_id(thread_id); 639 640 // Initial thread state is INITIALIZED, not SUSPENDED 641 osthread->set_state(INITIALIZED); 642 643 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 644 return true; 645 } 646 647 648 // Free Win32 resources related to the OSThread 649 void os::free_thread(OSThread* osthread) { 650 assert(osthread != NULL, "osthread not set"); 651 CloseHandle(osthread->thread_handle()); 652 CloseHandle(osthread->interrupt_event()); 653 delete osthread; 654 } 655 656 static jlong first_filetime; 657 static jlong initial_performance_count; 658 static jlong performance_frequency; 659 660 661 jlong as_long(LARGE_INTEGER x) { 662 jlong result = 0; // initialization to avoid warning 663 set_high(&result, x.HighPart); 664 set_low(&result, x.LowPart); 665 return result; 666 } 667 668 669 jlong os::elapsed_counter() { 670 LARGE_INTEGER count; 671 if (win32::_has_performance_count) { 672 QueryPerformanceCounter(&count); 673 return as_long(count) - initial_performance_count; 674 } else { 675 FILETIME wt; 676 GetSystemTimeAsFileTime(&wt); 677 return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime); 678 } 679 } 680 681 682 jlong os::elapsed_frequency() { 683 if (win32::_has_performance_count) { 684 return performance_frequency; 685 } else { 686 // the FILETIME time is the number of 100-nanosecond intervals since January 
1,1601. 687 return 10000000; 688 } 689 } 690 691 692 julong os::available_memory() { 693 return win32::available_memory(); 694 } 695 696 julong os::win32::available_memory() { 697 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 698 // value if total memory is larger than 4GB 699 MEMORYSTATUSEX ms; 700 ms.dwLength = sizeof(ms); 701 GlobalMemoryStatusEx(&ms); 702 703 return (julong)ms.ullAvailPhys; 704 } 705 706 julong os::physical_memory() { 707 return win32::physical_memory(); 708 } 709 710 bool os::has_allocatable_memory_limit(julong* limit) { 711 MEMORYSTATUSEX ms; 712 ms.dwLength = sizeof(ms); 713 GlobalMemoryStatusEx(&ms); 714 #ifdef _LP64 715 *limit = (julong)ms.ullAvailVirtual; 716 return true; 717 #else 718 // Limit to 1400m because of the 2gb address space wall 719 *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual); 720 return true; 721 #endif 722 } 723 724 // VC6 lacks DWORD_PTR 725 #if _MSC_VER < 1300 726 typedef UINT_PTR DWORD_PTR; 727 #endif 728 729 int os::active_processor_count() { 730 DWORD_PTR lpProcessAffinityMask = 0; 731 DWORD_PTR lpSystemAffinityMask = 0; 732 int proc_count = processor_count(); 733 if (proc_count <= sizeof(UINT_PTR) * BitsPerByte && 734 GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) { 735 // Nof active processors is number of bits in process affinity mask 736 int bitcount = 0; 737 while (lpProcessAffinityMask != 0) { 738 lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1); 739 bitcount++; 740 } 741 return bitcount; 742 } else { 743 return proc_count; 744 } 745 } 746 747 void os::set_native_thread_name(const char *name) { 748 // Not yet implemented. 749 return; 750 } 751 752 bool os::distribute_processes(uint length, uint* distribution) { 753 // Not yet implemented. 754 return false; 755 } 756 757 bool os::bind_to_processor(uint processor_id) { 758 // Not yet implemented. 
759 return false; 760 } 761 762 void os::win32::initialize_performance_counter() { 763 LARGE_INTEGER count; 764 if (QueryPerformanceFrequency(&count)) { 765 win32::_has_performance_count = 1; 766 performance_frequency = as_long(count); 767 QueryPerformanceCounter(&count); 768 initial_performance_count = as_long(count); 769 } else { 770 win32::_has_performance_count = 0; 771 FILETIME wt; 772 GetSystemTimeAsFileTime(&wt); 773 first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 774 } 775 } 776 777 778 double os::elapsedTime() { 779 return (double) elapsed_counter() / (double) elapsed_frequency(); 780 } 781 782 783 // Windows format: 784 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601. 785 // Java format: 786 // Java standards require the number of milliseconds since 1/1/1970 787 788 // Constant offset - calculated using offset() 789 static jlong _offset = 116444736000000000; 790 // Fake time counter for reproducible results when debugging 791 static jlong fake_time = 0; 792 793 #ifdef ASSERT 794 // Just to be safe, recalculate the offset in debug mode 795 static jlong _calculated_offset = 0; 796 static int _has_calculated_offset = 0; 797 798 jlong offset() { 799 if (_has_calculated_offset) return _calculated_offset; 800 SYSTEMTIME java_origin; 801 java_origin.wYear = 1970; 802 java_origin.wMonth = 1; 803 java_origin.wDayOfWeek = 0; // ignored 804 java_origin.wDay = 1; 805 java_origin.wHour = 0; 806 java_origin.wMinute = 0; 807 java_origin.wSecond = 0; 808 java_origin.wMilliseconds = 0; 809 FILETIME jot; 810 if (!SystemTimeToFileTime(&java_origin, &jot)) { 811 fatal(err_msg("Error = %d\nWindows error", GetLastError())); 812 } 813 _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime); 814 _has_calculated_offset = 1; 815 assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal"); 816 return _calculated_offset; 817 } 818 #else 819 jlong 
offset() { 820 return _offset; 821 } 822 #endif 823 824 jlong windows_to_java_time(FILETIME wt) { 825 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 826 return (a - offset()) / 10000; 827 } 828 829 FILETIME java_to_windows_time(jlong l) { 830 jlong a = (l * 10000) + offset(); 831 FILETIME result; 832 result.dwHighDateTime = high(a); 833 result.dwLowDateTime = low(a); 834 return result; 835 } 836 837 bool os::supports_vtime() { return true; } 838 bool os::enable_vtime() { return false; } 839 bool os::vtime_enabled() { return false; } 840 841 double os::elapsedVTime() { 842 FILETIME created; 843 FILETIME exited; 844 FILETIME kernel; 845 FILETIME user; 846 if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) { 847 // the resolution of windows_to_java_time() should be sufficient (ms) 848 return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS; 849 } else { 850 return elapsedTime(); 851 } 852 } 853 854 jlong os::javaTimeMillis() { 855 if (UseFakeTimers) { 856 return fake_time++; 857 } else { 858 FILETIME wt; 859 GetSystemTimeAsFileTime(&wt); 860 return windows_to_java_time(wt); 861 } 862 } 863 864 jlong os::javaTimeNanos() { 865 if (!win32::_has_performance_count) { 866 return javaTimeMillis() * NANOSECS_PER_MILLISEC; // the best we can do. 
867 } else { 868 LARGE_INTEGER current_count; 869 QueryPerformanceCounter(¤t_count); 870 double current = as_long(current_count); 871 double freq = performance_frequency; 872 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC); 873 return time; 874 } 875 } 876 877 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 878 if (!win32::_has_performance_count) { 879 // javaTimeMillis() doesn't have much percision, 880 // but it is not going to wrap -- so all 64 bits 881 info_ptr->max_value = ALL_64_BITS; 882 883 // this is a wall clock timer, so may skip 884 info_ptr->may_skip_backward = true; 885 info_ptr->may_skip_forward = true; 886 } else { 887 jlong freq = performance_frequency; 888 if (freq < NANOSECS_PER_SEC) { 889 // the performance counter is 64 bits and we will 890 // be multiplying it -- so no wrap in 64 bits 891 info_ptr->max_value = ALL_64_BITS; 892 } else if (freq > NANOSECS_PER_SEC) { 893 // use the max value the counter can reach to 894 // determine the max value which could be returned 895 julong max_counter = (julong)ALL_64_BITS; 896 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC)); 897 } else { 898 // the performance counter is 64 bits and we will 899 // be using it directly -- so no wrap in 64 bits 900 info_ptr->max_value = ALL_64_BITS; 901 } 902 903 // using a counter, so no skipping 904 info_ptr->may_skip_backward = false; 905 info_ptr->may_skip_forward = false; 906 } 907 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time 908 } 909 910 char* os::local_time_string(char *buf, size_t buflen) { 911 SYSTEMTIME st; 912 GetLocalTime(&st); 913 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 914 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); 915 return buf; 916 } 917 918 bool os::getTimesSecs(double* process_real_time, 919 double* process_user_time, 920 double* process_system_time) { 921 HANDLE h_process = GetCurrentProcess(); 922 FILETIME create_time, exit_time, kernel_time, user_time; 923 
  // Tail of the enclosing process-times query (its signature begins before
  // this chunk): convert Windows FILETIMEs to Java millis, then to seconds
  // for the double out-parameters.
  BOOL result = GetProcessTimes(h_process,
                                &create_time,
                                &exit_time,
                                &kernel_time,
                                &user_time);
  if (result != 0) {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    jlong rtc_millis = windows_to_java_time(wt);
    jlong user_millis = windows_to_java_time(user_time);
    jlong system_millis = windows_to_java_time(kernel_time);
    *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
    *process_user_time = ((double) user_millis) / ((double) MILLIUNITS);
    *process_system_time = ((double) system_millis) / ((double) MILLIUNITS);
    return true;
  } else {
    return false;
  }
}

// Quiesce VM services before process exit: persistent perf memory,
// buffered log output, and any user-registered abort hook.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}


// Dynamically resolved pointer to dbghelp.dll's MiniDumpWriteDump, so the
// VM has no load-time dependency on dbghelp.dll.
static BOOL (WINAPI *_MiniDumpWriteDump) ( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
                                           PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION);

// Write a Windows minidump (hs_err_pid<pid>.mdmp in the current directory)
// for a crashing VM, honoring the CreateMinidumpOnCrash flag.  'buffer' is
// scratch space reused for both the dump path and the status messages
// passed to VMError::report_coredump_status().
void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) {
  HINSTANCE dbghelp;
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  HANDLE dumpFile;
  MINIDUMP_TYPE dumpType;
  static const char* cwd;

  // Default is to always create dump for debug builds, on product builds only dump on server versions of Windows.
#ifndef ASSERT
  // If running on a client version of Windows and user has not explicitly enabled dumping
  if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false);
    return;
    // If running on a server version of Windows and user has explicitly disabled dumping
  } else if (os::win32::is_windows_server() && !FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#else
  if (!FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) {
    VMError::report_coredump_status("Minidump has been disabled from the command line", false);
    return;
  }
#endif

  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);

  if (dbghelp == NULL) {
    VMError::report_coredump_status("Failed to load dbghelp.dll", false);
    return;
  }

  _MiniDumpWriteDump = CAST_TO_FN_PTR(
      BOOL(WINAPI *)( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION,
                      PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION),
      GetProcAddress(dbghelp, "MiniDumpWriteDump"));

  if (_MiniDumpWriteDump == NULL) {
    VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false);
    return;
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData);

  // Older versions of dbghelp.h doesn't contain all the dumptypes we want, dbghelp.h with
  // API_VERSION_NUMBER 11 or higher contains the ones we want though
#if API_VERSION_NUMBER >= 11
  dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo |
                             MiniDumpWithUnloadedModules);
#endif

  cwd = get_current_directory(NULL, 0);
  jio_snprintf(buffer, bufferSize, "%s\\hs_err_pid%u.mdmp", cwd, current_process_id());
  dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);

  if (dumpFile == INVALID_HANDLE_VALUE) {
    VMError::report_coredump_status("Failed to create file for dumping", false);
    return;
  }
  // Only pass exception info to MiniDumpWriteDump when the caller supplied
  // both a record and a context; otherwise dump without it.
  if (exceptionRecord != NULL && contextRecord != NULL) {
    ep.ContextRecord = (PCONTEXT) contextRecord;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }


  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
    DWORD error = GetLastError();
    LPTSTR msgbuf = NULL;

    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, error, 0, (LPTSTR)&msgbuf, 0, NULL) != 0) {

      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x: %s)", error, msgbuf);
      LocalFree(msgbuf);
    } else {
      // Call to FormatMessage failed, just include the result from GetLastError
      jio_snprintf(buffer, bufferSize, "Call to MiniDumpWriteDump() failed (Error 0x%x)", error);
    }
    VMError::report_coredump_status(buffer, false);
  } else {
    // Success: 'buffer' still holds the dump file path.
    VMError::report_coredump_status(buffer, true);
  }

  CloseHandle(dumpFile);
}



// Graceful VM abort: run shutdown() hooks, then exit the process.
void os::abort(bool dump_core)
{
  os::shutdown();
  // no core dump on Windows
  ::exit(1);
}

// Die immediately, no exit hook,
// no abort hook, no cleanup.
void os::die() {
  // _exit bypasses atexit handlers and buffered-I/O flushing.
  _exit(-1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
// * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

/* Caller must have already run dirname through JVM_NativePath, which removes
   duplicate slashes and converts all instances of '/' into '\\'. */

// Open a directory stream (POSIX opendir emulation on top of
// FindFirstFile).  Returns NULL and sets errno on failure.
DIR *
os::opendir(const char *dirname)
{
  assert(dirname != NULL, "just checking");      // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
  DWORD fattr;                                   // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == 0) {
    errno = ENOMEM;
    return 0;
  }

  /*
   * Win32 accepts "\" in its POSIX stat(), but refuses to treat it
   * as a directory in FindFirstFile(). We detect this case here and
   * prepend the current drive name.
   */
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    alt_dirname[0] = _getdrive() + 'A' - 1;
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  // +5 leaves room for the "\\*.*" suffix appended below.
  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dirp->path == 0) {
    free(dirp, mtInternal);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dirp->path, dirname);

  fattr = GetFileAttributes(dirp->path);
  if (fattr == 0xffffffff) {    // INVALID_FILE_ATTRIBUTES: path does not exist
    free(dirp->path, mtInternal);
    free(dirp, mtInternal);
    errno = ENOENT;
    return 0;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path, mtInternal);
    free(dirp, mtInternal);
    errno = ENOTDIR;
    return 0;
  }

  /* Append "*.*", or possibly "\\*.*", to path */
  if (dirp->path[1] == ':'
      && (dirp->path[2] == '\0'
          || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    /* No '\\' needed for cases like "Z:" or "Z:\" */
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    // ERROR_FILE_NOT_FOUND just means an empty directory — not an error;
    // any other failure is reported as EACCES.
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path, mtInternal);
      free(dirp, mtInternal);
      errno = EACCES;
      return 0;
    }
  }
  return dirp;
}

/* parameter dbuf unused on Windows */

// Return the next directory entry, or NULL at end-of-stream.  The name is
// copied out of find_data *before* advancing, so the final entry is still
// returned on the call that exhausts the handle.
struct dirent *
os::readdir(DIR *dirp, dirent *dbuf)
{
  assert(dirp != NULL, "just checking");         // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    return 0;
  }

  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return 0;
    }
    // No more entries: close now; the next readdir call returns NULL.
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}

// Close the stream and release DIR storage.  Returns 0, or -1 with
// errno = EBADF if FindClose fails.
int
os::closedir(DIR *dirp)
{
  assert(dirp != NULL, "just checking");         // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path, mtInternal);
  free(dirp, mtInternal);
  return 0;
}

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
// Return the system temp directory in a static buffer; an empty string on
// failure.  Fix: GetTempPath returns the number of characters copied on
// success, but the *required* buffer size (>= MAX_PATH here) when the
// buffer is too small, and 0 on error.  The old check (> 0) treated the
// too-small case as success; require 0 < n < MAX_PATH.
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  DWORD n = GetTempPath(MAX_PATH, path_buf);
  if (n > 0 && n < MAX_PATH) {
    return path_buf;
  }
  // Failure (or a path that cannot fit): report an empty path rather than
  // a truncated/garbage one.
  path_buf[0] = '\0';
  return path_buf;
}

// True if 'filename' is a non-empty name of an existing file system object.
static bool file_exists(const char* filename) {
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
}

// Build a full DLL path "<pname>\<fname>.dll" into 'buffer'.  'pname' may
// be empty, a single directory (with or without trailing separator), or a
// search path; for a search path the first element whose candidate file
// exists wins.  Returns false on potential buffer overflow or when no
// candidate on a search path exists.
bool os::dll_build_name(char *buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  const size_t pnamelen = pname ? strlen(pname) : 0;
  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    jio_snprintf(buffer, buflen, "%s.dll", fname);
    retval = true;
  } else if (c == ':' || c == '\\') {
    // Prefix already ends with a separator; no extra '\\' needed.
    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    // 'pname' is a search path: probe each element for an existing file.
    int n;
    char** pelements = split_path(pname, &n);
    if (pelements == NULL) {
      return false;
    }
    for (int i = 0; i < n; i++) {
      char* path = pelements[i];
      // Really shouldn't be NULL, but check can't hurt
      size_t plen = (path == NULL) ? 0 : strlen(path);
      if (plen == 0) {
        continue; // skip the empty path values
      }
      const char lastchar = path[plen - 1];
      if (lastchar == ':' || lastchar == '\\') {
        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
      } else {
        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
      }
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
    retval = true;
  }
  return retval;
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
const char* os::get_current_directory(char *buf, size_t buflen) {
  // _getcwd takes an int length; clamp oversized size_t requests.
  int n = static_cast<int>(buflen);
  if (buflen > INT_MAX) n = INT_MAX;
  return _getcwd(buf, n);
}

//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if address in
// within the NTDLL address space.
//
static bool _addr_in_ntdll( address addr )
{
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if (hmod == NULL) return false;
  if (!os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod,
                                           &minfo, sizeof(MODULEINFO)) )
    return false;

  // In range [base, base + SizeOfImage)?
  if ((addr >= minfo.lpBaseOfDll) &&
      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
    return true;
  else
    return false;
}
#endif

// In/out parameter block for _locate_module_by_addr.
struct _modinfo {
  address addr;      // in: the address to look up
  char* full_path;   // point to a char buffer
  int buflen;        // size of the buffer
  address base_addr; // out: base address of the module containing 'addr'
};

// Module-enumeration callback: returns 1 (stop) at the module whose range
// [base_addr, top_address) contains pmod->addr, recording path and base.
static int _locate_module_by_addr(const char * mod_fname, address base_addr,
                                  address top_address, void * param) {
  struct _modinfo *pmod = (struct _modinfo *)param;
  if (!pmod) return -1;

  if (base_addr <= pmod->addr &&
      top_address > pmod->addr) {
    // if a buffer is provided, copy path name to the buffer
    if (pmod->full_path) {
      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
    }
    pmod->base_addr = base_addr;
    return 1;
  }
  return 0;
}

// Find the library containing 'addr': copies its path into 'buf' and the
// offset of 'addr' from the module base into *offset (if non-NULL).
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
  // return the full path to the DLL file, sometimes it returns path
  // to the corresponding PDB file (debug info); sometimes it only
  // returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr = addr;
  mi.full_path = buf;
  mi.buflen = buflen;
  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  // Not found: clear outputs.
  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

// Map a code address to a symbol name via the Decoder; clears the outputs
// on failure.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset)) {
    return true;
  }
  if (offset != NULL) *offset = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1];
// uses the address of this function itself as a probe point inside jvm.dll.
static int _locate_jvm_dll(const char* mod_fname, address base_addr,
                           address top_address, void * param) {
  if (!param) return -1;

  if (base_addr <= (address)_locate_jvm_dll &&
      top_address > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = top_address;
    return 1;
  }
  return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
bool os::address_is_in_vm(address addr) {
  // Lazily cache jvm.dll's address range on first use.
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(const char* fname, address base_address,
                         address top_address, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
  return 0;
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
void * os::dll_load(const char *name, char *ebuf, int ebuflen)
{
  void * result = LoadLibrary(name);
  if (result != NULL)
  {
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen-1);
    ebuf[ebuflen-1]='\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen-1]='\0';
  int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0);
  if (file_descriptor<0)
  {
    return NULL;
  }

  // Walk the PE file header by hand to extract the target machine code.
  uint32_t signature_offset;
  uint16_t lib_arch=0;
  bool failed_to_get_lib_arch=
  (
    // Go to position 3c in the dll
    (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0)
    ||
    // Read location of signature
    (sizeof(signature_offset)!=
      (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset))))
    ||
    // Go to COFF File Header in dll
    // that is located after "signature" (4 bytes long)
    (os::seek_to_file_offset(file_descriptor,
      signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0)
    ||
    // Read field that contains code of architecture
    // that dll was built for
    (sizeof(lib_arch)!=
      (os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch))))
  );

  ::close(file_descriptor);
  if (failed_to_get_lib_arch)
  {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  // Map of PE machine codes to human-readable architecture names.
  typedef struct
  {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[]={
    {IMAGE_FILE_MACHINE_I386,  (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_IA64,  (char*)"IA 64"}
  };
#if   (defined _M_IA64)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64;
#elif (defined _M_AMD64)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
         is defined :_M_IA64,_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str=NULL,*lib_arch_str=NULL;
  for (unsigned int i=0;i<ARRAY_SIZE(arch_array);i++)
  {
    if (lib_arch==arch_array[i].arch_code)
      lib_arch_str=arch_array[i].arch_name;
    if (running_arch==arch_array[i].arch_code)
      running_arch_str=arch_array[i].arch_name;
  }

  assert(running_arch_str,
         "Didn't find runing architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch)
  {
    return NULL;
  }

  if (lib_arch_str!=NULL)
  {
    ::_snprintf(ebuf, ebuflen-1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str,running_arch_str);
  }
  else
  {
    // don't know what architecture this dll was built for
    ::_snprintf(ebuf, ebuflen-1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch,running_arch_str);
  }

  return NULL;
}

// Print the list of loaded modules to 'st'.
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  get_loaded_modules_info(_print_module, (void *)st);
}

// Enumerate the loaded modules of the current process via PSAPI and invoke
// 'callback' with (filename, base, top) for each; stops early when the
// callback returns non-zero.  Returns the last callback result, or 0.
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  HANDLE   hProcess;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[MAX_PATH];
  int         result = 0;

  if (!os::PSApiDll::PSApiAvailable()) {
    return 0;
  }

  int pid = os::current_process_id();
  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
                                        sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
                                           filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
                                            &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = callback(filename, (address)modinfo.lpBaseOfDll,
                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
}

void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}

void os::print_os_info(outputStream* st) {
  st->print("OS:");

  os::win32::print_windows_version(st);
}

// Decode OSVERSIONINFOEX (and SYSTEM_INFO for 64-bit detection) into a
// human-readable Windows product name, build number and service pack.
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  SYSTEM_INFO si;

  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);

  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("N/A");
    return;
  }

  // Encode major.minor as major*1000 + minor, e.g. 5.2 -> 5002.
  int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;

  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  if (os_vers >= 5002) {
    // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
    // find out whether we are running on 64 bit processor or not.
    if (os::Kernel32Dll::GetNativeSystemInfoAvailable()) {
      os::Kernel32Dll::GetNativeSystemInfo(&si);
    } else {
      GetSystemInfo(&si);
    }
  }

  if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
    switch (os_vers) {
    case 3051: st->print(" Windows NT 3.51"); break;
    case 4000: st->print(" Windows NT 4.0"); break;
    case 5000: st->print(" Windows 2000"); break;
    case 5001: st->print(" Windows XP"); break;
    case 5002:
      // 5.2 is XP x64 on an AMD64 workstation, otherwise Server 2003.
      if (osvi.wProductType == VER_NT_WORKSTATION &&
          si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
        st->print(" Windows XP x64 Edition");
      } else {
        st->print(" Windows Server 2003 family");
      }
      break;

    case 6000:
      if (osvi.wProductType == VER_NT_WORKSTATION) {
        st->print(" Windows Vista");
      } else {
        st->print(" Windows Server 2008");
      }
      break;

    case 6001:
      if (osvi.wProductType == VER_NT_WORKSTATION) {
        st->print(" Windows 7");
      } else {
        st->print(" Windows Server 2008 R2");
      }
      break;

    case 6002:
      if (osvi.wProductType == VER_NT_WORKSTATION) {
        st->print(" Windows 8");
      } else {
        st->print(" Windows Server 2012");
      }
      break;

    case 6003:
      if (osvi.wProductType == VER_NT_WORKSTATION) {
        st->print(" Windows 8.1");
      } else {
        st->print(" Windows Server 2012 R2");
      }
      break;

    default: // future os
      // Unrecognized windows, print out its major and minor versions
      st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
    }
  } else {
    switch (os_vers) {
    case 4000: st->print(" Windows 95"); break;
    case 4010: st->print(" Windows 98"); break;
    case 4090: st->print(" Windows Me"); break;
    default: // future windows, print out its major and minor versions
      st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
    }
  }

  if (os_vers >= 6000 && si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
    st->print(" , 64 bit");
  }

  st->print(" Build %d", osvi.dwBuildNumber);
  st->print(" %s", osvi.szCSDVersion);           // service pack
  st->cr();
}

void os::pd_print_cpu_info(outputStream* st) {
  // Nothing to do for now.
}

// Print page size, physical memory and page-file (swap) usage on 'st'.
void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);

  st->print(", physical %uk", os::physical_memory() >> 10);
  st->print("(%uk free)", os::available_memory() >> 10);

  st->print(", swap %uk", ms.ullTotalPageFile >> 10);
  st->print("(%uk free)", ms.ullAvailPageFile >> 10);
  st->cr();
}

// Describe a Win32 EXCEPTION_RECORD ('siginfo') on 'st'.
void os::print_siginfo(outputStream *st, void *siginfo) {
  EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo;
  st->print("siginfo:");
  st->print(" ExceptionCode=0x%x", er->ExceptionCode);

  if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      er->NumberParameters >= 2) {
    // For access violations, parameter 0 distinguishes a read (0) from a
    // write (1) fault; parameter 1 is the faulting address.
    switch (er->ExceptionInformation[0]) {
    case 0: st->print(", reading address"); break;
    case 1: st->print(", writing address"); break;
    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                       er->ExceptionInformation[0]);
    }
    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR &&
             er->NumberParameters >= 2 && UseSharedSpaces) {
    // Page-in failure inside the CDS archive mapping: likely an external
    // problem with the mapped file, not a VM bug.
    FileMapInfo* mapinfo = FileMapInfo::current_info();
    if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) {
      st->print("\n\nError accessing class data sharing archive."
                " Mapped file inaccessible during execution, "
                " possible disk/network problem.");
    }
  } else {
    // Other exception codes: dump all parameters raw.
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}

// Cached result of os::jvm_path(); empty until the first call.
static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like jvm.dll is installed there (append a fake suffix
    // hotspot/jvm.dll).
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {

      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      // _access(buf, 0) == 0 means the "\jre\bin" directory exists.
      if (0 != _access(buf, 0)) {
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  if (buf[0] == '\0') {
    // Normal case: ask Windows for the path of the loaded jvm module.
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  strncpy(saved_jvm_path, buf, MAX_PATH);
}


// JNI name decoration on 32-bit Windows: a leading underscore...
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}


// ...and a trailing '@<bytes of arguments>' suffix.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("@%d", args_size * sizeof(int));
#endif
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c

// Copy a text description of the pending Windows (or, failing that, C
// runtime) error into 'buf'.  Returns the message length, or 0 when no
// error is pending.
size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
          FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
          NULL,
          errval,
          0,
          buf,
          (DWORD)len,
          NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}

// Last error as an int: prefers GetLastError(), falls back to errno.
int os::get_last_error() {
  DWORD error = GetLastError();
  if (error == 0)
    error = errno;
  return (int)error;
}

// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;

// Dispatch a user signal to the VM's signal machinery, then re-register
// this handler (signal() handlers are one-shot here).
static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}

void* os::user_handler() {
  return (void*) UserHandler;
}

// Install 'handler' for 'signal_number' and return the previous handler.
// SIGBREAK is kept in the sigbreakHandler side variable (see the NOTE
// above) unless -Xrs (ReduceSignalUsage) is set.
void* os::signal(int signal_number, void* handler) {
  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
    void (*oldHandler)(int) = sigbreakHandler;
    sigbreakHandler = (void (*)(int)) handler;
    return (void*) oldHandler;
  } else {
    return (void*)::signal(signal_number, (void (*)(int))handler);
  }
}

void os::signal_raise(int signal_number) {
  raise(signal_number);
}

// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events. We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
  switch (event) {
  case CTRL_C_EVENT:
    if (is_error_reported()) {
      // Ctrl-C is pressed during error reporting, likely because the error
      // handler fails to abort. Let VM die immediately.
      os::die();
    }

    os::signal_raise(SIGINT);
    return TRUE;
    break;
  case CTRL_BREAK_EVENT:
    if (sigbreakHandler != NULL) {
      (*sigbreakHandler)(SIGBREAK);
    }
    return TRUE;
    break;
  case CTRL_LOGOFF_EVENT: {
    // Don't terminate JVM if it is running in a non-interactive session,
    // such as a service process.
    USEROBJECTFLAGS flags;
    HANDLE handle = GetProcessWindowStation();
    if (handle != NULL &&
        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
                                 sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let next handler to deal
      // with it.
      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
        return FALSE;
      }
    }
    // Interactive logoff: deliberate fall-through to the SIGTERM cases.
  }
  case CTRL_CLOSE_EVENT:
  case CTRL_SHUTDOWN_EVENT:
    os::signal_raise(SIGTERM);
    return TRUE;
    break;
  default:
    break;
  }
  return FALSE;
}

/*
 * The following code is moved from os.cpp for making this
 * code platform specific, which it is by its very nature.
 */

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Released once per posted signal; waited on in check_pending_signals().
static HANDLE sig_sem = NULL;

// Set up the pending-signal counters, the signal semaphore, and (unless
// -Xrs is given) the console control handler.
void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.
  // For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified. This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case. See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}

// Post 'signal_number': bump its pending counter, then release the
// semaphore so a waiter in check_pending_signals() wakes up.
void os::signal_notify(int signal_number) {
  BOOL ret;
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[signal_number]);
    ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
    assert(ret != 0, "ReleaseSemaphore() failed");
  }
}

// Return the lowest-numbered pending signal, atomically decrementing its
// counter.  If 'wait_for_signal', block (suspend-aware) on the semaphore
// until a signal is posted; otherwise return -1 when none is pending.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // Claim one occurrence of signal i via CAS; a lost race just means
      // another thread claimed it, so keep scanning.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        // Re-release the semaphore so the wakeup is not lost, then
        // self-suspend; retry the wait after resuming.
        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
        assert(ret != 0, "ReleaseSemaphore() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

int os::signal_lookup() {
  return check_pending_signals(false);
}

int os::signal_wait() {
  return check_pending_signals(true);
}

// Implicit OS exception handling

// Redirect the faulting thread to 'handler': save the faulting pc in the
// thread (when thread info is available), rewrite the context's pc, and
// resume execution at the handler.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) {
  JavaThread* thread = JavaThread::current();
  // Save pc in thread
#ifdef _M_IA64
  // Do not blow up if no thread info available.
  if (thread) {
    // Saving PRECISE pc (with slot information) in thread.
    uint64_t precise_pc = (uint64_t) exceptionInfo->ExceptionRecord->ExceptionAddress;
    // Convert precise PC into "Unix" format
    precise_pc = (precise_pc & 0xFFFFFFFFFFFFFFF0) | ((precise_pc & 0xF) >> 2);
    thread->set_saved_exception_pc((address)precise_pc);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
  // Clear out psr.ri (= Restart Instruction) in order to continue
  // at the beginning of the target bundle.
  exceptionInfo->ContextRecord->StIPSR &= 0xFFFFF9FFFFFFFFFF;
  assert(((DWORD64)handler & 0xF) == 0, "Target address must point to the beginning of a bundle!");
#elif _M_AMD64
  // Do not blow up if no thread info available.
2060 if (thread) { 2061 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip); 2062 } 2063 // Set pc to handler 2064 exceptionInfo->ContextRecord->Rip = (DWORD64)handler; 2065 #else 2066 // Do not blow up if no thread info available. 2067 if (thread) { 2068 thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip); 2069 } 2070 // Set pc to handler 2071 exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler; 2072 #endif 2073 2074 // Continue the execution 2075 return EXCEPTION_CONTINUE_EXECUTION; 2076 } 2077 2078 2079 // Used for PostMortemDump 2080 extern "C" void safepoints(); 2081 extern "C" void find(int x); 2082 extern "C" void events(); 2083 2084 // According to Windows API documentation, an illegal instruction sequence should generate 2085 // the 0xC000001C exception code. However, real world experience shows that occasionnaly 2086 // the execution of an illegal instruction can generate the exception code 0xC000001E. This 2087 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems). 2088 2089 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E 2090 2091 // From "Execution Protection in the Windows Operating System" draft 0.35 2092 // Once a system header becomes available, the "real" define should be 2093 // included or copied here. 2094 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08 2095 2096 // Handle NAT Bit consumption on IA64. 2097 #ifdef _M_IA64 2098 #define EXCEPTION_REG_NAT_CONSUMPTION STATUS_REG_NAT_CONSUMPTION 2099 #endif 2100 2101 // Windows Vista/2008 heap corruption check 2102 #define EXCEPTION_HEAP_CORRUPTION 0xC0000374 2103 2104 #define def_excpt(val) #val, val 2105 2106 struct siglabel { 2107 char *name; 2108 int number; 2109 }; 2110 2111 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual 2112 // C++ compiler contain this error code. 
Because this is a compiler-generated 2113 // error, the code is not listed in the Win32 API header files. 2114 // The code is actually a cryptic mnemonic device, with the initial "E" 2115 // standing for "exception" and the final 3 bytes (0x6D7363) representing the 2116 // ASCII values of "msc". 2117 2118 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363 2119 2120 2121 struct siglabel exceptlabels[] = { 2122 def_excpt(EXCEPTION_ACCESS_VIOLATION), 2123 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), 2124 def_excpt(EXCEPTION_BREAKPOINT), 2125 def_excpt(EXCEPTION_SINGLE_STEP), 2126 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), 2127 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND), 2128 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO), 2129 def_excpt(EXCEPTION_FLT_INEXACT_RESULT), 2130 def_excpt(EXCEPTION_FLT_INVALID_OPERATION), 2131 def_excpt(EXCEPTION_FLT_OVERFLOW), 2132 def_excpt(EXCEPTION_FLT_STACK_CHECK), 2133 def_excpt(EXCEPTION_FLT_UNDERFLOW), 2134 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO), 2135 def_excpt(EXCEPTION_INT_OVERFLOW), 2136 def_excpt(EXCEPTION_PRIV_INSTRUCTION), 2137 def_excpt(EXCEPTION_IN_PAGE_ERROR), 2138 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION), 2139 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2), 2140 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION), 2141 def_excpt(EXCEPTION_STACK_OVERFLOW), 2142 def_excpt(EXCEPTION_INVALID_DISPOSITION), 2143 def_excpt(EXCEPTION_GUARD_PAGE), 2144 def_excpt(EXCEPTION_INVALID_HANDLE), 2145 def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION), 2146 def_excpt(EXCEPTION_HEAP_CORRUPTION), 2147 #ifdef _M_IA64 2148 def_excpt(EXCEPTION_REG_NAT_CONSUMPTION), 2149 #endif 2150 NULL, 0 2151 }; 2152 2153 const char* os::exception_name(int exception_code, char *buf, size_t size) { 2154 for (int i = 0; exceptlabels[i].name != NULL; i++) { 2155 if (exceptlabels[i].number == exception_code) { 2156 jio_snprintf(buf, size, "%s", exceptlabels[i].name); 2157 return buf; 2158 } 2159 } 2160 2161 return NULL; 2162 } 2163 2164 
//----------------------------------------------------------------------------- 2165 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2166 // handle exception caused by idiv; should only happen for -MinInt/-1 2167 // (division by zero is handled explicitly) 2168 #ifdef _M_IA64 2169 assert(0, "Fix Handle_IDiv_Exception"); 2170 #elif _M_AMD64 2171 PCONTEXT ctx = exceptionInfo->ContextRecord; 2172 address pc = (address)ctx->Rip; 2173 assert(pc[0] == 0xF7, "not an idiv opcode"); 2174 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2175 assert(ctx->Rax == min_jint, "unexpected idiv exception"); 2176 // set correct result values and continue after idiv instruction 2177 ctx->Rip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2178 ctx->Rax = (DWORD)min_jint; // result 2179 ctx->Rdx = (DWORD)0; // remainder 2180 // Continue the execution 2181 #else 2182 PCONTEXT ctx = exceptionInfo->ContextRecord; 2183 address pc = (address)ctx->Eip; 2184 assert(pc[0] == 0xF7, "not an idiv opcode"); 2185 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2186 assert(ctx->Eax == min_jint, "unexpected idiv exception"); 2187 // set correct result values and continue after idiv instruction 2188 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2189 ctx->Eax = (DWORD)min_jint; // result 2190 ctx->Edx = (DWORD)0; // remainder 2191 // Continue the execution 2192 #endif 2193 return EXCEPTION_CONTINUE_EXECUTION; 2194 } 2195 2196 //----------------------------------------------------------------------------- 2197 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2198 PCONTEXT ctx = exceptionInfo->ContextRecord; 2199 #ifndef _WIN64 2200 // handle exception caused by native method modifying control word 2201 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2202 2203 switch (exception_code) { 2204 case EXCEPTION_FLT_DENORMAL_OPERAND: 2205 case EXCEPTION_FLT_DIVIDE_BY_ZERO: 2206 case 
EXCEPTION_FLT_INEXACT_RESULT: 2207 case EXCEPTION_FLT_INVALID_OPERATION: 2208 case EXCEPTION_FLT_OVERFLOW: 2209 case EXCEPTION_FLT_STACK_CHECK: 2210 case EXCEPTION_FLT_UNDERFLOW: 2211 jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std()); 2212 if (fp_control_word != ctx->FloatSave.ControlWord) { 2213 // Restore FPCW and mask out FLT exceptions 2214 ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0; 2215 // Mask out pending FLT exceptions 2216 ctx->FloatSave.StatusWord &= 0xffffff00; 2217 return EXCEPTION_CONTINUE_EXECUTION; 2218 } 2219 } 2220 2221 if (prev_uef_handler != NULL) { 2222 // We didn't handle this exception so pass it to the previous 2223 // UnhandledExceptionFilter. 2224 return (prev_uef_handler)(exceptionInfo); 2225 } 2226 #else // !_WIN64 2227 /* 2228 On Windows, the mxcsr control bits are non-volatile across calls 2229 See also CR 6192333 2230 */ 2231 jint MxCsr = INITIAL_MXCSR; 2232 // we can't use StubRoutines::addr_mxcsr_std() 2233 // because in Win64 mxcsr is not saved there 2234 if (MxCsr != ctx->MxCsr) { 2235 ctx->MxCsr = MxCsr; 2236 return EXCEPTION_CONTINUE_EXECUTION; 2237 } 2238 #endif // !_WIN64 2239 2240 return EXCEPTION_CONTINUE_SEARCH; 2241 } 2242 2243 static inline void report_error(Thread* t, DWORD exception_code, 2244 address addr, void* siginfo, void* context) { 2245 VMError err(t, exception_code, addr, siginfo, context); 2246 err.report_and_die(); 2247 2248 // If UseOsErrorReporting, this will return here and save the error file 2249 // somewhere where we can find it in the minidump. 
}

//-----------------------------------------------------------------------------
// Top-level structured exception filter for the VM.  Decides, per exception
// code and thread state, whether to resume at a VM continuation stub
// (Handle_Exception), continue execution as-is, report a fatal error, or
// let the search continue to an outer handler.
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#ifdef _M_IA64
  // On Itanium, we need the "precise pc", which has the slot number coded
  // into the least 4 bits: 0000=slot0, 0100=slot1, 1000=slot2 (Windows format).
  address pc = (address) exceptionInfo->ExceptionRecord->ExceptionAddress;
  // Convert the pc to "Unix format", which has the slot number coded
  // into the least 2 bits: 0000=slot0, 0001=slot1, 0010=slot2
  // This is needed for IA64 because "relocation" / "implicit null check" / "poll instruction"
  // information is saved in the Unix format.
  address pc_unix_format = (address) ((((uint64_t)pc) & 0xFFFFFFFFFFFFFFF0) | ((((uint64_t)pc) & 0xF) >> 2));
#elif _M_AMD64
  address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
  Thread* t = ThreadLocalStorage::get_thread_slow();          // slow & steady

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      bool instr_spans_page_boundary =
        (align_size_down((intptr_t) pc ^ (intptr_t) addr,
                         (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start =
            (address) align_size_down((intptr_t) addr, (intptr_t) page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          if (PrintMiscellaneous && Verbose) {
            char buf[256];
            jio_snprintf(buf, sizeof(buf), "Execution protection violation "
                         "at " INTPTR_FORMAT
                         ", unguarding " INTPTR_FORMAT ": %s", addr,
                         page_start, (res ? "success" : strerror(errno)));
            tty->print_raw_cr(buf);
          }

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here.  Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM.  Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop.  This condition is probably even more unlikely
          // than the first.
          //
          // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are unnecessary complication:
          // this handler is a best-effort safety net, not a complete solution.
          // It is disabled by default and should only be used as a workaround
          // in case we missed any no-execute-unsafe VM code.

          last_addr = addr;

          return EXCEPTION_CONTINUE_EXECUTION;
        }
      }

      // Last unguard failed or not unguarding
      tty->print_raw_cr("Execution protection violation");
      report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    }
  }
#endif // _WIN64

  // Check to see if we caught the safepoint code in the
  // process of write protecting the memory serialization page.
  // It write enables the page immediately after protecting it
  // so just return.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    JavaThread* thread = (JavaThread*) t;
    PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
    address addr = (address) exceptionRecord->ExceptionInformation[1];
    if (os::is_memory_serialize_page(thread, addr)) {
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
      VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
  }

  if (t != NULL && t->is_Java_thread()) {
    JavaThread* thread = (JavaThread*) t;
    bool in_java = thread->thread_state() == _thread_in_Java;

    // Handle potential stack overflows up front.
    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
      if (os::uses_stack_guard_pages()) {
#ifdef _M_IA64
        // Use guard page for register stack.
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        // Check for a register stack overflow on Itanium
        if (thread->addr_inside_register_stack_red_zone(addr)) {
          // Fatal red zone violation happens if the Java program
          // catches a StackOverflow error and does so much processing
          // that it runs beyond the unprotected yellow guard zone. As
          // a result, we are out of here.
          fatal("ERROR: Unrecoverable stack overflow happened. JVM will exit.");
        } else if (thread->addr_inside_register_stack(addr)) {
          // Disable the yellow zone which sets the state that
          // we've got a stack overflow problem.
          if (thread->stack_yellow_zone_enabled()) {
            thread->disable_stack_yellow_zone();
          }
          // Give us some room to process the exception.
          thread->disable_register_stack_guard();
          // Tracing with +Verbose.
          if (Verbose) {
            tty->print_cr("SOF Compiled Register Stack overflow at " INTPTR_FORMAT " (SIGSEGV)", pc);
            tty->print_cr("Register Stack access at " INTPTR_FORMAT, addr);
            tty->print_cr("Register Stack base " INTPTR_FORMAT, thread->register_stack_base());
            tty->print_cr("Register Stack [" INTPTR_FORMAT "," INTPTR_FORMAT "]",
                          thread->register_stack_base(),
                          thread->register_stack_base() + thread->stack_size());
          }

          // Reguard the permanent register stack red zone just to be sure.
          // We saw Windows silently disabling this without telling us.
          thread->enable_register_stack_red_zone();

          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
#endif
        if (thread->stack_yellow_zone_enabled()) {
          // Yellow zone violation.  The o/s has unprotected the first yellow
          // zone page for us.  Note: must call disable_stack_yellow_zone to
          // update the enabled status, even if the zone contains only one page.
          thread->disable_stack_yellow_zone();
          // If not in java code, return and hope for the best.
          return in_java ? Handle_Exception(exceptionInfo,
                                            SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
                         : EXCEPTION_CONTINUE_EXECUTION;
        } else {
          // Fatal red zone violation.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
          report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                       exceptionInfo->ContextRecord);
          return EXCEPTION_CONTINUE_SEARCH;
        }
      } else if (in_java) {
        // JVM-managed guard pages cannot be used on win95/98.  The o/s provides
        // a one-time-only guard page, which it has released to us.  The next
        // stack overflow on this thread will result in an ACCESS_VIOLATION.
        return Handle_Exception(exceptionInfo,
                                SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
      } else {
        // Can only return and hope for the best.  Further stack growth will
        // result in an ACCESS_VIOLATION.
        return EXCEPTION_CONTINUE_EXECUTION;
      }
    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
      // Either stack overflow or null pointer exception.
      if (in_java) {
        PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
        address addr = (address) exceptionRecord->ExceptionInformation[1];
        address stack_end = thread->stack_base() - thread->stack_size();
        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
          // Stack overflow.
          assert(!os::uses_stack_guard_pages(),
                 "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        //
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        //
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          if (os::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
        {
#ifdef _WIN64
          //
          // If it's a legal stack address map the entire region in
          //
          // NOTE(review): exceptionRecord/addr intentionally shadow the outer
          // declarations here; values are identical.
          PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
          address addr = (address) exceptionRecord->ExceptionInformation[1];
          if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base()) {
            addr = (address)((uintptr_t)addr &
                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
            os::commit_memory((char *)addr, thread->stack_base() - addr,
                              !ExecMem);
            return EXCEPTION_CONTINUE_EXECUTION;
          }
          else
#endif
          {
            // Null pointer exception.
#ifdef _M_IA64
            // Process implicit null checks in compiled code. Note: Implicit null checks
            // can happen even if "ImplicitNullChecks" is disabled, e.g. in vtable stubs.
            if (CodeCache::contains((void*) pc_unix_format) && !MacroAssembler::needs_explicit_null_check((intptr_t) addr)) {
              CodeBlob *cb = CodeCache::find_blob_unsafe(pc_unix_format);
              // Handle implicit null check in UEP method entry
              if (cb && (cb->is_frame_complete_at(pc) ||
                  (cb->is_nmethod() && ((nmethod *)cb)->inlinecache_check_contains(pc)))) {
                if (Verbose) {
                  intptr_t *bundle_start = (intptr_t*) ((intptr_t) pc_unix_format & 0xFFFFFFFFFFFFFFF0);
                  tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc_unix_format);
                  tty->print_cr("      to addr " INTPTR_FORMAT, addr);
                  tty->print_cr("      bundle is " INTPTR_FORMAT " (high), " INTPTR_FORMAT " (low)",
                                *(bundle_start + 1), *bundle_start);
                }
                return Handle_Exception(exceptionInfo,
                                        SharedRuntime::continuation_for_implicit_exception(thread, pc_unix_format, SharedRuntime::IMPLICIT_NULL));
              }
            }

            // Implicit null checks were processed above.  Hence, we should not reach
            // here in the usual case => die!
            if (Verbose) tty->print_raw_cr("Access violation, possible null pointer exception");
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;

#else // !IA64

            // Windows 98 reports faulting addresses incorrectly
            if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) ||
                !os::win32::is_nt()) {
              address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
              if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
            }
            report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                         exceptionInfo->ContextRecord);
            return EXCEPTION_CONTINUE_SEARCH;
#endif
          }
        }
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
        address addr = JNI_FastGetField::find_slowcase_pc(pc);
        if (addr != (address)-1) {
          return Handle_Exception(exceptionInfo, addr);
        }
      }
#endif

      // Stack overflow or null pointer exception in native code.
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#if defined _M_IA64
    else if ((exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
              exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
      M37 handle_wrong_method_break(0, NativeJump::HANDLE_WRONG_METHOD, PR0);

      // Compiled method patched to be non entrant? Following conditions must apply:
      // 1. must be first instruction in bundle
      // 2. must be a break instruction with appropriate code
      if ((((uint64_t) pc & 0x0F) == 0) &&
          (((IPF_Bundle*) pc)->get_slot0() == handle_wrong_method_break.bits())) {
        return Handle_Exception(exceptionInfo,
                                (address)SharedRuntime::get_handle_wrong_method_stub());
      }
    } // /EXCEPTION_ILLEGAL_INSTRUCTION
#endif


    if (in_java) {
      switch (exception_code) {
      case EXCEPTION_INT_DIVIDE_BY_ZERO:
        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

      case EXCEPTION_INT_OVERFLOW:
        // min_jint / -1 overflows; fixed up by Handle_IDiv_Exception.
        return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }
    if (((thread->thread_state() == _thread_in_Java) ||
         (thread->thread_state() == _thread_in_native)) &&
        exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION)
    {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
  }

  if (exception_code != EXCEPTION_BREAKPOINT) {
    // Unrecognized exception: report a fatal error (breakpoints are left to
    // the debugger / outer handlers).
    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                 exceptionInfo->ContextRecord);
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

#ifndef _WIN64
// Special care for fast JNI accessors.
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
// the heap gets shrunk before the field access.
// Need to install our own structured exception handler since native code may
// install its own.
// SEH filter for the fast JNI accessor wrappers below: if the access
// violation happened at a known fast-accessor pc, resume at the matching
// slow-case continuation; otherwise keep searching.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Generates a jni_fast_Get<Result>Field_wrapper function that calls the
// generated fast accessor inside a __try/__except guarded by the filter
// above.  The "return 0" after __except is unreachable in practice because
// the filter either continues execution or continues the search.
#define DEFINE_FAST_GETFIELD(Return,Fieldname,Result) \
Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) { \
  __try { \
    return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, obj, fieldID); \
  } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { \
  } \
  return 0; \
}

DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
DEFINE_FAST_GETFIELD(jchar,    char,   Char)
DEFINE_FAST_GETFIELD(jshort,   short,  Short)
DEFINE_FAST_GETFIELD(jint,     int,    Int)
DEFINE_FAST_GETFIELD(jlong,    long,   Long)
DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
DEFINE_FAST_GETFIELD(jdouble,  double, Double)

// Return the SEH-wrapped fast accessor entry point for the given primitive
// type; (address)-1 is never actually reached (ShouldNotReachHere fires).
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
  case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
  case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
  case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
  case T_INT:     return (address)jni_fast_GetIntField_wrapper;
  case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
  case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
  case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
  default:        ShouldNotReachHere();
  }
  return (address)-1;
}
#endif

// Run funcPtr under the VM's top-level SEH filter so a crash in the test
// function produces a normal VM error dump.
void os::win32::call_test_func_with_wrapper(void (*funcPtr)(void)) {
  // Install a win32 structured exception handler around the test
  // function call so the VM can generate an error dump if needed.
  __try {
    (*funcPtr)();
  } __except(topLevelExceptionFilter(
             (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }
}

// Virtual Memory

int os::vm_page_size() { return os::win32::vm_page_size(); }
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}

// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
//   + select Control Panel -> Administrative Tools -> Local Security Policy
//   + select Local Policies -> User Rights Assignment
//   + double click "Lock pages in memory", add users and/or groups
//   + reboot
// Note the above steps are needed for administrator as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment it only uses large page if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future, if so the code below needs to be revisited.

#ifndef MEM_LARGE_PAGES
#define MEM_LARGE_PAGES 0x20000000
#endif

// Process/token handles used only during large-page privilege setup; see
// request_lock_memory_privilege() / cleanup_after_large_page_init().
static HANDLE _hProcess;
static HANDLE _hToken;

// Container for NUMA node list info
class NUMANodeListHolder {
 private:
  int *_numa_used_node_list;  // allocated below
  int _numa_used_node_count;

  void free_node_list() {
    if (_numa_used_node_list != NULL) {
      FREE_C_HEAP_ARRAY(int, _numa_used_node_list, mtInternal);
    }
  }

 public:
  NUMANodeListHolder() {
    _numa_used_node_count = 0;
    _numa_used_node_list = NULL;
    // do rest of initialization in build routine (after function pointers are set up)
  }

  ~NUMANodeListHolder() {
    free_node_list();
  }

  // Populate the node list with every NUMA node whose processor mask
  // intersects this process's affinity mask.  Returns true only if more
  // than one node is usable (NUMA interleaving is pointless otherwise).
  bool build() {
    DWORD_PTR proc_aff_mask;
    DWORD_PTR sys_aff_mask;
    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
    ULONG highest_node_number;
    if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false;
    free_node_list();
    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
    for (unsigned int i = 0; i <= highest_node_number; i++) {
      ULONGLONG proc_mask_numa_node;
      if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
      if ((proc_aff_mask & proc_mask_numa_node)!=0) {
        _numa_used_node_list[_numa_used_node_count++] = i;
      }
    }
    return (_numa_used_node_count > 1);
  }

  int get_count() { return _numa_used_node_count; }
  int get_node_list_entry(int n) {
    // for indexes out of range, returns -1
    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
  }

} numa_node_list_holder;



static size_t _large_page_size = 0;

// True if every OS entry point needed for large-page support is resolvable.
static bool resolve_functions_for_large_page_init() {
  return os::Kernel32Dll::GetLargePageMinimumAvailable() &&
    os::Advapi32Dll::AdvapiAvailable();
}

// Try to enable the SeLockMemoryPrivilege on this process's token, which is
// required for large-page allocations.  Returns true on success; leaves
// _hProcess/_hToken open for cleanup_after_large_page_init().
static bool request_lock_memory_privilege() {
  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
                          os::current_process_id());

  LUID luid;
  if (_hProcess != NULL &&
      os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
      os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

    TOKEN_PRIVILEGES tp;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Luid = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See MSDN document.
    if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
        (GetLastError() == ERROR_SUCCESS)) {
      return true;
    }
  }

  return false;
}

// Close and clear the handles opened by request_lock_memory_privilege().
static void cleanup_after_large_page_init() {
  if (_hProcess) CloseHandle(_hProcess);
  _hProcess = NULL;
  if (_hToken) CloseHandle(_hToken);
  _hToken = NULL;
}

// One-time NUMA interleaving setup: rounds NUMAInterleaveGranularity up to a
// legal value and builds the usable-node list.  Returns true if interleaving
// can be used; warns (only when the flag was set explicitly) otherwise.
static bool numa_interleaving_init() {
  bool success = false;
  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

  // print a warning if UseNUMAInterleaving flag is specified on command line
  bool warn_on_failure = use_numa_interleaving_specified;
# define WARN(msg) if (warn_on_failure) { warning(msg); }

  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ?
// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
// * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
// * UseNUMAInterleaving requires a separate node for each piece
//
// bytes               - total size to allocate (overflow-checked against chunk padding)
// addr                - requested base address, or NULL for "anywhere"
// flags/prot          - passed through to VirtualAlloc for each chunk
// should_inject_error - debug-only hook: randomly fail a chunk to exercise
//                       the cleanup path (see the ASSERT sections below)
// Returns the base of the assembled range, or NULL on any failure (in which
// case everything committed so far has been released again).
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, DWORD prot,
                                         bool should_inject_error=false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) VirtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  // NOTE: between this release and the chunk-wise re-allocation below there
  // is a window in which another thread could grab the range; the chunk
  // VirtualAlloc calls will then fail and we bail out via the NULL path.
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = (char *) align_size_up((size_t)p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t bytes_remaining = bytes;
  // An overflow of align_size_up() would have been caught above
  // in the calculation of size_of_reserve.
  char * next_alloc_addr = p_buf;
  HANDLE hProc = GetCurrentProcess();

#ifdef ASSERT
  // Variable for the failure injection
  long ran_num = os::random();
  size_t fail_after = ran_num % bytes;
#endif

  int count=0;
  while (bytes_remaining) {
    // select bytes_to_rq to get to the next chunk_size boundary

    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
    // Note allocate and commit
    char * p_new;

#ifdef ASSERT
    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
#else
    const bool inject_error_now = false;
#endif

    if (inject_error_now) {
      p_new = NULL;
    } else {
      if (!UseNUMAInterleaving) {
        p_new = (char *) VirtualAlloc(next_alloc_addr,
                                      bytes_to_rq,
                                      flags,
                                      prot);
      } else {
        // get the next node to use from the used_node_list
        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
        p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc,
                                                            next_alloc_addr,
                                                            bytes_to_rq,
                                                            flags,
                                                            prot,
                                                            node);
      }
    }

    if (p_new == NULL) {
      // Free any allocated pages
      if (next_alloc_addr > p_buf) {
        // Some memory was committed so release it.
        size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // needs to create a dummy 'reserve' record to match
        // the release.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        if (TracePageSizes && Verbose) {
          tty->print_cr("Reserving pages individually failed.");
        }
      }
#endif
      return NULL;
    }

    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}
privilege to lock pages in memory."); 2979 } 2980 } else { 2981 WARN("Large page is not supported by the operating system."); 2982 } 2983 #undef WARN 2984 2985 const size_t default_page_size = (size_t) vm_page_size(); 2986 if (success && _large_page_size > default_page_size) { 2987 _page_sizes[0] = _large_page_size; 2988 _page_sizes[1] = default_page_size; 2989 _page_sizes[2] = 0; 2990 } 2991 2992 cleanup_after_large_page_init(); 2993 UseLargePages = success; 2994 } 2995 2996 // On win32, one cannot release just a part of reserved memory, it's an 2997 // all or nothing deal. When we split a reservation, we must break the 2998 // reservation into two reservations. 2999 void os::pd_split_reserved_memory(char *base, size_t size, size_t split, 3000 bool realloc) { 3001 if (size > 0) { 3002 release_memory(base, size); 3003 if (realloc) { 3004 reserve_memory(split, base); 3005 } 3006 if (size != split) { 3007 reserve_memory(size - split, base + split); 3008 } 3009 } 3010 } 3011 3012 // Multiple threads can race in this code but it's not possible to unmap small sections of 3013 // virtual space to get requested alignment, like posix-like os's. 3014 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe. 
// Reserve 'size' bytes whose base address is a multiple of 'alignment'.
// Windows cannot release a sub-range of a reservation, so over-reserve,
// compute the aligned address, release everything, and re-reserve exactly
// at that address -- retrying if another thread stole the range in between.
char* os::reserve_memory_aligned(size_t size, size_t alignment) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    // Over-reserve so that an aligned base is guaranteed to exist inside.
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);

    // Release the whole over-reservation; from here until the re-reserve
    // below, another thread may grab the range -- hence the retry loop.
    os::release_memory(extra_base, extra_size);

    aligned_base = os::reserve_memory(size, aligned_base);

  } while (aligned_base == NULL);

  return aligned_base;
}
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available, so no extra probing is needed here.
  return reserve_memory(bytes, requested_addr);
}

// Size in bytes of one large page, as established by os::large_page_init();
// 0 when large pages are unavailable or disabled.
size_t os::large_page_size() {
  return _large_page_size;
}
// Large pages on Windows are always committed with the protection the caller
// asked for, so executable large-page memory is supported.
bool os::can_execute_large_page_memory() {
  return true;
}
// Release a special (large-page) reservation; on Windows this is the same
// operation as releasing ordinary reserved memory.
bool os::release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  return release_memory(base, bytes);
}

// No per-platform statistics are printed on Windows.
void os::print_statistics() {
}

// Emit a diagnostic for a failed commit, including the OS error text.
// Debug builds only (called under NOT_PRODUCT below).
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}

// Commit 'bytes' of previously reserved memory starting at 'addr',
// optionally with execute permission.  Returns false on failure.
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that. The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}

bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

// Commit or die: on failure, log the OS error and abort the VM with an
// out-of-memory error carrying 'mesg'.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  }
}

void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
3247 return true; 3248 } 3249 assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries"); 3250 assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks"); 3251 return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0); 3252 } 3253 3254 bool os::pd_release_memory(char* addr, size_t bytes) { 3255 return VirtualFree(addr, 0, MEM_RELEASE) != 0; 3256 } 3257 3258 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 3259 return os::commit_memory(addr, size, !ExecMem); 3260 } 3261 3262 bool os::remove_stack_guard_pages(char* addr, size_t size) { 3263 return os::uncommit_memory(addr, size); 3264 } 3265 3266 // Set protections specified 3267 bool os::protect_memory(char* addr, size_t bytes, ProtType prot, 3268 bool is_committed) { 3269 unsigned int p = 0; 3270 switch (prot) { 3271 case MEM_PROT_NONE: p = PAGE_NOACCESS; break; 3272 case MEM_PROT_READ: p = PAGE_READONLY; break; 3273 case MEM_PROT_RW: p = PAGE_READWRITE; break; 3274 case MEM_PROT_RWX: p = PAGE_EXECUTE_READWRITE; break; 3275 default: 3276 ShouldNotReachHere(); 3277 } 3278 3279 DWORD old_status; 3280 3281 // Strange enough, but on Win32 one can change protection only for committed 3282 // memory, not a big deal anyway, as bytes less or equal than 64K 3283 if (!is_committed) { 3284 commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX, 3285 "cannot commit protection page"); 3286 } 3287 // One cannot use os::guard_memory() here, as on Win32 guard page 3288 // have different (one-shot) semantics, from MSDN on PAGE_GUARD: 3289 // 3290 // Pages in the region become guard pages. Any attempt to access a guard page 3291 // causes the system to raise a STATUS_GUARD_PAGE exception and turn off 3292 // the guard page status. Guard pages thus act as a one-time access alarm. 
3293 return VirtualProtect(addr, bytes, p, &old_status) != 0; 3294 } 3295 3296 bool os::guard_memory(char* addr, size_t bytes) { 3297 DWORD old_status; 3298 return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0; 3299 } 3300 3301 bool os::unguard_memory(char* addr, size_t bytes) { 3302 DWORD old_status; 3303 return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0; 3304 } 3305 3306 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { } 3307 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { } 3308 void os::numa_make_global(char *addr, size_t bytes) { } 3309 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { } 3310 bool os::numa_topology_changed() { return false; } 3311 size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); } 3312 int os::numa_get_group_id() { return 0; } 3313 size_t os::numa_get_leaf_groups(int *ids, size_t size) { 3314 if (numa_node_list_holder.get_count() == 0 && size > 0) { 3315 // Provide an answer for UMA systems 3316 ids[0] = 0; 3317 return 1; 3318 } else { 3319 // check for size bigger than actual groups_num 3320 size = MIN2(size, numa_get_groups_num()); 3321 for (int i = 0; i < (int)size; i++) { 3322 ids[i] = numa_node_list_holder.get_node_list_entry(i); 3323 } 3324 return size; 3325 } 3326 } 3327 3328 bool os::get_page_info(char *start, page_info* info) { 3329 return false; 3330 } 3331 3332 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) { 3333 return end; 3334 } 3335 3336 char* os::non_memory_address_word() { 3337 // Must never look like an address returned by reserve_memory, 3338 // even in its subfields (as defined by the CPU immediate fields, 3339 // if the CPU splits constants across multiple instructions). 
#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL

// Resume a thread that was created suspended (see thread creation code
// elsewhere in this file).
void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}

// RAII helper: temporarily raises the Windows timer resolution to 1ms for
// the lifetime of the object, but only when the requested sleep interval is
// not already a multiple of the default 10ms tick.
class HighResolutionInterval : public CHeapObj<mtThread> {
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
 private:
  jlong resolution;
 public:
  HighResolutionInterval(jlong ms) {
    resolution = ms % 10L;
    if (resolution != 0) {
      // Return value is deliberately unused: if raising the timer
      // resolution fails we just sleep at the default granularity.
      MMRESULT result = timeBeginPeriod(1L);
    }
  }
  ~HighResolutionInterval() {
    if (resolution != 0) {
      MMRESULT result = timeEndPeriod(1L);
    }
    resolution = 0L;
  }
};

// Sleep for 'ms' milliseconds on behalf of 'thread'.
// interruptable == true  : Java thread; waits on its interrupt event and may
//                          return OS_INTRPT early, participating in the
//                          suspend-equivalent protocol.
// interruptable == false : VM thread; plain Sleep(), always OS_TIMEOUT.
// Intervals longer than MAXDWORD are handled by recursing in slices.
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  while (ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT)
      return res;
    ms -= limit;
  }

  assert(thread == Thread::current(), "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    HighResolutionInterval *phri=NULL;
    if (!ForceTimeHighResolution)
      phri = new HighResolutionInterval(ms);
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      // Interrupt event was signaled: consume it and report interruption.
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}
//
// Short sleep, direct OS call.
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  // Sleep() takes a DWORD; the assert bounds ms so the narrowing is safe.
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    Sleep(100000);  // ... 100 seconds at a time
  }
}

// Signature of kernel32!SwitchToThread, resolved dynamically elsewhere.
typedef BOOL (WINAPI * STTSignature)(void);

// Yield the remainder of this thread's time slice.
void os::naked_yield() {
  // Use either SwitchToThread() or Sleep(0)
  // Consider passing back the return value from SwitchToThread().
  if (os::Kernel32Dll::SwitchToThreadAvailable()) {
    SwitchToThread();
  } else {
    Sleep(0);
  }
}

// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.
//
// Default Java-priority -> Windows-priority mapping (index = Java priority).
int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};
THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority 3494 THREAD_PRIORITY_TIME_CRITICAL, // 10 MaxPriority 3495 THREAD_PRIORITY_TIME_CRITICAL // 11 CriticalPriority 3496 }; 3497 3498 static int prio_init() { 3499 // If ThreadPriorityPolicy is 1, switch tables 3500 if (ThreadPriorityPolicy == 1) { 3501 int i; 3502 for (i = 0; i < CriticalPriority + 1; i++) { 3503 os::java_to_os_priority[i] = prio_policy1[i]; 3504 } 3505 } 3506 if (UseCriticalJavaThreadPriority) { 3507 os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority]; 3508 } 3509 return 0; 3510 } 3511 3512 OSReturn os::set_native_priority(Thread* thread, int priority) { 3513 if (!UseThreadPriorities) return OS_OK; 3514 bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0; 3515 return ret ? OS_OK : OS_ERR; 3516 } 3517 3518 OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) { 3519 if (!UseThreadPriorities) { 3520 *priority_ptr = java_to_os_priority[NormPriority]; 3521 return OS_OK; 3522 } 3523 int os_prio = GetThreadPriority(thread->osthread()->thread_handle()); 3524 if (os_prio == THREAD_PRIORITY_ERROR_RETURN) { 3525 assert(false, "GetThreadPriority failed"); 3526 return OS_ERR; 3527 } 3528 *priority_ptr = os_prio; 3529 return OS_OK; 3530 } 3531 3532 3533 // Hint to the underlying OS that a task switch would not be good. 3534 // Void return because it's a hint and can fail. 3535 void os::hint_no_preempt() {} 3536 3537 void os::interrupt(Thread* thread) { 3538 assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(), 3539 "possibility of dangling Thread pointer"); 3540 3541 OSThread* osthread = thread->osthread(); 3542 osthread->set_interrupted(true); 3543 // More than one thread can get here with the same value of osthread, 3544 // resulting in multiple notifications. We do, however, want the store 3545 // to interrupted() to be visible to other threads before we post 3546 // the interrupt event. 
// Report whether 'thread' has a pending interrupt, optionally clearing it.
// Must pair with os::interrupt() above, which sets the flag and then
// signals the interrupt event.
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. By checking thread interrupt event to see
  // if the thread gets real interrupt thus prevent spurious wakeup.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}
// Get's a pc (hint) for a running thread.  Currently used only for profiling.
// Returns ExtendedPC(NULL) if the context cannot be retrieved (or on IA64,
// where this is not implemented).
ExtendedPC os::get_thread_pc(Thread* thread) {
  CONTEXT context;
  context.ContextFlags = CONTEXT_CONTROL;
  HANDLE handle = thread->osthread()->thread_handle();
#ifdef _M_IA64
  assert(0, "Fix get_thread_pc");
  return ExtendedPC(NULL);
#else
  if (GetThreadContext(handle, &context)) {
#ifdef _M_AMD64
    return ExtendedPC((address) context.Rip);
#else
    return ExtendedPC((address) context.Eip);
#endif
  } else {
    return ExtendedPC(NULL);
  }
#endif
}

// GetCurrentThreadId() returns DWORD
intx os::current_thread_id()  { return GetCurrentThreadId(); }

// Pid captured in os::init(); 0 until then.
static int _initial_pid = 0;

int os::current_process_id()
{
  // Prefer the pid recorded at VM startup; fall back to a fresh query.
  return (_initial_pid ? _initial_pid : _getpid());
}

// Cached system characteristics, filled in by initialize_system_info() below.
int    os::win32::_vm_page_size              = 0;
int    os::win32::_vm_allocation_granularity = 0;
int    os::win32::_processor_type            = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int    os::win32::_processor_level           = 0;
julong os::win32::_physical_memory           = 0;
size_t os::win32::_default_stack_size        = 0;

intx          os::win32::_os_thread_limit    = 0;
volatile intx os::win32::_os_thread_count    = 0;

bool   os::win32::_is_nt                     = false;
bool   os::win32::_is_windows_2003           = false;
bool   os::win32::_is_windows_server         = false;

bool   os::win32::_has_performance_count     = 0;
GlobalMemoryStatusEx(&ms); 3643 _physical_memory = ms.ullTotalPhys; 3644 3645 OSVERSIONINFOEX oi; 3646 oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); 3647 GetVersionEx((OSVERSIONINFO*)&oi); 3648 switch (oi.dwPlatformId) { 3649 case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break; 3650 case VER_PLATFORM_WIN32_NT: 3651 _is_nt = true; 3652 { 3653 int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion; 3654 if (os_vers == 5002) { 3655 _is_windows_2003 = true; 3656 } 3657 if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER || 3658 oi.wProductType == VER_NT_SERVER) { 3659 _is_windows_server = true; 3660 } 3661 } 3662 break; 3663 default: fatal("Unknown platform"); 3664 } 3665 3666 _default_stack_size = os::current_stack_size(); 3667 assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size"); 3668 assert((_default_stack_size & (_vm_page_size - 1)) == 0, 3669 "stack size not a multiple of page size"); 3670 3671 initialize_performance_counter(); 3672 3673 // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is 3674 // known to deadlock the system, if the VM issues to thread operations with 3675 // a too high frequency, e.g., such as changing the priorities. 3676 // The 6000 seems to work well - no deadlocks has been notices on the test 3677 // programs that we have seen experience this problem. 
3678 if (!os::win32::is_nt()) { 3679 StarvationMonitorInterval = 6000; 3680 } 3681 } 3682 3683 3684 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, int ebuflen) { 3685 char path[MAX_PATH]; 3686 DWORD size; 3687 DWORD pathLen = (DWORD)sizeof(path); 3688 HINSTANCE result = NULL; 3689 3690 // only allow library name without path component 3691 assert(strchr(name, '\\') == NULL, "path not allowed"); 3692 assert(strchr(name, ':') == NULL, "path not allowed"); 3693 if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) { 3694 jio_snprintf(ebuf, ebuflen, 3695 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name); 3696 return NULL; 3697 } 3698 3699 // search system directory 3700 if ((size = GetSystemDirectory(path, pathLen)) > 0) { 3701 strcat(path, "\\"); 3702 strcat(path, name); 3703 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3704 return result; 3705 } 3706 } 3707 3708 // try Windows directory 3709 if ((size = GetWindowsDirectory(path, pathLen)) > 0) { 3710 strcat(path, "\\"); 3711 strcat(path, name); 3712 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3713 return result; 3714 } 3715 } 3716 3717 jio_snprintf(ebuf, ebuflen, 3718 "os::win32::load_windows_dll() cannot load %s from system directories.", name); 3719 return NULL; 3720 } 3721 3722 void os::win32::setmode_streams() { 3723 _setmode(_fileno(stdin), _O_BINARY); 3724 _setmode(_fileno(stdout), _O_BINARY); 3725 _setmode(_fileno(stderr), _O_BINARY); 3726 } 3727 3728 3729 bool os::is_debugger_attached() { 3730 return IsDebuggerPresent() ? 
                                true : false;
}


// If +PauseAtExit is set, block VM shutdown until the user presses a key.
void os::wait_for_keypress_at_exit(void) {
  if (PauseAtExit) {
    fprintf(stderr, "Press any key to continue...\n");
    fgetc(stdin);
  }
}


// Pop a system-modal Yes/No error dialog; returns nonzero iff "Yes" chosen.
int os::message_box(const char* title, const char* message) {
  int result = MessageBox(NULL, message, title,
                          MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
  return result == IDYES;
}

// Thread-local storage: thin wrappers over the win32 TLS API.
// NOTE(review): TlsAlloc() can fail with TLS_OUT_OF_INDEXES; that is not
// checked here -- presumably callers treat a bad index as fatal later.
int os::allocate_thread_local_storage() {
  return TlsAlloc();
}


void os::free_thread_local_storage(int index) {
  TlsFree(index);
}


void os::thread_local_storage_at_put(int index, void* value) {
  TlsSetValue(index, value);
  assert(thread_local_storage_at(index) == value, "Just checking");
}


void* os::thread_local_storage_at(int index) {
  return TlsGetValue(index);
}


#ifndef PRODUCT
#ifndef _WIN64
// Helpers to check whether NX protection is enabled
// SEH filter: claim the exception only if it is an access violation caused
// by an attempt to *execute* (DEP/NX); let everything else propagate.
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      pex->ExceptionRecord->NumberParameters > 0 &&
      pex->ExceptionRecord->ExceptionInformation[0] ==
      EXCEPTION_INFO_EXEC_VIOLATION) {
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 };   // a single x86 'ret' instruction
  void *code_ptr = (void *)code;
  __try {
    __asm call code_ptr
  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
    tty->print_raw_cr("NX protection detected.");
  }
}
#endif // _WIN64
#endif // PRODUCT

// this is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = _getpid();

  // Seed the VM's pseudo-random number generator (fixed seed for
  // reproducibility).
  init_random(1234567);

  // Gather OS/memory/CPU info and set stream modes before anything else
  // relies on them.
  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
                os::win32::is_windows_2003());

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  // Convert the pseudo thread handle into a real one usable from other
  // threads (a pseudo handle is only meaningful within the current thread).
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();
}

// To install functions for atexit processing
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

static jint initSock();

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // Allocate a single page and mark it as readable for safepoint polling
  address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
  guarantee(polling_page != NULL, "Reserve Failed for polling page");

  address return_page = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
  guarantee(return_page != NULL, "Commit Failed for polling page");

  os::set_polling_page(polling_page);

#ifndef PRODUCT
  if (Verbose && PrintMiscellaneous)
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    // Reserve and commit a read/write page used to serialize thread state
    // on multiprocessors when membars are not used.
    address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READWRITE);
    guarantee(mem_serialize_page != NULL, "Reserve Failed for memory serialize page");

    return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
    guarantee(return_page != NULL, "Commit Failed for memory serialize page");

    os::set_memory_serialize_page(mem_serialize_page);

#ifndef PRODUCT
    if (Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  // Setup Windows Exceptions

  // for debugging float code generation bugs
  if (ForceFloatExceptions) {
#ifndef _WIN64
    // Read, modify and reload the x87 FPU control word (32-bit MSVC asm).
    static long fp_control_word = 0;
    __asm { fstcw fp_control_word }
    // see Intel PPro Manual, Vol. 2, p 7-16
    const long precision = 0x20;
    const long underflow = 0x10;
    const long overflow  = 0x08;
    const long zero_div  = 0x04;
    const long denorm    = 0x02;
    const long invalid   = 0x01;
    fp_control_word |= invalid;
    __asm { fldcw fp_control_word }
#endif
  }

  // If stack_commit_size is 0, windows will reserve the default size,
  // but only commit a small portion of it.
  size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
  size_t default_reserve_size = os::win32::default_stack_size();
  size_t actual_reserve_size = stack_commit_size;
  if (stack_commit_size < default_reserve_size) {
    // If stack_commit_size == 0, we want this too
    actual_reserve_size = default_reserve_size;
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size.  Add a page for compiler2 recursion in main thread.
  // Add in 2*BytesPerWord times page size to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  size_t min_stack_allowed =
            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
                     2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size();
  if (actual_reserve_size < min_stack_allowed) {
    tty->print_cr("\nThe stack size specified is too small, "
                  "Specify at least %dk",
                  min_stack_allowed / K);
    return JNI_ERR;
  }

  JavaThread::set_stack_size_at_create(stack_commit_size);

  // Calculate theoretical max. size of Threads to guard gainst artifical
  // out-of-memory situations, where all available address-space has been
  // reserved by thread stacks.
  assert(actual_reserve_size != 0, "Must have a stack");

  // Calculate the thread limit when we should start doing Virtual Memory
  // banging. Currently when the threads will have used all but 200Mb of space.
  //
  // TODO: consider performing a similar calculation for commit size instead
  // as reserve size, since on a 64-bit platform we'll run into that more
  // often than running out of virtual memory space.  We can use the
  // lower value of the two calculations as the os_thread_limit.
  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);

  // at exit methods are called in the reverse order of their registration.
  // there is no limit to the number of functions registered. atexit does
  // not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

#ifndef _WIN64
  // Print something if NX is enabled (win32 on AMD64)
  NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
#endif

  // initialize thread priority policy
  prio_init();

  if (UseNUMA && !ForceNUMA) {
    UseNUMA = false; // We don't fully support this yet
  }

  if (UseNUMAInterleaving) {
    // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
    bool success = numa_interleaving_init();
    if (!success) UseNUMAInterleaving = false;
  }

  if (initSock() != JNI_OK) {
    return JNI_ERR;
  }

  return JNI_OK;
}

void os::init_3(void) {
  return;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  DWORD old_status;
  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status))
    fatal("Could not disable polling page");
};

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  DWORD old_status;
  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status))
    fatal("Could not enable polling page");
};


// stat() wrapper: converts 'path' to native form, then (optionally)
// normalizes st_mtime so it is independent of the system time zone.
int os::stat(const char *path, struct stat *sbuf) {
  char pathbuf[MAX_PATH];
  if (strlen(path) > MAX_PATH - 1) {
    errno = ENAMETOOLONG;
    return -1;
  }
  os::native_path(strcpy(pathbuf, path));
  int ret = ::stat(pathbuf, sbuf);
  if (sbuf != NULL && UseUTCFileTimestamp) {
    // Fix for 6539723.
    // st_mtime returned from stat() is dependent on
    // the system timezone and so can return different values for the
    // same file if/when daylight savings time changes.  This adjustment
    // makes sure the same timestamp is returned regardless of the TZ.
    //
    // See:
    // http://msdn.microsoft.com/library/
    //   default.asp?url=/library/en-us/sysinfo/base/
    //   time_zone_information_str.asp
    // and
    // http://msdn.microsoft.com/library/default.asp?url=
    //   /library/en-us/sysinfo/base/settimezoneinformation.asp
    //
    // NOTE: there is a insidious bug here:  If the timezone is changed
    // after the call to stat() but before 'GetTimeZoneInformation()', then
    // the adjustment we do here will be wrong and we'll return the wrong
    // value (which will likely end up creating an invalid class data
    // archive).  Absent a better API for this, or some time zone locking
    // mechanism, we'll have to live with this risk.
    TIME_ZONE_INFORMATION tz;
    DWORD tzid = GetTimeZoneInformation(&tz);
    int daylightBias =
        (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias;
    // Bias values are in minutes; convert the total offset to seconds.
    sbuf->st_mtime += (tz.Bias + daylightBias) * 60;
  }
  return ret;
}


// Pack a win32 FILETIME (two 32-bit halves, 100ns units) into a jlong.
#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))


// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.

// current_thread_cpu_time() is not optimized for Windows yet
jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
}

jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns.
  return os::thread_cpu_time(thread, true /* user+sys */);
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
}

// Returns the given thread's CPU time in nanoseconds, or -1 on failure.
// 'user_sys_cpu_time' selects user+kernel time vs user time only.
jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
  // This code is copy from clasic VM -> hpi::sysThreadCPUTime
  // If this function changes, os::is_thread_cpu_time_supported() should too
  if (os::win32::is_nt()) {
    FILETIME CreationTime;
    FILETIME ExitTime;
    FILETIME KernelTime;
    FILETIME UserTime;

    if (GetThreadTimes(thread->osthread()->thread_handle(),
                       &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
      return -1;
    else
      if (user_sys_cpu_time) {
        // FILETIME counts 100ns ticks; * 100 converts to nanoseconds.
        return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
      } else {
        return FT2INT64(UserTime) * 100;
      }
  } else {
    // Win 9x has no per-thread times; approximate with wall-clock
    // milliseconds converted to nanoseconds.
    return (jlong) timeGetTime() * 1000000;
  }
}

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}

// Probe whether GetThreadTimes works on this platform (NT family only).
bool os::is_thread_cpu_time_supported() {
  // see os::thread_cpu_time
  if (os::win32::is_nt()) {
    FILETIME CreationTime;
    FILETIME ExitTime;
    FILETIME KernelTime;
    FILETIME UserTime;

    if (GetThreadTimes(GetCurrentThread(),
                       &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0)
      return false;
    else
      return true;
  } else {
    return false;
  }
}

// Windows does't provide a loadavg primitive so this is stubbed out for now.
// It does have primitives (PDH API) to get CPU usage and run queue length.
// "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
// If we wanted to implement loadavg on Windows, we have a few options:
//
// a) Query CPU usage and run queue length and "fake" an answer by
//    returning the CPU usage if it's under 100%, and the run queue
//    length otherwise.  It turns out that querying is pretty slow
//    on Windows, on the order of 200 microseconds on a fast machine.
//    Note that on the Windows the CPU usage value is the % usage
//    since the last time the API was called (and the first call
//    returns 100%), so we'd have to deal with that as well.
//
// b) Sample the "fake" answer using a sampling thread and store
//    the answer in a global variable.  The call to loadavg would
//    just return the value of the global, avoiding the slow query.
//
// c) Sample a better answer using exponential decay to smooth the
//    value.  This is basically the algorithm used by UNIX kernels.
//
// Note that sampling thread starvation could affect both (b) and (c).
4121 int os::loadavg(double loadavg[], int nelem) { 4122 return -1; 4123 } 4124 4125 4126 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield() 4127 bool os::dont_yield() { 4128 return DontYieldALot; 4129 } 4130 4131 // This method is a slightly reworked copy of JDK's sysOpen 4132 // from src/windows/hpi/src/sys_api_md.c 4133 4134 int os::open(const char *path, int oflag, int mode) { 4135 char pathbuf[MAX_PATH]; 4136 4137 if (strlen(path) > MAX_PATH - 1) { 4138 errno = ENAMETOOLONG; 4139 return -1; 4140 } 4141 os::native_path(strcpy(pathbuf, path)); 4142 return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); 4143 } 4144 4145 FILE* os::open(int fd, const char* mode) { 4146 return ::_fdopen(fd, mode); 4147 } 4148 4149 // Is a (classpath) directory empty? 4150 bool os::dir_is_empty(const char* path) { 4151 WIN32_FIND_DATA fd; 4152 HANDLE f = FindFirstFile(path, &fd); 4153 if (f == INVALID_HANDLE_VALUE) { 4154 return true; 4155 } 4156 FindClose(f); 4157 return false; 4158 } 4159 4160 // create binary file, rewriting existing file if required 4161 int os::create_binary_file(const char* path, bool rewrite_existing) { 4162 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY; 4163 if (!rewrite_existing) { 4164 oflags |= _O_EXCL; 4165 } 4166 return ::open(path, oflags, _S_IREAD | _S_IWRITE); 4167 } 4168 4169 // return current position of file pointer 4170 jlong os::current_file_offset(int fd) { 4171 return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR); 4172 } 4173 4174 // move file pointer to the specified offset 4175 jlong os::seek_to_file_offset(int fd, jlong offset) { 4176 return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET); 4177 } 4178 4179 4180 jlong os::lseek(int fd, jlong offset, int whence) { 4181 return (jlong) ::_lseeki64(fd, offset, whence); 4182 } 4183 4184 // This method is a slightly reworked copy of JDK's sysNativePath 4185 // from src/windows/hpi/src/path_md.c 4186 4187 /* Convert a pathname to native format. 
   On win32, this involves forcing all
   separators to be '\\' rather than '/' (both are legal inputs, but Win95
   sometimes rejects '/') and removing redundant separators. The input path is
   assumed to have been converted into the character encoding used by the local
   system. Because this might be a double-byte encoding, care is taken to
   treat double-byte lead characters correctly.

   This procedure modifies the given path in place, as the result is never
   longer than the original.  There is no error return; this operation always
   succeeds. */
char * os::native_path(char *path) {
  char *src = path, *dst = path, *end = path;
  char *colon = NULL;  /* If a drive specifier is found, this will
                          point to the colon following the drive
                          letter */

  /* Assumption: '/', '\\', ':', and drive letters are never lead bytes */
  assert(((!::IsDBCSLeadByte('/'))
          && (!::IsDBCSLeadByte('\\'))
          && (!::IsDBCSLeadByte(':'))),
         "Illegal lead byte");

  /* Check for leading separators */
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    /* Remove leading separators if followed by drive specifier.  This
       hack is necessary to support file URLs containing drive
       specifiers (e.g., "file://c:/path").  As a side effect,
       "/c:/path" can be used as an alternative to "c:/path". */
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      /* UNC pathname: Retain first separator; leave src pointed at
         second separator so that further separators will be collapsed
         into the second separator.  The result will be a pathname
         beginning with "\\\\" followed (most likely) by a host name. */
      src = dst = path + 1;
      path[0] = '\\';   /* Force first separator to '\\' */
    }
  }

  end = dst;

  /* Remove redundant separators from remainder of path, forcing all
     separators to be '\\' rather than '/'. Also, single byte space
     characters are removed from the end of the path because those
     are not legal ending characters on this operating system.
  */
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      while (isfilesep(*src)) src++;
      if (*src == '\0') {
        /* Check for trailing separator */
        end = dst;
        if (colon == dst - 2) break;  /* "z:\\" */
        if (dst == path + 1) break;   /* "\\" */
        if (dst == path + 2 && isfilesep(path[0])) {
          /* "\\\\" is not collapsed to "\\" because "\\\\" marks the
             beginning of a UNC pathname.  Even though it is not, by
             itself, a valid UNC pathname, we leave it as is in order
             to be consistent with the path canonicalizer as well
             as the win32 APIs, which treat this case as an invalid
             UNC pathname rather than as an alias for the root
             directory of the current drive. */
          break;
        }
        end = --dst;  /* Path does not denote a root directory, so
                         remove trailing separator */
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) {  /* Copy a double-byte character */
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else {  /* Copy a single-byte character */
        char c = *src++;
        *dst++ = c;
        /* Space is not a legal ending character */
        if (c != ' ') end = dst;
      }
    }
  }

  *end = '\0';

  /* For "z:", add "." to work around a bug in the C runtime library */
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}

// This code is a copy of JDK's sysSetLength
// from src/windows/hpi/src/sys_api_md.c

// Truncate (or extend) the file behind 'fd' to 'length' bytes.
// Returns 0 on success, -1 on failure.
int os::ftruncate(int fd, jlong length) {
  HANDLE h = (HANDLE)::_get_osfhandle(fd);
  long high = (long)(length >> 32);   // upper 32 bits of the 64-bit offset
  DWORD ret;

  if (h == (HANDLE)(-1)) {
    return -1;
  }

  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
  // 0xFFFFFFFF is a legal low-half value, so a failure is only indicated
  // when GetLastError() also reports an error.
  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
    return -1;
  }

  if (::SetEndOfFile(h) == FALSE) {
    return -1;
  }

  return 0;
}


// This code is a copy of JDK's sysSync
// from src/windows/hpi/src/sys_api_md.c
// except for the legacy workaround for a bug in Win 98

int os::fsync(int fd) {
  HANDLE handle = (HANDLE)::_get_osfhandle(fd);

  // ERROR_ACCESS_DENIED is tolerated: it is returned for handles that were
  // opened without write access (nothing to flush).
  if ((!::FlushFileBuffers(handle)) &&
      (GetLastError() != ERROR_ACCESS_DENIED) ) {
    /* from winerror.h */
    return -1;
  }
  return 0;
}

static int nonSeekAvailable(int, long *);
static int stdinAvailable(int, long *);

#define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
#define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)

// This code is a copy of JDK's sysAvailable
// from src/windows/hpi/src/sys_api_md.c

// Store in *bytes the number of bytes readable from 'fd' without blocking.
// Returns TRUE on success, FALSE on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      // Character devices and pipes are not seekable; use peek-based probes.
      int ret;
      long lpbytes;
      if (fd == 0) {
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    // Seekable file: available = size - current position, computed by
    // seeking to the end and restoring the original position.
    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if
               ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
      return FALSE;
    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
      return FALSE;
    }
    *bytes = end - cur;
    return TRUE;
  } else {
    return FALSE;
  }
}

// This code is a copy of JDK's nonSeekAvailable
// from src/windows/hpi/src/sys_api_md.c

static int nonSeekAvailable(int fd, long *pbytes) {
  /* This is used for available on non-seekable devices
   * (like both named and anonymous pipes, such as pipes
   * connected to an exec'd process).
   * Standard Input is a special case.
   *
   */
  HANDLE han;

  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
    return FALSE;
  }

  // Peek with a zero-length buffer just to learn how many bytes are queued.
  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
    /* PeekNamedPipe fails when at EOF.  In that case we
     * simply make *pbytes = 0 which is consistent with the
     * behavior we get on Solaris when an fd is at EOF.
     * The only alternative is to raise an Exception,
     * which isn't really warranted.
     */
    if (::GetLastError() != ERROR_BROKEN_PIPE) {
      return FALSE;
    }
    *pbytes = 0;
  }
  return TRUE;
}

#define MAX_INPUT_EVENTS 2000

// This code is a copy of JDK's stdinAvailable
// from src/windows/hpi/src/sys_api_md.c

// Estimate bytes available on the console stdin by counting key-down
// events up to (and including) the last carriage return.
static int stdinAvailable(int fd, long *pbytes) {
  HANDLE han;
  DWORD numEventsRead = 0;  /* Number of events read from buffer */
  DWORD numEvents = 0;      /* Number of events in buffer */
  DWORD i = 0;              /* Loop index */
  DWORD curLength = 0;      /* Position marker */
  DWORD actualLength = 0;   /* Number of bytes readable */
  BOOL error = FALSE;       /* Error holder */
  INPUT_RECORD *lpBuffer;   /* Pointer to records of input events */

  if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
    return FALSE;
  }

  /* Construct an array of input records in the console buffer */
  error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
  if (error == 0) {
    // Not a console (e.g. redirected stdin); fall back to the pipe probe.
    return nonSeekAvailable(fd, pbytes);
  }

  /* lpBuffer must fit into 64K or else PeekConsoleInput fails */
  if (numEvents > MAX_INPUT_EVENTS) {
    numEvents = MAX_INPUT_EVENTS;
  }

  lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
  if (lpBuffer == NULL) {
    return FALSE;
  }

  error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
  if (error == 0) {
    os::free(lpBuffer, mtInternal);
    return FALSE;
  }

  /* Examine input records for the number of bytes available */
  for (i=0; i<numEvents; i++) {
    if (lpBuffer[i].EventType == KEY_EVENT) {

      KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
                                    &(lpBuffer[i].Event);
      if (keyRecord->bKeyDown == TRUE) {
        CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
        curLength++;
        // Only input up to the last '\r' is actually deliverable by a read.
        if (*keyPressed == '\r') {
          actualLength = curLength;
        }
      }
    }
  }

  if (lpBuffer != NULL) {
    os::free(lpBuffer, mtInternal);
  }

  *pbytes = (long) actualLength;
  return TRUE;
}

// Map a block of memory.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
  HANDLE hFile;
  char* base;

  hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
                     OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
  if (hFile == NULL) {
    if (PrintMiscellaneous && Verbose) {
      DWORD err = GetLastError();
      tty->print_cr("CreateFile() failed: GetLastError->%ld.", err);
    }
    return NULL;
  }

  if (allow_exec) {
    // CreateFileMapping/MapViewOfFileEx can't map executable memory
    // unless it comes from a PE image (which the shared archive is not.)
    // Even VirtualProtect refuses to give execute access to mapped memory
    // that was not previously executable.
    //
    // Instead, stick the executable region in anonymous memory.  Yuck.
    // Penalty is that ~4 pages will not be shareable - in the future
    // we might consider DLLizing the shared archive with a proper PE
    // header so that mapping executable + sharing is possible.

    base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
                                PAGE_READWRITE);
    if (base == NULL) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err);
      }
      CloseHandle(hFile);
      return NULL;
    }

    DWORD bytes_read;
    OVERLAPPED overlapped;
    overlapped.Offset = (DWORD)file_offset;
    overlapped.OffsetHigh = 0;
    overlapped.hEvent = NULL;
    // ReadFile guarantees that if the return value is true, the requested
    // number of bytes were read before returning.
    bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
    if (!res) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("ReadFile() failed: GetLastError->%ld.", err);
      }
      release_memory(base, bytes);
      CloseHandle(hFile);
      return NULL;
    }
  } else {
    // Non-executable mapping: use a real copy-on-write file mapping.
    HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
                                    NULL /*file_name*/);
    if (hMap == NULL) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("CreateFileMapping() failed: GetLastError->%ld.", err);
      }
      CloseHandle(hFile);
      return NULL;
    }

    DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
    base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
                                  (DWORD)bytes, addr);
    if (base == NULL) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err);
      }
      CloseHandle(hMap);
      CloseHandle(hFile);
      return NULL;
    }

    // The mapping object handle can be closed immediately; the view keeps
    // the mapping alive.
    if (CloseHandle(hMap) == 0) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err);
      }
      CloseHandle(hFile);
      return base;
    }
  }

  if (allow_exec) {
    DWORD old_protect;
    DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
    bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;

    if (!res) {
      if (PrintMiscellaneous && Verbose) {
        DWORD err = GetLastError();
        tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err);
      }
      // Don't consider this a hard error, on IA32 even if the
      // VirtualProtect fails, we should still be able to execute
      CloseHandle(hFile);
      return base;
    }
  }

  if (CloseHandle(hFile) == 0) {
    if (PrintMiscellaneous && Verbose) {
      DWORD err = GetLastError();
      tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err);
    }
    return base;
  }

  return base;
}


// Remap a block of memory.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // This OS does not allow existing memory maps to be remapped so we
  // have to unmap the memory before we remap it.
  if (!os::unmap_memory(addr, bytes)) {
    return NULL;
  }

  // There is a very small theoretical window between the unmap_memory()
  // call above and the map_memory() call below where a thread in native
  // code may be able to access an address that is no longer mapped.

  return os::map_memory(fd, file_name, file_offset, addr, bytes,
                        read_only, allow_exec);
}


// Unmap a block of memory.
// Returns true=success, otherwise false.
4609 4610 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4611 BOOL result = UnmapViewOfFile(addr); 4612 if (result == 0) { 4613 if (PrintMiscellaneous && Verbose) { 4614 DWORD err = GetLastError(); 4615 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err); 4616 } 4617 return false; 4618 } 4619 return true; 4620 } 4621 4622 void os::pause() { 4623 char filename[MAX_PATH]; 4624 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4625 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4626 } else { 4627 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4628 } 4629 4630 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4631 if (fd != -1) { 4632 struct stat buf; 4633 ::close(fd); 4634 while (::stat(filename, &buf) == 0) { 4635 Sleep(100); 4636 } 4637 } else { 4638 jio_fprintf(stderr, 4639 "Could not open pause file '%s', continuing immediately.\n", filename); 4640 } 4641 } 4642 4643 os::WatcherThreadCrashProtection::WatcherThreadCrashProtection() { 4644 assert(Thread::current()->is_Watcher_thread(), "Must be WatcherThread"); 4645 } 4646 4647 /* 4648 * See the caveats for this class in os_windows.hpp 4649 * Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4650 * into this method and returns false. If no OS EXCEPTION was raised, returns 4651 * true. 4652 * The callback is supposed to provide the method that should be protected. 
4653 */ 4654 bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) { 4655 assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread"); 4656 assert(!WatcherThread::watcher_thread()->has_crash_protection(), 4657 "crash_protection already set?"); 4658 4659 bool success = true; 4660 __try { 4661 WatcherThread::watcher_thread()->set_crash_protection(this); 4662 cb.call(); 4663 } __except(EXCEPTION_EXECUTE_HANDLER) { 4664 // only for protection, nothing to do 4665 success = false; 4666 } 4667 WatcherThread::watcher_thread()->set_crash_protection(NULL); 4668 return success; 4669 } 4670 4671 // An Event wraps a win32 "CreateEvent" kernel handle. 4672 // 4673 // We have a number of choices regarding "CreateEvent" win32 handle leakage: 4674 // 4675 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle 4676 // field, and call CloseHandle() on the win32 event handle. Unpark() would 4677 // need to be modified to tolerate finding a NULL (invalid) win32 event handle. 4678 // In addition, an unpark() operation might fetch the handle field, but the 4679 // event could recycle between the fetch and the SetEvent() operation. 4680 // SetEvent() would either fail because the handle was invalid, or inadvertently work, 4681 // as the win32 handle value had been recycled. In an ideal world calling SetEvent() 4682 // on an stale but recycled handle would be harmless, but in practice this might 4683 // confuse other non-Sun code, so it's not a viable approach. 4684 // 4685 // 2: Once a win32 event handle is associated with an Event, it remains associated 4686 // with the Event. The event handle is never closed. This could be construed 4687 // as handle leakage, but only up to the maximum # of threads that have been extant 4688 // at any one time. This shouldn't be an issue, as windows platforms typically 4689 // permit a process to have hundreds of thousands of open handles. 
//
// 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
//    and release unused handles.
//
// 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
//    It's not clear, however, that we wouldn't be trading one type of leak for another.
//
// 5. Use an RCU-like mechanism (Read-Copy Update).
//    Or perhaps something similar to Maged Michael's "Hazard pointers".
//
// We use (2).
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
// 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
//     to recover from (or at least detect) the dreaded Windows 841176 bug.
// 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
//     into a single win32 CreateEvent() handle.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block
//
// _Event serves as a restricted-range semaphore :
//    -1 : thread is blocked
//     0 : neutral  - thread is running or ready
//     1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be
// with explicit "PARKED" and "SIGNALED" bits.

// Timed park: wait at most 'Millis' milliseconds.  Returns OS_OK when a
// permit was consumed or the thread was unparked, OS_TIMEOUT on expiry.
int os::PlatformEvent::park(jlong Millis) {
  guarantee(_ParkHandle != NULL , "Invariant");
  guarantee(Millis > 0          , "Invariant");
  int v;

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.

  // Atomically decrement _Event: 1 -> 0 consumes a pending permit,
  // 0 -> -1 marks this thread as blocked.
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return OS_OK;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
  }
  v = _Event;
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultaneous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}

// Untimed park: block until unparked or a pending permit is consumed.
void os::PlatformEvent::park() {
  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}

void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 :=> 1
  //    1 :=> 1
  //   -1 :=> either 0 or 1; must signal target thread
  //          That is, we can safely transition _Event from -1 to either
  //          0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  if (Atomic::xchg(1, &_Event) >= 0) return;

  // Previous value was -1: a thread is (or soon will be) blocked in
  // WaitForSingleObject(); wake it.
  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

/*
 * The Windows implementation of Park is very straightforward: Basic
 * operations on Win32 Events turn out to have the right semantics to
 * use them directly. We opportunistically reuse the event inherited
 * from Monitor.
 */


void Parker::park(bool isAbsolute, jlong time) {
  guarantee(_ParkEvent != NULL, "invariant");
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  }
  else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  }
  else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) // already elapsed
      return;
  }
  else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0)   // Wait for the minimal time unit if zero
      time = 1;
  }

  JavaThread* thread = (JavaThread*)(Thread::current());
  assert(thread->is_Java_thread(), "Must be JavaThread");
  JavaThread *jt = (JavaThread *)thread;

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
    WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    ResetEvent(_ParkEvent);
    return;
  }
  else {
    ThreadBlockInVM tbivm(jt);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    jt->set_suspend_equivalent();

    // NOTE(review): 'time' (jlong) narrows to DWORD here; relative waits
    // longer than ~49 days would wrap -- confirm callers bound the value.
    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (jt->handle_special_suspend_equivalent_condition()) {
      jt->java_suspend_self();
    }
  }
}

void Parker::unpark() {
  guarantee(_ParkEvent != NULL, "invariant");
  SetEvent(_ParkEvent);
}

// Run the
specified command in a separate process. Return its exit value, 4889 // or -1 on failure (e.g. can't create a new process). 4890 int os::fork_and_exec(char* cmd) { 4891 STARTUPINFO si; 4892 PROCESS_INFORMATION pi; 4893 4894 memset(&si, 0, sizeof(si)); 4895 si.cb = sizeof(si); 4896 memset(&pi, 0, sizeof(pi)); 4897 BOOL rslt = CreateProcess(NULL, // executable name - use command line 4898 cmd, // command line 4899 NULL, // process security attribute 4900 NULL, // thread security attribute 4901 TRUE, // inherits system handles 4902 0, // no creation flags 4903 NULL, // use parent's environment block 4904 NULL, // use parent's starting directory 4905 &si, // (in) startup information 4906 &pi); // (out) process information 4907 4908 if (rslt) { 4909 // Wait until child process exits. 4910 WaitForSingleObject(pi.hProcess, INFINITE); 4911 4912 DWORD exit_code; 4913 GetExitCodeProcess(pi.hProcess, &exit_code); 4914 4915 // Close process and thread handles. 4916 CloseHandle(pi.hProcess); 4917 CloseHandle(pi.hThread); 4918 4919 return (int)exit_code; 4920 } else { 4921 return -1; 4922 } 4923 } 4924 4925 //-------------------------------------------------------------------------------------------------- 4926 // Non-product code 4927 4928 static int mallocDebugIntervalCounter = 0; 4929 static int mallocDebugCounter = 0; 4930 bool os::check_heap(bool force) { 4931 if (++mallocDebugCounter < MallocVerifyStart && !force) return true; 4932 if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) { 4933 // Note: HeapValidate executes two hardware breakpoints when it finds something 4934 // wrong; at these points, eax contains the address of the offending block (I think). 4935 // To get to the exlicit error message(s) below, just continue twice. 4936 HANDLE heap = GetProcessHeap(); 4937 4938 // If we fail to lock the heap, then gflags.exe has been used 4939 // or some other special heap flag has been set that prevents 4940 // locking. 
We don't try to walk a heap we can't lock. 4941 if (HeapLock(heap) != 0) { 4942 PROCESS_HEAP_ENTRY phe; 4943 phe.lpData = NULL; 4944 while (HeapWalk(heap, &phe) != 0) { 4945 if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) && 4946 !HeapValidate(heap, 0, phe.lpData)) { 4947 tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter); 4948 tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData); 4949 fatal("corrupted C heap"); 4950 } 4951 } 4952 DWORD err = GetLastError(); 4953 if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) { 4954 fatal(err_msg("heap walk aborted with error %d", err)); 4955 } 4956 HeapUnlock(heap); 4957 } 4958 mallocDebugIntervalCounter = 0; 4959 } 4960 return true; 4961 } 4962 4963 4964 bool os::find(address addr, outputStream* st) { 4965 // Nothing yet 4966 return false; 4967 } 4968 4969 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) { 4970 DWORD exception_code = e->ExceptionRecord->ExceptionCode; 4971 4972 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 4973 JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow(); 4974 PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord; 4975 address addr = (address) exceptionRecord->ExceptionInformation[1]; 4976 4977 if (os::is_memory_serialize_page(thread, addr)) 4978 return EXCEPTION_CONTINUE_EXECUTION; 4979 } 4980 4981 return EXCEPTION_CONTINUE_SEARCH; 4982 } 4983 4984 // We don't build a headless jre for Windows 4985 bool os::is_headless_jre() { return false; } 4986 4987 static jint initSock() { 4988 WSADATA wsadata; 4989 4990 if (!os::WinSock2Dll::WinSock2Available()) { 4991 jio_fprintf(stderr, "Could not load Winsock (error: %d)\n", 4992 ::GetLastError()); 4993 return JNI_ERR; 4994 } 4995 4996 if (os::WinSock2Dll::WSAStartup(MAKEWORD(2,2), &wsadata) != 0) { 4997 jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n", 4998 ::GetLastError()); 4999 return JNI_ERR; 5000 } 5001 
return JNI_OK; 5002 } 5003 5004 struct hostent* os::get_host_by_name(char* name) { 5005 return (struct hostent*)os::WinSock2Dll::gethostbyname(name); 5006 } 5007 5008 int os::socket_close(int fd) { 5009 return ::closesocket(fd); 5010 } 5011 5012 int os::socket_available(int fd, jint *pbytes) { 5013 int ret = ::ioctlsocket(fd, FIONREAD, (u_long*)pbytes); 5014 return (ret < 0) ? 0 : 1; 5015 } 5016 5017 int os::socket(int domain, int type, int protocol) { 5018 return ::socket(domain, type, protocol); 5019 } 5020 5021 int os::listen(int fd, int count) { 5022 return ::listen(fd, count); 5023 } 5024 5025 int os::connect(int fd, struct sockaddr* him, socklen_t len) { 5026 return ::connect(fd, him, len); 5027 } 5028 5029 int os::accept(int fd, struct sockaddr* him, socklen_t* len) { 5030 return ::accept(fd, him, len); 5031 } 5032 5033 int os::sendto(int fd, char* buf, size_t len, uint flags, 5034 struct sockaddr* to, socklen_t tolen) { 5035 5036 return ::sendto(fd, buf, (int)len, flags, to, tolen); 5037 } 5038 5039 int os::recvfrom(int fd, char *buf, size_t nBytes, uint flags, 5040 sockaddr* from, socklen_t* fromlen) { 5041 5042 return ::recvfrom(fd, buf, (int)nBytes, flags, from, fromlen); 5043 } 5044 5045 int os::recv(int fd, char* buf, size_t nBytes, uint flags) { 5046 return ::recv(fd, buf, (int)nBytes, flags); 5047 } 5048 5049 int os::send(int fd, char* buf, size_t nBytes, uint flags) { 5050 return ::send(fd, buf, (int)nBytes, flags); 5051 } 5052 5053 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) { 5054 return ::send(fd, buf, (int)nBytes, flags); 5055 } 5056 5057 int os::timeout(int fd, long timeout) { 5058 fd_set tbl; 5059 struct timeval t; 5060 5061 t.tv_sec = timeout / 1000; 5062 t.tv_usec = (timeout % 1000) * 1000; 5063 5064 tbl.fd_count = 1; 5065 tbl.fd_array[0] = fd; 5066 5067 return ::select(1, &tbl, 0, 0, &t); 5068 } 5069 5070 int os::get_host_name(char* name, int namelen) { 5071 return ::gethostname(name, namelen); 5072 } 5073 5074 int 
os::socket_shutdown(int fd, int howto) { 5075 return ::shutdown(fd, howto); 5076 } 5077 5078 int os::bind(int fd, struct sockaddr* him, socklen_t len) { 5079 return ::bind(fd, him, len); 5080 } 5081 5082 int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) { 5083 return ::getsockname(fd, him, len); 5084 } 5085 5086 int os::get_sock_opt(int fd, int level, int optname, 5087 char* optval, socklen_t* optlen) { 5088 return ::getsockopt(fd, level, optname, optval, optlen); 5089 } 5090 5091 int os::set_sock_opt(int fd, int level, int optname, 5092 const char* optval, socklen_t optlen) { 5093 return ::setsockopt(fd, level, optname, optval, optlen); 5094 } 5095 5096 // WINDOWS CONTEXT Flags for THREAD_SAMPLING 5097 #if defined(IA32) 5098 # define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS) 5099 #elif defined (AMD64) 5100 # define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT) 5101 #endif 5102 5103 // returns true if thread could be suspended, 5104 // false otherwise 5105 static bool do_suspend(HANDLE* h) { 5106 if (h != NULL) { 5107 if (SuspendThread(*h) != ~0) { 5108 return true; 5109 } 5110 } 5111 return false; 5112 } 5113 5114 // resume the thread 5115 // calling resume on an active thread is a no-op 5116 static void do_resume(HANDLE* h) { 5117 if (h != NULL) { 5118 ResumeThread(*h); 5119 } 5120 } 5121 5122 // retrieve a suspend/resume context capable handle 5123 // from the tid. Caller validates handle return value. 
// Open a thread handle with the access rights needed to suspend/resume the
// thread and read its register context.  The result may be NULL; the caller
// is responsible for checking it.
void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

//
// Thread sampling implementation
//
// Suspends the target thread, captures its register context, hands the
// context to do_task(), then resumes the thread and closes the handle.
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}


// Kernel32 API
typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG);
typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG);
typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG);

// Lazily-resolved Kernel32 entry points.  These stay NULL until
// initialize()/initializeCommon() has run, and may legitimately remain NULL
// on Windows versions that do not export the symbol.
GetLargePageMinimum_Fn os::Kernel32Dll::_GetLargePageMinimum = NULL;
VirtualAllocExNuma_Fn os::Kernel32Dll::_VirtualAllocExNuma = NULL;
GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL;


BOOL os::Kernel32Dll::initialized = FALSE;
SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
  assert(initialized && _GetLargePageMinimum != NULL,
         "GetLargePageMinimumAvailable() not yet called");
  return _GetLargePageMinimum();
}

BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetLargePageMinimum != NULL;
}

BOOL os::Kernel32Dll::NumaCallsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _VirtualAllocExNuma != NULL;
}

LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr, SIZE_T bytes, DWORD flags, DWORD prot, DWORD node) {
  assert(initialized && _VirtualAllocExNuma != NULL,
         "NUMACallsAvailable() not yet called");

  return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
}

BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
  assert(initialized && _GetNumaHighestNodeNumber != NULL,
         "NUMACallsAvailable() not yet called");

  return _GetNumaHighestNodeNumber(ptr_highest_node_number);
}

BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node, PULONGLONG proc_mask) {
  assert(initialized && _GetNumaNodeProcessorMask != NULL,
         "NUMACallsAvailable() not yet called");

  return _GetNumaNodeProcessorMask(node, proc_mask);
}

// Unlike the calls above this one self-initializes and degrades gracefully
// (returns 0 frames) when the symbol is unavailable.
USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
    ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) {
  if (!initialized) {
    initialize();
  }

  if (_RtlCaptureStackBackTrace != NULL) {
    return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
                                     BackTrace, BackTraceHash);
  } else {
    return 0;
  }
}

// Resolve the entry points that must be looked up dynamically on every
// supported Windows version (they do not exist on the oldest ones).
void os::Kernel32Dll::initializeCommon() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");
    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
    _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
    _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
    _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
    _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace");
    initialized = TRUE;
  }
}



#ifndef JDK6_OR_EARLIER
// In this branch the APIs below are guaranteed to exist, so the wrappers
// call the Win32 functions directly instead of through GetProcAddress.

void os::Kernel32Dll::initialize() {
  initializeCommon();
}


// Kernel32 API
inline BOOL os::Kernel32Dll::SwitchToThread() {
  return ::SwitchToThread();
}

inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  return true;
}

// Help tools
inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
  return true;
}

inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
  return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32First(hSnapshot, lpme);
}

inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32Next(hSnapshot, lpme);
}


inline BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
  return true;
}

inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  ::GetNativeSystemInfo(lpSystemInfo);
}

// PSAPI API
inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
  return ::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
  return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
  return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

inline BOOL os::PSApiDll::PSApiAvailable() {
  return true;
}


// WinSock2 API
inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
  return ::WSAStartup(wVersionRequested, lpWSAData);
}

inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  return ::gethostbyname(name);
}

inline BOOL os::WinSock2Dll::WinSock2Available() {
  return true;
}

// Advapi API
inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
    BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
    PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
  return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
                                 BufferLength, PreviousState, ReturnLength);
}

inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
    PHANDLE TokenHandle) {
  return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}

inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
  return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}

inline BOOL os::Advapi32Dll::AdvapiAvailable() {
  return true;
}

void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//                             == false if only the base name of the library is passed in
//                                     such as "L"
// Returns a freshly C-heap-allocated name ("sym", "sym_lib" or
// "_sym_lib@XX" when sym carries an __stdcall suffix), or NULL on failure.
// The caller owns the returned buffer.
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      // NOTE(review): this length check uses the length of the full path,
      // not of the stripped basename -- confirm intended.
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // +2: one for the '_' separator, one for the terminating NUL.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}

#else
// JDK6-and-earlier build: the APIs wrapped below may be missing from the
// running Windows version, so every entry point is resolved dynamically
// and callers must consult the *Available() predicates first.

// Kernel32 API
typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD,DWORD);
typedef BOOL (WINAPI* Module32First_Fn)(HANDLE,LPMODULEENTRY32);
typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE,LPMODULEENTRY32);
typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);

SwitchToThread_Fn os::Kernel32Dll::_SwitchToThread = NULL;
CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
Module32First_Fn os::Kernel32Dll::_Module32First = NULL;
Module32Next_Fn os::Kernel32Dll::_Module32Next = NULL;
GetNativeSystemInfo_Fn os::Kernel32Dll::_GetNativeSystemInfo = NULL;

void os::Kernel32Dll::initialize() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");

    _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
    _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
      ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
    _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First");
    _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next");
    _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo");
    initializeCommon();  // resolve the functions that always need resolving

    initialized = TRUE;
  }
}

BOOL os::Kernel32Dll::SwitchToThread() {
  assert(initialized && _SwitchToThread != NULL,
         "SwitchToThreadAvailable() not yet called");
  return _SwitchToThread();
}


BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  if (!initialized) {
    initialize();
  }
  return _SwitchToThread != NULL;
}

// Help tools
BOOL os::Kernel32Dll::HelpToolsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _CreateToolhelp32Snapshot != NULL &&
         _Module32First != NULL &&
         _Module32Next != NULL;
}

HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
  assert(initialized && _CreateToolhelp32Snapshot != NULL,
         "HelpToolsAvailable() not yet called");

  return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  assert(initialized && _Module32First != NULL,
         "HelpToolsAvailable() not yet called");

  return _Module32First(hSnapshot, lpme);
}

inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  assert(initialized && _Module32Next != NULL,
         "HelpToolsAvailable() not yet called");

  return _Module32Next(hSnapshot, lpme);
}


BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetNativeSystemInfo != NULL;
}

void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  assert(initialized && _GetNativeSystemInfo != NULL,
         "GetNativeSystemInfoAvailable() not yet called");

  _GetNativeSystemInfo(lpSystemInfo);
}

// PSAPI API


typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD);
typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);;
typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD);

EnumProcessModules_Fn os::PSApiDll::_EnumProcessModules = NULL;
GetModuleFileNameEx_Fn os::PSApiDll::_GetModuleFileNameEx = NULL;
GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL;
BOOL os::PSApiDll::initialized = FALSE;

void os::PSApiDll::initialize() {
  if (!initialized) {
    HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0);
    if (handle != NULL) {
      _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle,
                                                                    "EnumProcessModules");
      _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle,
                                                                      "GetModuleFileNameExA");
      _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle,
                                                                        "GetModuleInformation");
    }
    initialized = TRUE;
  }
}



BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
  assert(initialized && _EnumProcessModules != NULL,
         "PSApiAvailable() not yet called");
  return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
  assert(initialized && _GetModuleFileNameEx != NULL,
         "PSApiAvailable() not yet called");
  return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
  assert(initialized && _GetModuleInformation != NULL,
         "PSApiAvailable() not yet called");
  return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

BOOL os::PSApiDll::PSApiAvailable() {
  if (!initialized) {
    initialize();
  }
  return _EnumProcessModules != NULL &&
         _GetModuleFileNameEx != NULL &&
         _GetModuleInformation != NULL;
}


// WinSock2 API
typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA);
typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...);

WSAStartup_Fn os::WinSock2Dll::_WSAStartup = NULL;
gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL;
BOOL os::WinSock2Dll::initialized = FALSE;

void os::WinSock2Dll::initialize() {
  if (!initialized) {
    HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0);
    if (handle != NULL) {
      _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup");
      _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname");
    }
    initialized = TRUE;
  }
}


BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
  assert(initialized && _WSAStartup != NULL,
         "WinSock2Available() not yet called");
  return _WSAStartup(wVersionRequested, lpWSAData);
}

struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  assert(initialized && _gethostbyname != NULL,
         "WinSock2Available() not yet called");
  return _gethostbyname(name);
}

BOOL os::WinSock2Dll::WinSock2Available() {
  if (!initialized) {
    initialize();
  }
  return _WSAStartup != NULL &&
         _gethostbyname != NULL;
}

typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD);
typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE);
typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, LPCTSTR, PLUID);
AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL;
OpenProcessToken_Fn      os::Advapi32Dll::_OpenProcessToken      = NULL;
LookupPrivilegeValue_Fn  os::Advapi32Dll::_LookupPrivilegeValue  = NULL;
BOOL                     os::Advapi32Dll::initialized            = FALSE;

// Lazily load advapi32.dll and resolve the token/privilege entry points.
// Only the first call does any work; unresolved entries stay NULL and are
// reported via AdvapiAvailable().
void os::Advapi32Dll::initialize() {
  if (!initialized) {
    HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0);
    if (handle != NULL) {
      _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle,
                                                                          "AdjustTokenPrivileges");
      _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle,
                                                                "OpenProcessToken");
      _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle,
                                                                        "LookupPrivilegeValueA");
    }
    initialized = TRUE;
  }
}

// Thin checked forwarder to AdjustTokenPrivileges; requires a prior
// successful AdvapiAvailable() / initialize().
BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
                                            BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
                                            PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
  assert(initialized && _AdjustTokenPrivileges != NULL,
         "AdvapiAvailable() not yet called");
  return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
                                BufferLength, PreviousState, ReturnLength);
}

// Thin checked forwarder to OpenProcessToken.
BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
                                       PHANDLE TokenHandle) {
  assert(initialized && _OpenProcessToken != NULL,
         "AdvapiAvailable() not yet called");
  return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}

// Thin checked forwarder to LookupPrivilegeValueA.
BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
  assert(initialized && _LookupPrivilegeValue != NULL,
         "AdvapiAvailable() not yet called");
  return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}

// Returns TRUE only if advapi32.dll loaded and all three entry points
// resolved. Initializes on first use.
BOOL os::Advapi32Dll::AdvapiAvailable() {
  if (!initialized) {
    initialize();
  }
  return _AdjustTokenPrivileges != NULL &&
         _OpenProcessToken      != NULL &&
         _LookupPrivilegeValue  != NULL;
}

#endif

#ifndef PRODUCT

// test the code path in reserve_memory_special() that tries to allocate memory in a single
// contiguous memory block at a particular address.
// The test first tries to find a good approximate address to allocate at by using the same
// method to allocate some memory at any address. The test then tries to allocate memory in
// the vicinity (not directly after it to avoid possible by-chance use of that location)
// This is of course only some dodgy assumption, there is no guarantee that the vicinity of
// the previously allocated memory is available for allocation. The only actual failure
// that is reported is when the test tries to allocate at a particular location but gets a
// different valid one. A NULL return value at this point is not considered an error but may
// be legitimate.
// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
void TestReserveMemorySpecial_test() {
  if (!UseLargePages) {
    if (VerboseInternalVMTests) {
      gclog_or_tty->print("Skipping test because large pages are disabled");
    }
    return;
  }
  // save current value of globals
  bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
  bool old_use_numa_interleaving = UseNUMAInterleaving;

  // set globals to make sure we hit the correct code path
  UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;

  // do an allocation at an address selected by the OS to get a good one.
  const size_t large_allocation_size = os::large_page_size() * 4;
  char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
  if (result == NULL) {
    if (VerboseInternalVMTests) {
      gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". Skipping remainder of test.",
                          large_allocation_size);
    }
  } else {
    os::release_memory_special(result, large_allocation_size);

    // allocate another page within the recently allocated memory area which seems to be a good location. At least
    // we managed to get it once.
    const size_t expected_allocation_size = os::large_page_size();
    char* expected_location = result + os::large_page_size();
    char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
    if (actual_location == NULL) {
      if (VerboseInternalVMTests) {
        // Fixed: this message describes the second allocation attempt, whose
        // size is expected_allocation_size, not large_allocation_size.
        gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.",
                            expected_location, expected_allocation_size);
      }
    } else {
      // release memory
      os::release_memory_special(actual_location, expected_allocation_size);
      // only now check, after releasing any memory to avoid any leaks.
      assert(actual_location == expected_location,
             err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead",
                     expected_location, expected_allocation_size, actual_location));
    }
  }

  // restore globals
  UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
  UseNUMAInterleaving = old_use_numa_interleaving;
}
#endif // PRODUCT