1 /* 2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // Must be at least Windows 2000 or XP to use IsDebuggerPresent 26 #define _WIN32_WINNT 0x500 27 28 // no precompiled headers 29 #include "classfile/classLoader.hpp" 30 #include "classfile/systemDictionary.hpp" 31 #include "classfile/vmSymbols.hpp" 32 #include "code/icBuffer.hpp" 33 #include "code/vtableStubs.hpp" 34 #include "compiler/compileBroker.hpp" 35 #include "compiler/disassembler.hpp" 36 #include "interpreter/interpreter.hpp" 37 #include "jvm_windows.h" 38 #include "memory/allocation.inline.hpp" 39 #include "memory/filemap.hpp" 40 #include "mutex_windows.inline.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "os_share_windows.hpp" 43 #include "prims/jniFastGetField.hpp" 44 #include "prims/jvm.h" 45 #include "prims/jvm_misc.hpp" 46 #include "runtime/arguments.hpp" 47 #include "runtime/extendedPC.hpp" 48 #include "runtime/globals.hpp" 49 #include "runtime/interfaceSupport.hpp" 50 #include "runtime/java.hpp" 51 #include "runtime/javaCalls.hpp" 52 #include "runtime/mutexLocker.hpp" 53 #include "runtime/objectMonitor.hpp" 54 #include "runtime/osThread.hpp" 55 #include "runtime/perfMemory.hpp" 56 #include "runtime/sharedRuntime.hpp" 57 #include "runtime/statSampler.hpp" 58 #include "runtime/stubRoutines.hpp" 59 #include "runtime/thread.inline.hpp" 60 #include "runtime/threadCritical.hpp" 61 #include "runtime/timer.hpp" 62 #include "services/attachListener.hpp" 63 #include "services/runtimeService.hpp" 64 #include "utilities/decoder.hpp" 65 #include "utilities/defaultStream.hpp" 66 #include "utilities/events.hpp" 67 #include "utilities/growableArray.hpp" 68 #include "utilities/vmError.hpp" 69 70 #ifdef _DEBUG 71 #include <crtdbg.h> 72 #endif 73 74 75 #include <windows.h> 76 #include <sys/types.h> 77 #include <sys/stat.h> 78 #include <sys/timeb.h> 79 #include <objidl.h> 80 #include <shlobj.h> 81 82 #include <malloc.h> 83 #include <signal.h> 84 #include <direct.h> 85 #include <errno.h> 86 #include <fcntl.h> 87 #include <io.h> 88 #include 
<process.h> // For _beginthreadex(), _endthreadex() 89 #include <imagehlp.h> // For os::dll_address_to_function_name 90 /* for enumerating dll libraries */ 91 #include <vdmdbg.h> 92 93 // for timer info max values which include all bits 94 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF) 95 96 // For DLL loading/load error detection 97 // Values of PE COFF 98 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c 99 #define IMAGE_FILE_SIGNATURE_LENGTH 4 100 101 static HANDLE main_process; 102 static HANDLE main_thread; 103 static int main_thread_id; 104 105 static FILETIME process_creation_time; 106 static FILETIME process_exit_time; 107 static FILETIME process_user_time; 108 static FILETIME process_kernel_time; 109 110 #ifdef _M_IA64 111 #define __CPU__ ia64 112 #elif _M_AMD64 113 #define __CPU__ amd64 114 #else 115 #define __CPU__ i486 116 #endif 117 118 // save DLL module handle, used by GetModuleFileName 119 120 HINSTANCE vm_lib_handle; 121 122 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) { 123 switch (reason) { 124 case DLL_PROCESS_ATTACH: 125 vm_lib_handle = hinst; 126 if(ForceTimeHighResolution) 127 timeBeginPeriod(1L); 128 break; 129 case DLL_PROCESS_DETACH: 130 if(ForceTimeHighResolution) 131 timeEndPeriod(1L); 132 break; 133 default: 134 break; 135 } 136 return true; 137 } 138 139 static inline double fileTimeAsDouble(FILETIME* time) { 140 const double high = (double) ((unsigned int) ~0); 141 const double split = 10000000.0; 142 double result = (time->dwLowDateTime / split) + 143 time->dwHighDateTime * (high/split); 144 return result; 145 } 146 147 // Implementation of os 148 149 bool os::getenv(const char* name, char* buffer, int len) { 150 int result = GetEnvironmentVariable(name, buffer, len); 151 return result > 0 && result < len; 152 } 153 154 155 // No setuid programs under Windows. 
156 bool os::have_special_privileges() { 157 return false; 158 } 159 160 161 // This method is a periodic task to check for misbehaving JNI applications 162 // under CheckJNI, we can add any periodic checks here. 163 // For Windows at the moment does nothing 164 void os::run_periodic_checks() { 165 return; 166 } 167 168 #ifndef _WIN64 169 // previous UnhandledExceptionFilter, if there is one 170 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL; 171 172 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo); 173 #endif 174 void os::init_system_properties_values() { 175 /* sysclasspath, java_home, dll_dir */ 176 { 177 char *home_path; 178 char *dll_path; 179 char *pslash; 180 char *bin = "\\bin"; 181 char home_dir[MAX_PATH]; 182 183 if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) { 184 os::jvm_path(home_dir, sizeof(home_dir)); 185 // Found the full path to jvm[_g].dll. 186 // Now cut the path to <java_home>/jre if we can. 187 *(strrchr(home_dir, '\\')) = '\0'; /* get rid of \jvm.dll */ 188 pslash = strrchr(home_dir, '\\'); 189 if (pslash != NULL) { 190 *pslash = '\0'; /* get rid of \{client|server} */ 191 pslash = strrchr(home_dir, '\\'); 192 if (pslash != NULL) 193 *pslash = '\0'; /* get rid of \bin */ 194 } 195 } 196 197 home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal); 198 if (home_path == NULL) 199 return; 200 strcpy(home_path, home_dir); 201 Arguments::set_java_home(home_path); 202 203 dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, mtInternal); 204 if (dll_path == NULL) 205 return; 206 strcpy(dll_path, home_dir); 207 strcat(dll_path, bin); 208 Arguments::set_dll_dir(dll_path); 209 210 if (!set_boot_path('\\', ';')) 211 return; 212 } 213 214 /* library_path */ 215 #define EXT_DIR "\\lib\\ext" 216 #define BIN_DIR "\\bin" 217 #define PACKAGE_DIR "\\Sun\\Java" 218 { 219 /* Win32 library search order (See the documentation for LoadLibrary): 220 * 221 * 1. 
The directory from which application is loaded. 222 * 2. The system wide Java Extensions directory (Java only) 223 * 3. System directory (GetSystemDirectory) 224 * 4. Windows directory (GetWindowsDirectory) 225 * 5. The PATH environment variable 226 * 6. The current directory 227 */ 228 229 char *library_path; 230 char tmp[MAX_PATH]; 231 char *path_str = ::getenv("PATH"); 232 233 library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) + 234 sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal); 235 236 library_path[0] = '\0'; 237 238 GetModuleFileName(NULL, tmp, sizeof(tmp)); 239 *(strrchr(tmp, '\\')) = '\0'; 240 strcat(library_path, tmp); 241 242 GetWindowsDirectory(tmp, sizeof(tmp)); 243 strcat(library_path, ";"); 244 strcat(library_path, tmp); 245 strcat(library_path, PACKAGE_DIR BIN_DIR); 246 247 GetSystemDirectory(tmp, sizeof(tmp)); 248 strcat(library_path, ";"); 249 strcat(library_path, tmp); 250 251 GetWindowsDirectory(tmp, sizeof(tmp)); 252 strcat(library_path, ";"); 253 strcat(library_path, tmp); 254 255 if (path_str) { 256 strcat(library_path, ";"); 257 strcat(library_path, path_str); 258 } 259 260 strcat(library_path, ";."); 261 262 Arguments::set_library_path(library_path); 263 FREE_C_HEAP_ARRAY(char, library_path, mtInternal); 264 } 265 266 /* Default extensions directory */ 267 { 268 char path[MAX_PATH]; 269 char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1]; 270 GetWindowsDirectory(path, MAX_PATH); 271 sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR, 272 path, PACKAGE_DIR, EXT_DIR); 273 Arguments::set_ext_dirs(buf); 274 } 275 #undef EXT_DIR 276 #undef BIN_DIR 277 #undef PACKAGE_DIR 278 279 /* Default endorsed standards directory. 
*/ 280 { 281 #define ENDORSED_DIR "\\lib\\endorsed" 282 size_t len = strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR); 283 char * buf = NEW_C_HEAP_ARRAY(char, len, mtInternal); 284 sprintf(buf, "%s%s", Arguments::get_java_home(), ENDORSED_DIR); 285 Arguments::set_endorsed_dirs(buf); 286 #undef ENDORSED_DIR 287 } 288 289 #ifndef _WIN64 290 // set our UnhandledExceptionFilter and save any previous one 291 prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception); 292 #endif 293 294 // Done 295 return; 296 } 297 298 void os::breakpoint() { 299 DebugBreak(); 300 } 301 302 // Invoked from the BREAKPOINT Macro 303 extern "C" void breakpoint() { 304 os::breakpoint(); 305 } 306 307 /* 308 * RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP. 309 * So far, this method is only used by Native Memory Tracking, which is 310 * only supported on Windows XP or later. 311 */ 312 address os::get_caller_pc(int n) { 313 #ifdef _NMT_NOINLINE_ 314 n ++; 315 #endif 316 address pc; 317 if (os::Kernel32Dll::RtlCaptureStackBackTrace(n + 1, 1, (PVOID*)&pc, NULL) == 1) { 318 return pc; 319 } 320 return NULL; 321 } 322 323 324 // os::current_stack_base() 325 // 326 // Returns the base of the stack, which is the stack's 327 // starting address. This function must be called 328 // while running on the stack of the thread being queried. 329 330 address os::current_stack_base() { 331 MEMORY_BASIC_INFORMATION minfo; 332 address stack_bottom; 333 size_t stack_size; 334 335 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 336 stack_bottom = (address)minfo.AllocationBase; 337 stack_size = minfo.RegionSize; 338 339 // Add up the sizes of all the regions with the same 340 // AllocationBase. 
341 while( 1 ) 342 { 343 VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo)); 344 if ( stack_bottom == (address)minfo.AllocationBase ) 345 stack_size += minfo.RegionSize; 346 else 347 break; 348 } 349 350 #ifdef _M_IA64 351 // IA64 has memory and register stacks 352 stack_size = stack_size / 2; 353 #endif 354 return stack_bottom + stack_size; 355 } 356 357 size_t os::current_stack_size() { 358 size_t sz; 359 MEMORY_BASIC_INFORMATION minfo; 360 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 361 sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase; 362 return sz; 363 } 364 365 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 366 const struct tm* time_struct_ptr = localtime(clock); 367 if (time_struct_ptr != NULL) { 368 *res = *time_struct_ptr; 369 return res; 370 } 371 return NULL; 372 } 373 374 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo); 375 376 // Thread start routine for all new Java threads 377 static unsigned __stdcall java_start(Thread* thread) { 378 // Try to randomize the cache line index of hot stack frames. 379 // This helps when threads of the same stack traces evict each other's 380 // cache lines. The threads can be either from the same JVM instance, or 381 // from different JVM instances. The benefit is especially true for 382 // processors with hyperthreading technology. 383 static int counter = 0; 384 int pid = os::current_process_id(); 385 _alloca(((pid ^ counter++) & 7) * 128); 386 387 OSThread* osthr = thread->osthread(); 388 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 389 390 if (UseNUMA) { 391 int lgrp_id = os::numa_get_group_id(); 392 if (lgrp_id != -1) { 393 thread->set_lgrp_id(lgrp_id); 394 } 395 } 396 397 398 // Install a win32 structured exception handler around every thread created 399 // by VM, so VM can genrate error dump when an exception occurred in non- 400 // Java thread (e.g. VM thread). 
401 __try { 402 thread->run(); 403 } __except(topLevelExceptionFilter( 404 (_EXCEPTION_POINTERS*)_exception_info())) { 405 // Nothing to do. 406 } 407 408 // One less thread is executing 409 // When the VMThread gets here, the main thread may have already exited 410 // which frees the CodeHeap containing the Atomic::add code 411 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) { 412 Atomic::dec_ptr((intptr_t*)&os::win32::_os_thread_count); 413 } 414 415 return 0; 416 } 417 418 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, int thread_id) { 419 // Allocate the OSThread object 420 OSThread* osthread = new OSThread(NULL, NULL); 421 if (osthread == NULL) return NULL; 422 423 // Initialize support for Java interrupts 424 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 425 if (interrupt_event == NULL) { 426 delete osthread; 427 return NULL; 428 } 429 osthread->set_interrupt_event(interrupt_event); 430 431 // Store info on the Win32 thread into the OSThread 432 osthread->set_thread_handle(thread_handle); 433 osthread->set_thread_id(thread_id); 434 435 if (UseNUMA) { 436 int lgrp_id = os::numa_get_group_id(); 437 if (lgrp_id != -1) { 438 thread->set_lgrp_id(lgrp_id); 439 } 440 } 441 442 // Initial thread state is INITIALIZED, not SUSPENDED 443 osthread->set_state(INITIALIZED); 444 445 return osthread; 446 } 447 448 449 bool os::create_attached_thread(JavaThread* thread) { 450 #ifdef ASSERT 451 thread->verify_not_published(); 452 #endif 453 HANDLE thread_h; 454 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(), 455 &thread_h, THREAD_ALL_ACCESS, false, 0)) { 456 fatal("DuplicateHandle failed\n"); 457 } 458 OSThread* osthread = create_os_thread(thread, thread_h, 459 (int)current_thread_id()); 460 if (osthread == NULL) { 461 return false; 462 } 463 464 // Initial thread state is RUNNABLE 465 osthread->set_state(RUNNABLE); 466 467 thread->set_osthread(osthread); 468 return true; 469 } 470 471 
bool os::create_main_thread(JavaThread* thread) { 472 #ifdef ASSERT 473 thread->verify_not_published(); 474 #endif 475 if (_starting_thread == NULL) { 476 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 477 if (_starting_thread == NULL) { 478 return false; 479 } 480 } 481 482 // The primordial thread is runnable from the start) 483 _starting_thread->set_state(RUNNABLE); 484 485 thread->set_osthread(_starting_thread); 486 return true; 487 } 488 489 // Allocate and initialize a new OSThread 490 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) { 491 unsigned thread_id; 492 493 // Allocate the OSThread object 494 OSThread* osthread = new OSThread(NULL, NULL); 495 if (osthread == NULL) { 496 return false; 497 } 498 499 // Initialize support for Java interrupts 500 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 501 if (interrupt_event == NULL) { 502 delete osthread; 503 return NULL; 504 } 505 osthread->set_interrupt_event(interrupt_event); 506 osthread->set_interrupted(false); 507 508 thread->set_osthread(osthread); 509 510 if (stack_size == 0) { 511 switch (thr_type) { 512 case os::java_thread: 513 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 514 if (JavaThread::stack_size_at_create() > 0) 515 stack_size = JavaThread::stack_size_at_create(); 516 break; 517 case os::compiler_thread: 518 if (CompilerThreadStackSize > 0) { 519 stack_size = (size_t)(CompilerThreadStackSize * K); 520 break; 521 } // else fall through: 522 // use VMThreadStackSize if CompilerThreadStackSize is not defined 523 case os::vm_thread: 524 case os::pgc_thread: 525 case os::cgc_thread: 526 case os::watcher_thread: 527 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 528 break; 529 } 530 } 531 532 // Create the Win32 thread 533 // 534 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() 535 // does not specify stack size. 
Instead, it specifies the size of 536 // initially committed space. The stack size is determined by 537 // PE header in the executable. If the committed "stack_size" is larger 538 // than default value in the PE header, the stack is rounded up to the 539 // nearest multiple of 1MB. For example if the launcher has default 540 // stack size of 320k, specifying any size less than 320k does not 541 // affect the actual stack size at all, it only affects the initial 542 // commitment. On the other hand, specifying 'stack_size' larger than 543 // default value may cause significant increase in memory usage, because 544 // not only the stack space will be rounded up to MB, but also the 545 // entire space is committed upfront. 546 // 547 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' 548 // for CreateThread() that can treat 'stack_size' as stack size. However we 549 // are not supposed to call CreateThread() directly according to MSDN 550 // document because JVM uses C runtime library. The good news is that the 551 // flag appears to work with _beginthredex() as well. 552 553 #ifndef STACK_SIZE_PARAM_IS_A_RESERVATION 554 #define STACK_SIZE_PARAM_IS_A_RESERVATION (0x10000) 555 #endif 556 557 HANDLE thread_handle = 558 (HANDLE)_beginthreadex(NULL, 559 (unsigned)stack_size, 560 (unsigned (__stdcall *)(void*)) java_start, 561 thread, 562 CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 563 &thread_id); 564 if (thread_handle == NULL) { 565 // perhaps STACK_SIZE_PARAM_IS_A_RESERVATION is not supported, try again 566 // without the flag. 
567 thread_handle = 568 (HANDLE)_beginthreadex(NULL, 569 (unsigned)stack_size, 570 (unsigned (__stdcall *)(void*)) java_start, 571 thread, 572 CREATE_SUSPENDED, 573 &thread_id); 574 } 575 if (thread_handle == NULL) { 576 // Need to clean up stuff we've allocated so far 577 CloseHandle(osthread->interrupt_event()); 578 thread->set_osthread(NULL); 579 delete osthread; 580 return NULL; 581 } 582 583 Atomic::inc_ptr((intptr_t*)&os::win32::_os_thread_count); 584 585 // Store info on the Win32 thread into the OSThread 586 osthread->set_thread_handle(thread_handle); 587 osthread->set_thread_id(thread_id); 588 589 // Initial thread state is INITIALIZED, not SUSPENDED 590 osthread->set_state(INITIALIZED); 591 592 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 593 return true; 594 } 595 596 597 // Free Win32 resources related to the OSThread 598 void os::free_thread(OSThread* osthread) { 599 assert(osthread != NULL, "osthread not set"); 600 CloseHandle(osthread->thread_handle()); 601 CloseHandle(osthread->interrupt_event()); 602 delete osthread; 603 } 604 605 606 static int has_performance_count = 0; 607 static jlong first_filetime; 608 static jlong initial_performance_count; 609 static jlong performance_frequency; 610 611 612 jlong as_long(LARGE_INTEGER x) { 613 jlong result = 0; // initialization to avoid warning 614 set_high(&result, x.HighPart); 615 set_low(&result, x.LowPart); 616 return result; 617 } 618 619 620 jlong os::elapsed_counter() { 621 LARGE_INTEGER count; 622 if (has_performance_count) { 623 QueryPerformanceCounter(&count); 624 return as_long(count) - initial_performance_count; 625 } else { 626 FILETIME wt; 627 GetSystemTimeAsFileTime(&wt); 628 return (jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - first_filetime); 629 } 630 } 631 632 633 jlong os::elapsed_frequency() { 634 if (has_performance_count) { 635 return performance_frequency; 636 } else { 637 // the FILETIME time is the number of 
100-nanosecond intervals since January 1,1601. 638 return 10000000; 639 } 640 } 641 642 643 julong os::available_memory() { 644 return win32::available_memory(); 645 } 646 647 julong os::win32::available_memory() { 648 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 649 // value if total memory is larger than 4GB 650 MEMORYSTATUSEX ms; 651 ms.dwLength = sizeof(ms); 652 GlobalMemoryStatusEx(&ms); 653 654 return (julong)ms.ullAvailPhys; 655 } 656 657 julong os::physical_memory() { 658 return win32::physical_memory(); 659 } 660 661 julong os::allocatable_physical_memory(julong size) { 662 #ifdef _LP64 663 return size; 664 #else 665 // Limit to 1400m because of the 2gb address space wall 666 return MIN2(size, (julong)1400*M); 667 #endif 668 } 669 670 // VC6 lacks DWORD_PTR 671 #if _MSC_VER < 1300 672 typedef UINT_PTR DWORD_PTR; 673 #endif 674 675 int os::active_processor_count() { 676 DWORD_PTR lpProcessAffinityMask = 0; 677 DWORD_PTR lpSystemAffinityMask = 0; 678 int proc_count = processor_count(); 679 if (proc_count <= sizeof(UINT_PTR) * BitsPerByte && 680 GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) { 681 // Nof active processors is number of bits in process affinity mask 682 int bitcount = 0; 683 while (lpProcessAffinityMask != 0) { 684 lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1); 685 bitcount++; 686 } 687 return bitcount; 688 } else { 689 return proc_count; 690 } 691 } 692 693 void os::set_native_thread_name(const char *name) { 694 // Not yet implemented. 695 return; 696 } 697 698 bool os::distribute_processes(uint length, uint* distribution) { 699 // Not yet implemented. 700 return false; 701 } 702 703 bool os::bind_to_processor(uint processor_id) { 704 // Not yet implemented. 
705 return false; 706 } 707 708 static void initialize_performance_counter() { 709 LARGE_INTEGER count; 710 if (QueryPerformanceFrequency(&count)) { 711 has_performance_count = 1; 712 performance_frequency = as_long(count); 713 QueryPerformanceCounter(&count); 714 initial_performance_count = as_long(count); 715 } else { 716 has_performance_count = 0; 717 FILETIME wt; 718 GetSystemTimeAsFileTime(&wt); 719 first_filetime = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 720 } 721 } 722 723 724 double os::elapsedTime() { 725 return (double) elapsed_counter() / (double) elapsed_frequency(); 726 } 727 728 729 // Windows format: 730 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601. 731 // Java format: 732 // Java standards require the number of milliseconds since 1/1/1970 733 734 // Constant offset - calculated using offset() 735 static jlong _offset = 116444736000000000; 736 // Fake time counter for reproducible results when debugging 737 static jlong fake_time = 0; 738 739 #ifdef ASSERT 740 // Just to be safe, recalculate the offset in debug mode 741 static jlong _calculated_offset = 0; 742 static int _has_calculated_offset = 0; 743 744 jlong offset() { 745 if (_has_calculated_offset) return _calculated_offset; 746 SYSTEMTIME java_origin; 747 java_origin.wYear = 1970; 748 java_origin.wMonth = 1; 749 java_origin.wDayOfWeek = 0; // ignored 750 java_origin.wDay = 1; 751 java_origin.wHour = 0; 752 java_origin.wMinute = 0; 753 java_origin.wSecond = 0; 754 java_origin.wMilliseconds = 0; 755 FILETIME jot; 756 if (!SystemTimeToFileTime(&java_origin, &jot)) { 757 fatal(err_msg("Error = %d\nWindows error", GetLastError())); 758 } 759 _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime); 760 _has_calculated_offset = 1; 761 assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal"); 762 return _calculated_offset; 763 } 764 #else 765 jlong offset() { 766 return 
_offset; 767 } 768 #endif 769 770 jlong windows_to_java_time(FILETIME wt) { 771 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 772 return (a - offset()) / 10000; 773 } 774 775 FILETIME java_to_windows_time(jlong l) { 776 jlong a = (l * 10000) + offset(); 777 FILETIME result; 778 result.dwHighDateTime = high(a); 779 result.dwLowDateTime = low(a); 780 return result; 781 } 782 783 // For now, we say that Windows does not support vtime. I have no idea 784 // whether it can actually be made to (DLD, 9/13/05). 785 786 bool os::supports_vtime() { return false; } 787 bool os::enable_vtime() { return false; } 788 bool os::vtime_enabled() { return false; } 789 double os::elapsedVTime() { 790 // better than nothing, but not much 791 return elapsedTime(); 792 } 793 794 jlong os::javaTimeMillis() { 795 if (UseFakeTimers) { 796 return fake_time++; 797 } else { 798 FILETIME wt; 799 GetSystemTimeAsFileTime(&wt); 800 return windows_to_java_time(wt); 801 } 802 } 803 804 jlong os::javaTimeNanos() { 805 if (!has_performance_count) { 806 return javaTimeMillis() * NANOSECS_PER_MILLISEC; // the best we can do. 
807 } else { 808 LARGE_INTEGER current_count; 809 QueryPerformanceCounter(¤t_count); 810 double current = as_long(current_count); 811 double freq = performance_frequency; 812 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC); 813 return time; 814 } 815 } 816 817 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 818 if (!has_performance_count) { 819 // javaTimeMillis() doesn't have much percision, 820 // but it is not going to wrap -- so all 64 bits 821 info_ptr->max_value = ALL_64_BITS; 822 823 // this is a wall clock timer, so may skip 824 info_ptr->may_skip_backward = true; 825 info_ptr->may_skip_forward = true; 826 } else { 827 jlong freq = performance_frequency; 828 if (freq < NANOSECS_PER_SEC) { 829 // the performance counter is 64 bits and we will 830 // be multiplying it -- so no wrap in 64 bits 831 info_ptr->max_value = ALL_64_BITS; 832 } else if (freq > NANOSECS_PER_SEC) { 833 // use the max value the counter can reach to 834 // determine the max value which could be returned 835 julong max_counter = (julong)ALL_64_BITS; 836 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC)); 837 } else { 838 // the performance counter is 64 bits and we will 839 // be using it directly -- so no wrap in 64 bits 840 info_ptr->max_value = ALL_64_BITS; 841 } 842 843 // using a counter, so no skipping 844 info_ptr->may_skip_backward = false; 845 info_ptr->may_skip_forward = false; 846 } 847 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time 848 } 849 850 char* os::local_time_string(char *buf, size_t buflen) { 851 SYSTEMTIME st; 852 GetLocalTime(&st); 853 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 854 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); 855 return buf; 856 } 857 858 bool os::getTimesSecs(double* process_real_time, 859 double* process_user_time, 860 double* process_system_time) { 861 HANDLE h_process = GetCurrentProcess(); 862 FILETIME create_time, exit_time, kernel_time, user_time; 863 BOOL 
result = GetProcessTimes(h_process, 864 &create_time, 865 &exit_time, 866 &kernel_time, 867 &user_time); 868 if (result != 0) { 869 FILETIME wt; 870 GetSystemTimeAsFileTime(&wt); 871 jlong rtc_millis = windows_to_java_time(wt); 872 jlong user_millis = windows_to_java_time(user_time); 873 jlong system_millis = windows_to_java_time(kernel_time); 874 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS); 875 *process_user_time = ((double) user_millis) / ((double) MILLIUNITS); 876 *process_system_time = ((double) system_millis) / ((double) MILLIUNITS); 877 return true; 878 } else { 879 return false; 880 } 881 } 882 883 void os::shutdown() { 884 885 // allow PerfMemory to attempt cleanup of any persistent resources 886 perfMemory_exit(); 887 888 // flush buffered output, finish log files 889 ostream_abort(); 890 891 // Check for abort hook 892 abort_hook_t abort_hook = Arguments::abort_hook(); 893 if (abort_hook != NULL) { 894 abort_hook(); 895 } 896 } 897 898 899 static BOOL (WINAPI *_MiniDumpWriteDump) ( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION, 900 PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION); 901 902 void os::check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize) { 903 HINSTANCE dbghelp; 904 EXCEPTION_POINTERS ep; 905 MINIDUMP_EXCEPTION_INFORMATION mei; 906 MINIDUMP_EXCEPTION_INFORMATION* pmei; 907 908 HANDLE hProcess = GetCurrentProcess(); 909 DWORD processId = GetCurrentProcessId(); 910 HANDLE dumpFile; 911 MINIDUMP_TYPE dumpType; 912 static const char* cwd; 913 914 // If running on a client version of Windows and user has not explicitly enabled dumping 915 if (!os::win32::is_windows_server() && !CreateMinidumpOnCrash) { 916 VMError::report_coredump_status("Minidumps are not enabled by default on client versions of Windows", false); 917 return; 918 // If running on a server version of Windows and user has explictly disabled dumping 919 } else if 
(os::win32::is_windows_server() && !FLAG_IS_DEFAULT(CreateMinidumpOnCrash) && !CreateMinidumpOnCrash) { 920 VMError::report_coredump_status("Minidump has been disabled from the command line", false); 921 return; 922 } 923 924 dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0); 925 926 if (dbghelp == NULL) { 927 VMError::report_coredump_status("Failed to load dbghelp.dll", false); 928 return; 929 } 930 931 _MiniDumpWriteDump = CAST_TO_FN_PTR( 932 BOOL(WINAPI *)( HANDLE, DWORD, HANDLE, MINIDUMP_TYPE, PMINIDUMP_EXCEPTION_INFORMATION, 933 PMINIDUMP_USER_STREAM_INFORMATION, PMINIDUMP_CALLBACK_INFORMATION), 934 GetProcAddress(dbghelp, "MiniDumpWriteDump")); 935 936 if (_MiniDumpWriteDump == NULL) { 937 VMError::report_coredump_status("Failed to find MiniDumpWriteDump() in module dbghelp.dll", false); 938 return; 939 } 940 941 dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData); 942 943 // Older versions of dbghelp.h doesn't contain all the dumptypes we want, dbghelp.h with 944 // API_VERSION_NUMBER 11 or higher contains the ones we want though 945 #if API_VERSION_NUMBER >= 11 946 dumpType = (MINIDUMP_TYPE)(dumpType | MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | 947 MiniDumpWithUnloadedModules); 948 #endif 949 950 cwd = get_current_directory(NULL, 0); 951 jio_snprintf(buffer, bufferSize, "%s\\hs_err_pid%u.mdmp",cwd, current_process_id()); 952 dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); 953 954 if (dumpFile == INVALID_HANDLE_VALUE) { 955 VMError::report_coredump_status("Failed to create file for dumping", false); 956 return; 957 } 958 if (exceptionRecord != NULL && contextRecord != NULL) { 959 ep.ContextRecord = (PCONTEXT) contextRecord; 960 ep.ExceptionRecord = (PEXCEPTION_RECORD) exceptionRecord; 961 962 mei.ThreadId = GetCurrentThreadId(); 963 mei.ExceptionPointers = &ep; 964 pmei = &mei; 965 } else { 966 pmei = NULL; 967 } 968 969 970 // Older versions of dbghelp.dll (the 
  // one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
    VMError::report_coredump_status("Call to MiniDumpWriteDump() failed", false);
  } else {
    // Success: report the path of the .mdmp file we just wrote.
    VMError::report_coredump_status(buffer, true);
  }

  CloseHandle(dumpFile);
}



// Shut the VM down (runs shutdown hooks via os::shutdown) and exit the
// process. dump_core is ignored: no core dump is produced on Windows.
void os::abort(bool dump_core)
{
  os::shutdown();
  // no core dump on Windows
  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  _exit(-1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
//  * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

/* Caller must have already run dirname through JVM_NativePath, which removes
   duplicate slashes and converts all instances of '/' into '\\'. */

// Open a directory stream over 'dirname' using FindFirstFile. Returns NULL
// with errno set (ENOMEM/ENOENT/ENOTDIR/EACCES) on failure. The returned DIR
// and its path buffer are heap-allocated and must be released via closedir.
DIR *
os::opendir(const char *dirname)
{
    assert(dirname != NULL, "just checking");      // hotspot change
    DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
    DWORD fattr;                                   // hotspot change
    char alt_dirname[4] = { 0, 0, 0, 0 };

    if (dirp == 0) {
        errno = ENOMEM;
        return 0;
    }

    /*
     * Win32 accepts "\" in its POSIX stat(), but refuses to treat it
     * as a directory in FindFirstFile().  We detect this case here and
     * prepend the current drive name.
     */
    if (dirname[1] == '\0' && dirname[0] == '\\') {
        alt_dirname[0] = _getdrive() + 'A' - 1;
        alt_dirname[1] = ':';
        alt_dirname[2] = '\\';
        alt_dirname[3] = '\0';
        dirname = alt_dirname;
    }

    // +5 leaves room for the "\\*.*" wildcard suffix appended below plus NUL.
    dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
    if (dirp->path == 0) {
        free(dirp, mtInternal);
        errno = ENOMEM;
        return 0;
    }
    strcpy(dirp->path, dirname);

    fattr = GetFileAttributes(dirp->path);
    if (fattr == 0xffffffff) {               // INVALID_FILE_ATTRIBUTES
        free(dirp->path, mtInternal);
        free(dirp, mtInternal);
        errno = ENOENT;
        return 0;
    } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
        free(dirp->path, mtInternal);
        free(dirp, mtInternal);
        errno = ENOTDIR;
        return 0;
    }

    /* Append "*.*", or possibly "\\*.*", to path */
    if (dirp->path[1] == ':'
        && (dirp->path[2] == '\0'
            || (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
        /* No '\\' needed for cases like "Z:" or "Z:\" */
        strcat(dirp->path, "*.*");
    } else {
        strcat(dirp->path, "\\*.*");
    }

    dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
    if (dirp->handle == INVALID_HANDLE_VALUE) {
        // An empty directory yields ERROR_FILE_NOT_FOUND, which is not an
        // error for opendir; any other failure is reported as EACCES.
        if (GetLastError() != ERROR_FILE_NOT_FOUND) {
            free(dirp->path, mtInternal);
            free(dirp, mtInternal);
            errno = EACCES;
            return 0;
        }
    }
    return dirp;
}

/* parameter dbuf unused on Windows */

// Return the next entry of the directory stream, or NULL when exhausted.
// Note the one-entry lookahead: the name returned now was fetched by the
// previous FindFirstFile/FindNextFile call.
struct dirent *
os::readdir(DIR *dirp, dirent *dbuf)
{
    assert(dirp != NULL, "just checking");      // hotspot change
    if (dirp->handle == INVALID_HANDLE_VALUE) {
        return 0;
    }

    strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

    if (!FindNextFile(dirp->handle, &dirp->find_data)) {
        if (GetLastError() == ERROR_INVALID_HANDLE) {
            errno = EBADF;
            return 0;
        }
        // End of directory: close the find handle now; the INVALID value
        // marks the stream exhausted for subsequent readdir calls.
        FindClose(dirp->handle);
        dirp->handle = INVALID_HANDLE_VALUE;
    }

    return &dirp->dirent;
}

// Close the directory stream and free the DIR structure and its path.
int
os::closedir(DIR *dirp)
{
    assert(dirp != NULL, "just checking");      // hotspot change
    if (dirp->handle != INVALID_HANDLE_VALUE) {
        if (!FindClose(dirp->handle)) {
            errno = EBADF;
            return -1;
        }
        dirp->handle = INVALID_HANDLE_VALUE;
    }
    free(dirp->path, mtInternal);
    free(dirp, mtInternal);
    return 0;
}

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf)>0)
    return path_buf;
  else{
    // On failure return an empty string rather than NULL.
    path_buf[0]='\0';
    return path_buf;
  }
}

// True iff 'filename' names an existing file or directory.
static bool file_exists(const char* filename) {
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
}

// Build the platform file name of a DLL into 'buffer': "<pname>\<fname>.dll".
// If pname is a path-separator-joined search path, each element is probed
// with file_exists and the first hit wins. Returns false on overflow or if
// no search-path element contains the library.
bool os::dll_build_name(char *buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  const size_t pnamelen = pname ? strlen(pname) : 0;
  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > buflen) {
    return retval;
  }

  if (pnamelen == 0) {
    jio_snprintf(buffer, buflen, "%s.dll", fname);
    retval = true;
  } else if (c == ':' || c == '\\') {
    // pname already ends in a drive or directory separator.
    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0 ; i < n ; i++) {
      char* path = pelements[i];
      // Really shouldn't be NULL, but check can't hurt
      size_t plen = (path == NULL) ? 0 : strlen(path);
      if (plen == 0) {
        continue; // skip the empty path values
      }
      const char lastchar = path[plen - 1];
      if (lastchar == ':' || lastchar == '\\') {
        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
      } else {
        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
      }
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0 ; i < n ; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
    retval = true;
  }
  return retval;
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
const char* os::get_current_directory(char *buf, int buflen) {
  return _getcwd(buf, buflen);
}

//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if address in
// within the NTDLL address space.
//
static bool _addr_in_ntdll( address addr )
{
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if ( hmod == NULL ) return false;
  if ( !os::PSApiDll::GetModuleInformation( GetCurrentProcess(), hmod,
                                        &minfo, sizeof(MODULEINFO)) )
    return false;

  if ( (addr >= minfo.lpBaseOfDll) &&
       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage)))
    return true;
  else
    return false;
}
#endif


// Enumerate all modules for a given process ID
//
// Notice that Windows 95/98/Me and Windows NT/2000/XP have
// different API for doing this.
// We use PSAPI.DLL on NT based
// Windows and ToolHelp on 95/98/Me.

// Callback function that is called by enumerate_modules() on
// every DLL module.
// Input parameters:
//    int       pid,
//    char*     module_file_name,
//    address   module_base_addr,
//    unsigned  module_size,
//    void*     param
typedef int (*EnumModulesCallbackFunc)(int, char *, address, unsigned, void *);

// enumerate_modules for Windows NT, using PSAPI
// Returns the first non-zero value produced by 'func', or 0 if the
// enumeration completed (or could not be started).
static int _enumerate_modules_winnt( int pid, EnumModulesCallbackFunc func, void * param)
{
  HANDLE   hProcess ;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[ MAX_PATH ];
  int         result = 0;

  if (!os::PSApiDll::PSApiAvailable()) {
    return 0;
  }

  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid ) ;
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!os::PSApiDll::EnumProcessModules(hProcess, modules,
                           sizeof(modules), &size_needed)) {
      CloseHandle( hProcess );
      return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if(!os::PSApiDll::GetModuleFileNameEx(hProcess, modules[i],
                             filename, sizeof(filename))) {
        filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!os::PSApiDll::GetModuleInformation(hProcess, modules[i],
                               &modinfo, sizeof(modinfo))) {
        modinfo.lpBaseOfDll = NULL;
        modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = func(pid, filename, (address)modinfo.lpBaseOfDll,
                  modinfo.SizeOfImage, param);
    if (result) break;
  }

  CloseHandle( hProcess ) ;
  return result;
}


// enumerate_modules for Windows 95/98/ME, using TOOLHELP
// Same contract as _enumerate_modules_winnt.
static int
_enumerate_modules_windows( int pid, EnumModulesCallbackFunc func, void *param)
{
  HANDLE                hSnapShot ;
  static MODULEENTRY32  modentry ;
  int                   result = 0;

  if (!os::Kernel32Dll::HelpToolsAvailable()) {
    return 0;
  }

  // Get a handle to a Toolhelp snapshot of the system
  hSnapShot = os::Kernel32Dll::CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, pid ) ;
  if( hSnapShot == INVALID_HANDLE_VALUE ) {
      return FALSE ;
  }

  // iterate through all modules
  modentry.dwSize = sizeof(MODULEENTRY32) ;
  bool not_done = os::Kernel32Dll::Module32First( hSnapShot, &modentry ) != 0;

  while( not_done ) {
    // invoke the callback
    result=func(pid, modentry.szExePath, (address)modentry.modBaseAddr,
                modentry.modBaseSize, param);
    if (result) break;

    modentry.dwSize = sizeof(MODULEENTRY32) ;
    not_done = os::Kernel32Dll::Module32Next( hSnapShot, &modentry ) != 0;
  }

  CloseHandle(hSnapShot);
  return result;
}

// Dispatch to the PSAPI or ToolHelp enumerator depending on the OS family.
int enumerate_modules( int pid, EnumModulesCallbackFunc func, void * param )
{
  // Get current process ID if caller doesn't provide it.
  if (!pid) pid = os::current_process_id();

  if (os::win32::is_nt()) return _enumerate_modules_winnt  (pid, func, param);
  else                    return _enumerate_modules_windows(pid, func, param);
}

// In/out parameter block for _locate_module_by_addr.
struct _modinfo {
   address addr;      // in: address to look up
   char*   full_path; // point to a char buffer
   int     buflen;    // size of the buffer
   address base_addr; // out: base of the module containing addr
};

// enumerate_modules callback: stop (return 1) at the module whose address
// range covers pmod->addr, recording its path and base address.
static int _locate_module_by_addr(int pid, char * mod_fname, address base_addr,
                                  unsigned size, void * param) {
   struct _modinfo *pmod = (struct _modinfo *)param;
   if (!pmod) return -1;

   if (base_addr <= pmod->addr &&
       base_addr+size > pmod->addr) {
     // if a buffer is provided, copy path name to the buffer
     if (pmod->full_path) {
       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
     }
     pmod->base_addr = base_addr;
     return 1;
   }
   return 0;
}

// Map 'addr' to the library containing it; on success 'buf' receives the
// library path and *offset the offset of addr from the module base.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
// NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
//       return the full path to the DLL file, sometimes it returns path
//       to the corresponding PDB file (debug info); sometimes it only
//       returns partial path, which makes life painful.

   struct _modinfo mi;
   mi.addr      = addr;
   mi.full_path = buf;
   mi.buflen    = buflen;
   int pid = os::current_process_id();
   if (enumerate_modules(pid, _locate_module_by_addr, (void *)&mi)) {
      // buf already contains path name
      if (offset) *offset = addr - mi.base_addr;
      return true;
   } else {
      if (buf) buf[0] = '\0';
      if (offset) *offset = -1;
      return false;
   }
}

// Resolve 'addr' to a symbol name via the Decoder; on failure clear the
// outputs and return false.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  if (Decoder::decode(addr, buf, buflen, offset)) {
    return true;
  }
  if (offset != NULL)  *offset  = -1;
  if (buf != NULL) buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
// (uses the address of this very function as a probe point inside jvm.dll)
static int _locate_jvm_dll(int pid, char* mod_fname, address base_addr,
                    unsigned size, void * param) {
   if (!param) return -1;

   if (base_addr <= (address)_locate_jvm_dll &&
       base_addr+size > (address)_locate_jvm_dll) {
         ((address*)param)[0] = base_addr;
         ((address*)param)[1] = base_addr + size;
         return 1;
   }
   return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
bool os::address_is_in_vm(address addr) {
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    // Lazily locate jvm.dll on first query.
    int pid = os::current_process_id();
    if (!enumerate_modules(pid, _locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(int pid, char* fname, address base,
                         unsigned size, void* param) {
   if (!param) return -1;

   outputStream* st = (outputStream*)param;

   address end_addr = base + size;
   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base, end_addr, fname);
   return 0;
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
void * os::dll_load(const char *name, char *ebuf, int ebuflen)
{
  void * result = LoadLibrary(name);
  if (result != NULL)
  {
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen-1);
    ebuf[ebuflen-1]='\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen-1]='\0';
  int file_descriptor=::open(name, O_RDONLY | O_BINARY, 0);
  if (file_descriptor<0)
  {
    return NULL;
  }

  uint32_t signature_offset;
  uint16_t lib_arch=0;
  bool failed_to_get_lib_arch=
  (
    //Go to position 3c in the dll
    (os::seek_to_file_offset(file_descriptor,IMAGE_FILE_PTR_TO_SIGNATURE)<0)
    ||
    // Read location of signature
    (sizeof(signature_offset)!=
      (os::read(file_descriptor, (void*)&signature_offset,sizeof(signature_offset))))
    ||
    //Go to COFF File Header in dll
    //that is located after "signature" (4 bytes long)
    (os::seek_to_file_offset(file_descriptor,
      signature_offset+IMAGE_FILE_SIGNATURE_LENGTH)<0)
    ||
    //Read field that contains code of architecture
    // that dll was build for
    (sizeof(lib_arch)!=
      (os::read(file_descriptor, (void*)&lib_arch,sizeof(lib_arch))))
  );

  ::close(file_descriptor);
  if (failed_to_get_lib_arch)
  {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  typedef struct
  {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[]={
    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_IA64,      (char*)"IA 64"}
  };
  #if   (defined _M_IA64)
    static const uint16_t running_arch=IMAGE_FILE_MACHINE_IA64;
  #elif (defined _M_AMD64)
    static const uint16_t running_arch=IMAGE_FILE_MACHINE_AMD64;
  #elif (defined _M_IX86)
    static const uint16_t running_arch=IMAGE_FILE_MACHINE_I386;
  #else
    #error Method os::dll_load requires that one of following \
           is defined :_M_IA64,_M_AMD64 or _M_IX86
  #endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str=NULL,*lib_arch_str=NULL;
  for (unsigned int i=0;i<ARRAY_SIZE(arch_array);i++)
  {
    if (lib_arch==arch_array[i].arch_code)
      lib_arch_str=arch_array[i].arch_name;
    if (running_arch==arch_array[i].arch_code)
      running_arch_str=arch_array[i].arch_name;
  }

  assert(running_arch_str,
         "Didn't find runing architecture code in arch_array");

  // If the architure is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch)
  {
    return NULL;
  }

  if (lib_arch_str!=NULL)
  {
    ::_snprintf(ebuf, ebuflen-1,
      "Can't load %s-bit .dll on a %s-bit platform",
      lib_arch_str,running_arch_str);
  }
  else
  {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen-1,
      "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
      lib_arch,running_arch_str);
  }

  return NULL;
}

// List all loaded modules of this process on the given stream.
void os::print_dll_info(outputStream *st) {
   int pid = os::current_process_id();
   st->print_cr("Dynamic libraries:");
   enumerate_modules(pid, _print_module, (void *)st);
}

// Brief OS info is the same as the full version on Windows.
void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}

void os::print_os_info(outputStream* st) {
  st->print("OS:");

  os::win32::print_windows_version(st);
}

// Print a human-readable Windows product name derived from the
// major/minor version numbers reported by GetVersionEx.
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);

  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("N/A");
    return;
  }

  // Encode major.minor as major*1000+minor, e.g. 6.1 -> 6001.
  int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;
  if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
    switch (os_vers) {
    case 3051: st->print(" Windows NT 3.51"); break;
    case 4000: st->print(" Windows NT 4.0"); break;
    case 5000: st->print(" Windows 2000"); break;
    case 5001: st->print(" Windows XP"); break;
    case 5002:
    case 6000:
    case 6001:
    case 6002: {
      // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
      // find out whether we are running on 64 bit processor or not.
      SYSTEM_INFO si;
      ZeroMemory(&si, sizeof(SYSTEM_INFO));
      // GetNativeSystemInfo reports the real processor architecture even
      // under WOW64; fall back to GetSystemInfo where it is unavailable.
      if (!os::Kernel32Dll::GetNativeSystemInfoAvailable()){
        GetSystemInfo(&si);
      } else {
        os::Kernel32Dll::GetNativeSystemInfo(&si);
      }
      if (os_vers == 5002) {
        // 5.2 is XP x64 on workstations and Server 2003 otherwise.
        if (osvi.wProductType == VER_NT_WORKSTATION &&
            si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
          st->print(" Windows XP x64 Edition");
        else
          st->print(" Windows Server 2003 family");
      } else if (os_vers == 6000) {
        if (osvi.wProductType == VER_NT_WORKSTATION)
          st->print(" Windows Vista");
        else
          st->print(" Windows Server 2008");
        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
          st->print(" , 64 bit");
      } else if (os_vers == 6001) {
        if (osvi.wProductType == VER_NT_WORKSTATION) {
          st->print(" Windows 7");
        } else {
          // NOTE(review): 6.1 server is Windows Server 2008 R2; this branch
          // falls back to the generic NT major.minor print instead.
          // Unrecognized windows, print out its major and minor versions
          st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
        }
        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
          st->print(" , 64 bit");
      } else if (os_vers == 6002) {
        if (osvi.wProductType == VER_NT_WORKSTATION) {
          st->print(" Windows 8");
        } else {
          st->print(" Windows Server 2012");
        }
        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
          st->print(" , 64 bit");
      } else { // future os
        // Unrecognized windows, print out its major and minor versions
        st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
          st->print(" , 64 bit");
      }
      break;
    }
    default: // future windows, print out its major and minor versions
      st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
    }
  } else {
    switch (os_vers) {
    case 4000: st->print(" Windows 95"); break;
    case 4010: st->print(" Windows 98"); break;
    case 4090:
st->print(" Windows Me"); break; 1647 default: // future windows, print out its major and minor versions 1648 st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion); 1649 } 1650 } 1651 st->print(" Build %d", osvi.dwBuildNumber); 1652 st->print(" %s", osvi.szCSDVersion); // service pack 1653 st->cr(); 1654 } 1655 1656 void os::pd_print_cpu_info(outputStream* st) { 1657 // Nothing to do for now. 1658 } 1659 1660 void os::print_memory_info(outputStream* st) { 1661 st->print("Memory:"); 1662 st->print(" %dk page", os::vm_page_size()>>10); 1663 1664 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 1665 // value if total memory is larger than 4GB 1666 MEMORYSTATUSEX ms; 1667 ms.dwLength = sizeof(ms); 1668 GlobalMemoryStatusEx(&ms); 1669 1670 st->print(", physical %uk", os::physical_memory() >> 10); 1671 st->print("(%uk free)", os::available_memory() >> 10); 1672 1673 st->print(", swap %uk", ms.ullTotalPageFile >> 10); 1674 st->print("(%uk free)", ms.ullAvailPageFile >> 10); 1675 st->cr(); 1676 } 1677 1678 void os::print_siginfo(outputStream *st, void *siginfo) { 1679 EXCEPTION_RECORD* er = (EXCEPTION_RECORD*)siginfo; 1680 st->print("siginfo:"); 1681 st->print(" ExceptionCode=0x%x", er->ExceptionCode); 1682 1683 if (er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 1684 er->NumberParameters >= 2) { 1685 switch (er->ExceptionInformation[0]) { 1686 case 0: st->print(", reading address"); break; 1687 case 1: st->print(", writing address"); break; 1688 default: st->print(", ExceptionInformation=" INTPTR_FORMAT, 1689 er->ExceptionInformation[0]); 1690 } 1691 st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]); 1692 } else if (er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR && 1693 er->NumberParameters >= 2 && UseSharedSpaces) { 1694 FileMapInfo* mapinfo = FileMapInfo::current_info(); 1695 if (mapinfo->is_in_shared_space((void*)er->ExceptionInformation[1])) { 1696 st->print("\n\nError accessing class data sharing archive." 
\ 1697 " Mapped file inaccessible during execution, " \ 1698 " possible disk/network problem."); 1699 } 1700 } else { 1701 int num = er->NumberParameters; 1702 if (num > 0) { 1703 st->print(", ExceptionInformation="); 1704 for (int i = 0; i < num; i++) { 1705 st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]); 1706 } 1707 } 1708 } 1709 st->cr(); 1710 } 1711 1712 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { 1713 // do nothing 1714 } 1715 1716 static char saved_jvm_path[MAX_PATH] = {0}; 1717 1718 // Find the full path to the current module, jvm.dll or jvm_g.dll 1719 void os::jvm_path(char *buf, jint buflen) { 1720 // Error checking. 1721 if (buflen < MAX_PATH) { 1722 assert(false, "must use a large-enough buffer"); 1723 buf[0] = '\0'; 1724 return; 1725 } 1726 // Lazy resolve the path to current module. 1727 if (saved_jvm_path[0] != 0) { 1728 strcpy(buf, saved_jvm_path); 1729 return; 1730 } 1731 1732 buf[0] = '\0'; 1733 if (Arguments::created_by_gamma_launcher()) { 1734 // Support for the gamma launcher. Check for an 1735 // JAVA_HOME environment variable 1736 // and fix up the path so it looks like 1737 // libjvm.so is installed there (append a fake suffix 1738 // hotspot/libjvm.so). 
     char* java_home_var = ::getenv("JAVA_HOME");
     if (java_home_var != NULL && java_home_var[0] != 0) {

        strncpy(buf, java_home_var, buflen);

        // determine if this is a legacy image or modules image
        // modules image doesn't have "jre" subdirectory
        size_t len = strlen(buf);
        char* jrebin_p = buf + len;
        jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
        if (0 != _access(buf, 0)) {
          jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
        }
        len = strlen(buf);
        jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
     }
  }

  if(buf[0] == '\0') {
    // Not running under the gamma launcher (or JAVA_HOME probe failed):
    // ask the OS for the path of the module jvm.dll was loaded from.
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  strcpy(saved_jvm_path, buf);
}


void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}


void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  // 32-bit stdcall decoration: @<bytes of arguments>.
  st->print("@%d", args_size * sizeof(int));
#endif
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c

// Copy a text description of the last OS (or CRT) error into 'buf';
// returns the number of characters written, 0 if there was no error.
size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
          FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
          NULL,
          errval,
          0,
          buf,
          (DWORD)len,
          NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}

// Prefer the Win32 last-error code; fall back to errno when it is zero.
int os::get_last_error() {
  DWORD error = GetLastError();
  if (error == 0)
    error = errno;
  return (int)error;
}

// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;

// Signal trampoline: queue the signal for the Java signal dispatcher
// thread and re-install ourselves (CRT handlers are one-shot).
static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}

void* os::user_handler() {
  return (void*) UserHandler;
}

// Install 'handler' for 'signal_number', returning the previous handler.
// SIGBREAK is intercepted (unless -Xrs) so consoleHandler can keep
// priority for CTRL_CLOSE_EVENT; see the note above.
void* os::signal(int signal_number, void* handler) {
  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
    void (*oldHandler)(int) = sigbreakHandler;
    sigbreakHandler = (void (*)(int)) handler;
    return (void*) oldHandler;
  } else {
    return (void*)::signal(signal_number, (void (*)(int))handler);
  }
}

void os::signal_raise(int signal_number) {
  raise(signal_number);
}

// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events.  We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
  switch(event) {
    case CTRL_C_EVENT:
      if (is_error_reported()) {
        // Ctrl-C is pressed during error reporting, likely because the error
        // handler fails to abort. Let VM die immediately.
        os::die();
      }

      os::signal_raise(SIGINT);
      return TRUE;
      break;
    case CTRL_BREAK_EVENT:
      if (sigbreakHandler != NULL) {
        (*sigbreakHandler)(SIGBREAK);
      }
      return TRUE;
      break;
    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
      os::signal_raise(SIGTERM);
      return TRUE;
      break;
    default:
      break;
  }
  return FALSE;
}

/*
 * The following code is moved from os.cpp for making this
 * code platform specific, which it is by its very nature.
 */

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd(){
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Semaphore counting queued signals; released once per signal_notify.
static HANDLE sig_sem;

void os::signal_init_pd() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  sig_sem = ::CreateSemaphore(NULL, 0, NSIG+1, NULL);

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.  For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified.
  // This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case.  See bugs 4323062, 4345157, and related bugs.

  if (!ReduceSignalUsage) {
    // Add a CTRL-C handler
    SetConsoleCtrlHandler(consoleHandler, TRUE);
  }
}

// Queue 'signal_number' for the Java signal dispatcher and wake it up.
void os::signal_notify(int signal_number) {
  BOOL ret;

  Atomic::inc(&pending_signals[signal_number]);
  ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
  assert(ret != 0, "ReleaseSemaphore() failed");
}

// Claim one pending signal (atomically decrementing its counter) and
// return its number. If wait_for_signal is false and nothing is pending,
// return -1; otherwise block on sig_sem, honoring the thread-suspension
// ("suspend equivalent") protocol while blocked.
static int check_pending_signals(bool wait_for_signal) {
  DWORD ret;
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // CAS-decrement so concurrent claimants never double-consume a signal.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    if (!wait_for_signal) {
      return -1;
    }

    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      ret = ::WaitForSingleObject(sig_sem, INFINITE);
      assert(ret == WAIT_OBJECT_0, "WaitForSingleObject() failed");

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        //
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        //
        // Re-release the count we consumed so the signal is not lost,
        // then cooperate with the suspend request.
        ret = ::ReleaseSemaphore(sig_sem, 1, NULL);
        assert(ret != 0, "ReleaseSemaphore() failed");

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

// Non-blocking probe for a pending signal; -1 if none.
int os::signal_lookup() {
  return check_pending_signals(false);
}

// Block until a signal is pending and return its number.
int os::signal_wait() {
  return check_pending_signals(true);
}

// Implicit OS exception handling

// Redirect execution to 'handler', saving the faulting pc in the thread
// so the handler (e.g. a stub) can find it; returns the SEH disposition
// that resumes execution with the patched context.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo, address handler) {
  JavaThread* thread = JavaThread::current();
  // Save pc in thread
#ifdef _M_IA64
  thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->StIIP);
  // Set pc to handler
  exceptionInfo->ContextRecord->StIIP = (DWORD64)handler;
#elif _M_AMD64
  thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->Rip);
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
#else
  thread->set_saved_exception_pc((address)exceptionInfo->ContextRecord->Eip);
  // Set pc to handler
  exceptionInfo->ContextRecord->Eip = (LONG)handler;
#endif

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}


// Used for PostMortemDump
extern "C" void safepoints();
extern "C" void find(int x);
extern "C" void events();

// According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real world experience shows that occasionnaly
// the execution of an illegal instruction can generate the exception code 0xC000001E. This
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2021 2022 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E 2023 2024 // From "Execution Protection in the Windows Operating System" draft 0.35 2025 // Once a system header becomes available, the "real" define should be 2026 // included or copied here. 2027 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08 2028 2029 #define def_excpt(val) #val, val 2030 2031 struct siglabel { 2032 char *name; 2033 int number; 2034 }; 2035 2036 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual 2037 // C++ compiler contain this error code. Because this is a compiler-generated 2038 // error, the code is not listed in the Win32 API header files. 2039 // The code is actually a cryptic mnemonic device, with the initial "E" 2040 // standing for "exception" and the final 3 bytes (0x6D7363) representing the 2041 // ASCII values of "msc". 2042 2043 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363 2044 2045 2046 struct siglabel exceptlabels[] = { 2047 def_excpt(EXCEPTION_ACCESS_VIOLATION), 2048 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), 2049 def_excpt(EXCEPTION_BREAKPOINT), 2050 def_excpt(EXCEPTION_SINGLE_STEP), 2051 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), 2052 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND), 2053 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO), 2054 def_excpt(EXCEPTION_FLT_INEXACT_RESULT), 2055 def_excpt(EXCEPTION_FLT_INVALID_OPERATION), 2056 def_excpt(EXCEPTION_FLT_OVERFLOW), 2057 def_excpt(EXCEPTION_FLT_STACK_CHECK), 2058 def_excpt(EXCEPTION_FLT_UNDERFLOW), 2059 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO), 2060 def_excpt(EXCEPTION_INT_OVERFLOW), 2061 def_excpt(EXCEPTION_PRIV_INSTRUCTION), 2062 def_excpt(EXCEPTION_IN_PAGE_ERROR), 2063 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION), 2064 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2), 2065 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION), 2066 def_excpt(EXCEPTION_STACK_OVERFLOW), 2067 def_excpt(EXCEPTION_INVALID_DISPOSITION), 2068 def_excpt(EXCEPTION_GUARD_PAGE), 2069 def_excpt(EXCEPTION_INVALID_HANDLE), 2070 
def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION), 2071 NULL, 0 2072 }; 2073 2074 const char* os::exception_name(int exception_code, char *buf, size_t size) { 2075 for (int i = 0; exceptlabels[i].name != NULL; i++) { 2076 if (exceptlabels[i].number == exception_code) { 2077 jio_snprintf(buf, size, "%s", exceptlabels[i].name); 2078 return buf; 2079 } 2080 } 2081 2082 return NULL; 2083 } 2084 2085 //----------------------------------------------------------------------------- 2086 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2087 // handle exception caused by idiv; should only happen for -MinInt/-1 2088 // (division by zero is handled explicitly) 2089 #ifdef _M_IA64 2090 assert(0, "Fix Handle_IDiv_Exception"); 2091 #elif _M_AMD64 2092 PCONTEXT ctx = exceptionInfo->ContextRecord; 2093 address pc = (address)ctx->Rip; 2094 assert(pc[0] == 0xF7, "not an idiv opcode"); 2095 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2096 assert(ctx->Rax == min_jint, "unexpected idiv exception"); 2097 // set correct result values and continue after idiv instruction 2098 ctx->Rip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2099 ctx->Rax = (DWORD)min_jint; // result 2100 ctx->Rdx = (DWORD)0; // remainder 2101 // Continue the execution 2102 #else 2103 PCONTEXT ctx = exceptionInfo->ContextRecord; 2104 address pc = (address)ctx->Eip; 2105 assert(pc[0] == 0xF7, "not an idiv opcode"); 2106 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2107 assert(ctx->Eax == min_jint, "unexpected idiv exception"); 2108 // set correct result values and continue after idiv instruction 2109 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2110 ctx->Eax = (DWORD)min_jint; // result 2111 ctx->Edx = (DWORD)0; // remainder 2112 // Continue the execution 2113 #endif 2114 return EXCEPTION_CONTINUE_EXECUTION; 2115 } 2116 2117 #ifndef _WIN64 2118 //----------------------------------------------------------------------------- 2119 LONG WINAPI 
Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2120 // handle exception caused by native method modifying control word 2121 PCONTEXT ctx = exceptionInfo->ContextRecord; 2122 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2123 2124 switch (exception_code) { 2125 case EXCEPTION_FLT_DENORMAL_OPERAND: 2126 case EXCEPTION_FLT_DIVIDE_BY_ZERO: 2127 case EXCEPTION_FLT_INEXACT_RESULT: 2128 case EXCEPTION_FLT_INVALID_OPERATION: 2129 case EXCEPTION_FLT_OVERFLOW: 2130 case EXCEPTION_FLT_STACK_CHECK: 2131 case EXCEPTION_FLT_UNDERFLOW: 2132 jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std()); 2133 if (fp_control_word != ctx->FloatSave.ControlWord) { 2134 // Restore FPCW and mask out FLT exceptions 2135 ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0; 2136 // Mask out pending FLT exceptions 2137 ctx->FloatSave.StatusWord &= 0xffffff00; 2138 return EXCEPTION_CONTINUE_EXECUTION; 2139 } 2140 } 2141 2142 if (prev_uef_handler != NULL) { 2143 // We didn't handle this exception so pass it to the previous 2144 // UnhandledExceptionFilter. 2145 return (prev_uef_handler)(exceptionInfo); 2146 } 2147 2148 return EXCEPTION_CONTINUE_SEARCH; 2149 } 2150 #else //_WIN64 2151 /* 2152 On Windows, the mxcsr control bits are non-volatile across calls 2153 See also CR 6192333 2154 If EXCEPTION_FLT_* happened after some native method modified 2155 mxcsr - it is not a jvm fault. 2156 However should we decide to restore of mxcsr after a faulty 2157 native method we can uncomment following code 2158 jint MxCsr = INITIAL_MXCSR; 2159 // we can't use StubRoutines::addr_mxcsr_std() 2160 // because in Win64 mxcsr is not saved there 2161 if (MxCsr != ctx->MxCsr) { 2162 ctx->MxCsr = MxCsr; 2163 return EXCEPTION_CONTINUE_EXECUTION; 2164 } 2165 2166 */ 2167 #endif //_WIN64 2168 2169 2170 // Fatal error reporting is single threaded so we can make this a 2171 // static and preallocated. If it's more than MAX_PATH silently ignore 2172 // it. 
2173 static char saved_error_file[MAX_PATH] = {0}; 2174 2175 void os::set_error_file(const char *logfile) { 2176 if (strlen(logfile) <= MAX_PATH) { 2177 strncpy(saved_error_file, logfile, MAX_PATH); 2178 } 2179 } 2180 2181 static inline void report_error(Thread* t, DWORD exception_code, 2182 address addr, void* siginfo, void* context) { 2183 VMError err(t, exception_code, addr, siginfo, context); 2184 err.report_and_die(); 2185 2186 // If UseOsErrorReporting, this will return here and save the error file 2187 // somewhere where we can find it in the minidump. 2188 } 2189 2190 //----------------------------------------------------------------------------- 2191 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { 2192 if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH; 2193 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2194 #ifdef _M_IA64 2195 address pc = (address) exceptionInfo->ContextRecord->StIIP; 2196 #elif _M_AMD64 2197 address pc = (address) exceptionInfo->ContextRecord->Rip; 2198 #else 2199 address pc = (address) exceptionInfo->ContextRecord->Eip; 2200 #endif 2201 Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady 2202 2203 #ifndef _WIN64 2204 // Execution protection violation - win32 running on AMD64 only 2205 // Handled first to avoid misdiagnosis as a "normal" access violation; 2206 // This is safe to do because we have a new/unique ExceptionInformation 2207 // code for this condition. 2208 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2209 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2210 int exception_subcode = (int) exceptionRecord->ExceptionInformation[0]; 2211 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2212 2213 if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) { 2214 int page_size = os::vm_page_size(); 2215 2216 // Make sure the pc and the faulting address are sane. 
2217 // 2218 // If an instruction spans a page boundary, and the page containing 2219 // the beginning of the instruction is executable but the following 2220 // page is not, the pc and the faulting address might be slightly 2221 // different - we still want to unguard the 2nd page in this case. 2222 // 2223 // 15 bytes seems to be a (very) safe value for max instruction size. 2224 bool pc_is_near_addr = 2225 (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15); 2226 bool instr_spans_page_boundary = 2227 (align_size_down((intptr_t) pc ^ (intptr_t) addr, 2228 (intptr_t) page_size) > 0); 2229 2230 if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) { 2231 static volatile address last_addr = 2232 (address) os::non_memory_address_word(); 2233 2234 // In conservative mode, don't unguard unless the address is in the VM 2235 if (UnguardOnExecutionViolation > 0 && addr != last_addr && 2236 (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) { 2237 2238 // Set memory to RWX and retry 2239 address page_start = 2240 (address) align_size_down((intptr_t) addr, (intptr_t) page_size); 2241 bool res = os::protect_memory((char*) page_start, page_size, 2242 os::MEM_PROT_RWX); 2243 2244 if (PrintMiscellaneous && Verbose) { 2245 char buf[256]; 2246 jio_snprintf(buf, sizeof(buf), "Execution protection violation " 2247 "at " INTPTR_FORMAT 2248 ", unguarding " INTPTR_FORMAT ": %s", addr, 2249 page_start, (res ? "success" : strerror(errno))); 2250 tty->print_raw_cr(buf); 2251 } 2252 2253 // Set last_addr so if we fault again at the same address, we don't 2254 // end up in an endless loop. 2255 // 2256 // There are two potential complications here. Two threads trapping 2257 // at the same address at the same time could cause one of the 2258 // threads to think it already unguarded, and abort the VM. Likely 2259 // very rare. 
2260 // 2261 // The other race involves two threads alternately trapping at 2262 // different addresses and failing to unguard the page, resulting in 2263 // an endless loop. This condition is probably even more unlikely 2264 // than the first. 2265 // 2266 // Although both cases could be avoided by using locks or thread 2267 // local last_addr, these solutions are unnecessary complication: 2268 // this handler is a best-effort safety net, not a complete solution. 2269 // It is disabled by default and should only be used as a workaround 2270 // in case we missed any no-execute-unsafe VM code. 2271 2272 last_addr = addr; 2273 2274 return EXCEPTION_CONTINUE_EXECUTION; 2275 } 2276 } 2277 2278 // Last unguard failed or not unguarding 2279 tty->print_raw_cr("Execution protection violation"); 2280 report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord, 2281 exceptionInfo->ContextRecord); 2282 return EXCEPTION_CONTINUE_SEARCH; 2283 } 2284 } 2285 #endif // _WIN64 2286 2287 // Check to see if we caught the safepoint code in the 2288 // process of write protecting the memory serialization page. 2289 // It write enables the page immediately after protecting it 2290 // so just return. 2291 if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) { 2292 JavaThread* thread = (JavaThread*) t; 2293 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2294 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2295 if ( os::is_memory_serialize_page(thread, addr) ) { 2296 // Block current thread until the memory serialize page permission restored. 2297 os::block_on_serialize_page_trap(); 2298 return EXCEPTION_CONTINUE_EXECUTION; 2299 } 2300 } 2301 2302 if (t != NULL && t->is_Java_thread()) { 2303 JavaThread* thread = (JavaThread*) t; 2304 bool in_java = thread->thread_state() == _thread_in_Java; 2305 2306 // Handle potential stack overflows up front. 
2307 if (exception_code == EXCEPTION_STACK_OVERFLOW) { 2308 if (os::uses_stack_guard_pages()) { 2309 #ifdef _M_IA64 2310 // 2311 // If it's a legal stack address continue, Windows will map it in. 2312 // 2313 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2314 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2315 if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) 2316 return EXCEPTION_CONTINUE_EXECUTION; 2317 2318 // The register save area is the same size as the memory stack 2319 // and starts at the page just above the start of the memory stack. 2320 // If we get a fault in this area, we've run out of register 2321 // stack. If we are in java, try throwing a stack overflow exception. 2322 if (addr > thread->stack_base() && 2323 addr <= (thread->stack_base()+thread->stack_size()) ) { 2324 char buf[256]; 2325 jio_snprintf(buf, sizeof(buf), 2326 "Register stack overflow, addr:%p, stack_base:%p\n", 2327 addr, thread->stack_base() ); 2328 tty->print_raw_cr(buf); 2329 // If not in java code, return and hope for the best. 2330 return in_java ? Handle_Exception(exceptionInfo, 2331 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)) 2332 : EXCEPTION_CONTINUE_EXECUTION; 2333 } 2334 #endif 2335 if (thread->stack_yellow_zone_enabled()) { 2336 // Yellow zone violation. The o/s has unprotected the first yellow 2337 // zone page for us. Note: must call disable_stack_yellow_zone to 2338 // update the enabled status, even if the zone contains only one page. 2339 thread->disable_stack_yellow_zone(); 2340 // If not in java code, return and hope for the best. 2341 return in_java ? Handle_Exception(exceptionInfo, 2342 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)) 2343 : EXCEPTION_CONTINUE_EXECUTION; 2344 } else { 2345 // Fatal red zone violation. 
2346 thread->disable_stack_red_zone(); 2347 tty->print_raw_cr("An unrecoverable stack overflow has occurred."); 2348 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2349 exceptionInfo->ContextRecord); 2350 return EXCEPTION_CONTINUE_SEARCH; 2351 } 2352 } else if (in_java) { 2353 // JVM-managed guard pages cannot be used on win95/98. The o/s provides 2354 // a one-time-only guard page, which it has released to us. The next 2355 // stack overflow on this thread will result in an ACCESS_VIOLATION. 2356 return Handle_Exception(exceptionInfo, 2357 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2358 } else { 2359 // Can only return and hope for the best. Further stack growth will 2360 // result in an ACCESS_VIOLATION. 2361 return EXCEPTION_CONTINUE_EXECUTION; 2362 } 2363 } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2364 // Either stack overflow or null pointer exception. 2365 if (in_java) { 2366 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2367 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2368 address stack_end = thread->stack_base() - thread->stack_size(); 2369 if (addr < stack_end && addr >= stack_end - os::vm_page_size()) { 2370 // Stack overflow. 2371 assert(!os::uses_stack_guard_pages(), 2372 "should be caught by red zone code above."); 2373 return Handle_Exception(exceptionInfo, 2374 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2375 } 2376 // 2377 // Check for safepoint polling and implicit null 2378 // We only expect null pointers in the stubs (vtable) 2379 // the rest are checked explicitly now. 
2380 // 2381 CodeBlob* cb = CodeCache::find_blob(pc); 2382 if (cb != NULL) { 2383 if (os::is_poll_address(addr)) { 2384 address stub = SharedRuntime::get_poll_stub(pc); 2385 return Handle_Exception(exceptionInfo, stub); 2386 } 2387 } 2388 { 2389 #ifdef _WIN64 2390 // 2391 // If it's a legal stack address map the entire region in 2392 // 2393 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2394 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2395 if (addr > thread->stack_yellow_zone_base() && addr < thread->stack_base() ) { 2396 addr = (address)((uintptr_t)addr & 2397 (~((uintptr_t)os::vm_page_size() - (uintptr_t)1))); 2398 os::commit_memory((char *)addr, thread->stack_base() - addr, 2399 false ); 2400 return EXCEPTION_CONTINUE_EXECUTION; 2401 } 2402 else 2403 #endif 2404 { 2405 // Null pointer exception. 2406 #ifdef _M_IA64 2407 // We catch register stack overflows in compiled code by doing 2408 // an explicit compare and executing a st8(G0, G0) if the 2409 // BSP enters into our guard area. We test for the overflow 2410 // condition and fall into the normal null pointer exception 2411 // code if BSP hasn't overflowed. 2412 if ( in_java ) { 2413 if(thread->register_stack_overflow()) { 2414 assert((address)exceptionInfo->ContextRecord->IntS3 == 2415 thread->register_stack_limit(), 2416 "GR7 doesn't contain register_stack_limit"); 2417 // Disable the yellow zone which sets the state that 2418 // we've got a stack overflow problem. 2419 if (thread->stack_yellow_zone_enabled()) { 2420 thread->disable_stack_yellow_zone(); 2421 } 2422 // Give us some room to process the exception 2423 thread->disable_register_stack_guard(); 2424 // Update GR7 with the new limit so we can continue running 2425 // compiled code. 
2426 exceptionInfo->ContextRecord->IntS3 = 2427 (ULONGLONG)thread->register_stack_limit(); 2428 return Handle_Exception(exceptionInfo, 2429 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2430 } else { 2431 // 2432 // Check for implicit null 2433 // We only expect null pointers in the stubs (vtable) 2434 // the rest are checked explicitly now. 2435 // 2436 if (((uintptr_t)addr) < os::vm_page_size() ) { 2437 // an access to the first page of VM--assume it is a null pointer 2438 address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); 2439 if (stub != NULL) return Handle_Exception(exceptionInfo, stub); 2440 } 2441 } 2442 } // in_java 2443 2444 // IA64 doesn't use implicit null checking yet. So we shouldn't 2445 // get here. 2446 tty->print_raw_cr("Access violation, possible null pointer exception"); 2447 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2448 exceptionInfo->ContextRecord); 2449 return EXCEPTION_CONTINUE_SEARCH; 2450 #else /* !IA64 */ 2451 2452 // Windows 98 reports faulting addresses incorrectly 2453 if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr) || 2454 !os::win32::is_nt()) { 2455 address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); 2456 if (stub != NULL) return Handle_Exception(exceptionInfo, stub); 2457 } 2458 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2459 exceptionInfo->ContextRecord); 2460 return EXCEPTION_CONTINUE_SEARCH; 2461 #endif 2462 } 2463 } 2464 } 2465 2466 #ifdef _WIN64 2467 // Special care for fast JNI field accessors. 2468 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks 2469 // in and the heap gets shrunk before the field access. 
2470 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2471 address addr = JNI_FastGetField::find_slowcase_pc(pc); 2472 if (addr != (address)-1) { 2473 return Handle_Exception(exceptionInfo, addr); 2474 } 2475 } 2476 #endif 2477 2478 // Stack overflow or null pointer exception in native code. 2479 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2480 exceptionInfo->ContextRecord); 2481 return EXCEPTION_CONTINUE_SEARCH; 2482 } 2483 2484 if (in_java) { 2485 switch (exception_code) { 2486 case EXCEPTION_INT_DIVIDE_BY_ZERO: 2487 return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO)); 2488 2489 case EXCEPTION_INT_OVERFLOW: 2490 return Handle_IDiv_Exception(exceptionInfo); 2491 2492 } // switch 2493 } 2494 #ifndef _WIN64 2495 if (((thread->thread_state() == _thread_in_Java) || 2496 (thread->thread_state() == _thread_in_native)) && 2497 exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) 2498 { 2499 LONG result=Handle_FLT_Exception(exceptionInfo); 2500 if (result==EXCEPTION_CONTINUE_EXECUTION) return result; 2501 } 2502 #endif //_WIN64 2503 } 2504 2505 if (exception_code != EXCEPTION_BREAKPOINT) { 2506 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2507 exceptionInfo->ContextRecord); 2508 } 2509 return EXCEPTION_CONTINUE_SEARCH; 2510 } 2511 2512 #ifndef _WIN64 2513 // Special care for fast JNI accessors. 2514 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and 2515 // the heap gets shrunk before the field access. 2516 // Need to install our own structured exception handler since native code may 2517 // install its own. 
// SEH filter installed around fast JNI field accessors: if the access
// violation pc is a known fast-accessor trap site, resume at the slow-case
// continuation returned by JNI_FastGetField::find_slowcase_pc; otherwise
// keep searching for another handler.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    // Win32-only path (guarded by #ifndef _WIN64), hence Eip.
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Generates a wrapper that calls the generated fast accessor stub inside an
// MSVC __try/__except block using the filter above.  Returns 0 (of the
// primitive type) if the exception was not resolved to a slow-case pc.
#define DEFINE_FAST_GETFIELD(Return,Fieldname,Result) \
Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, jobject obj, jfieldID fieldID) { \
  __try { \
    return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, obj, fieldID); \
  } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) { \
  } \
  return 0; \
}

DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
DEFINE_FAST_GETFIELD(jchar,    char,   Char)
DEFINE_FAST_GETFIELD(jshort,   short,  Short)
DEFINE_FAST_GETFIELD(jint,     int,    Int)
DEFINE_FAST_GETFIELD(jlong,    long,   Long)
DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
DEFINE_FAST_GETFIELD(jdouble,  double, Double)

// Returns the address of the macro-generated wrapper for the given primitive
// type; ShouldNotReachHere() (then (address)-1) for any other BasicType.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
    case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
    case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
    case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
    case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
    case T_INT:     return (address)jni_fast_GetIntField_wrapper;
    case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
    case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
    case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
    default:        ShouldNotReachHere();
  }
  return (address)-1;
}
#endif

// Virtual Memory

int os::vm_page_size() { return os::win32::vm_page_size(); }
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}

// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
//   + select Control Panel -> Administrative Tools -> Local Security Policy
//   + select Local Policies -> User Rights Assignment
//   + double click "Lock pages in memory", add users and/or groups
//   + reboot
// Note the above steps are needed for administrator as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment it only uses large page if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future, if so the code below needs to be revisited.
2588 2589 #ifndef MEM_LARGE_PAGES 2590 #define MEM_LARGE_PAGES 0x20000000 2591 #endif 2592 2593 static HANDLE _hProcess; 2594 static HANDLE _hToken; 2595 2596 // Container for NUMA node list info 2597 class NUMANodeListHolder { 2598 private: 2599 int *_numa_used_node_list; // allocated below 2600 int _numa_used_node_count; 2601 2602 void free_node_list() { 2603 if (_numa_used_node_list != NULL) { 2604 FREE_C_HEAP_ARRAY(int, _numa_used_node_list, mtInternal); 2605 } 2606 } 2607 2608 public: 2609 NUMANodeListHolder() { 2610 _numa_used_node_count = 0; 2611 _numa_used_node_list = NULL; 2612 // do rest of initialization in build routine (after function pointers are set up) 2613 } 2614 2615 ~NUMANodeListHolder() { 2616 free_node_list(); 2617 } 2618 2619 bool build() { 2620 DWORD_PTR proc_aff_mask; 2621 DWORD_PTR sys_aff_mask; 2622 if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false; 2623 ULONG highest_node_number; 2624 if (!os::Kernel32Dll::GetNumaHighestNodeNumber(&highest_node_number)) return false; 2625 free_node_list(); 2626 _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal); 2627 for (unsigned int i = 0; i <= highest_node_number; i++) { 2628 ULONGLONG proc_mask_numa_node; 2629 if (!os::Kernel32Dll::GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false; 2630 if ((proc_aff_mask & proc_mask_numa_node)!=0) { 2631 _numa_used_node_list[_numa_used_node_count++] = i; 2632 } 2633 } 2634 return (_numa_used_node_count > 1); 2635 } 2636 2637 int get_count() {return _numa_used_node_count;} 2638 int get_node_list_entry(int n) { 2639 // for indexes out of range, returns -1 2640 return (n < _numa_used_node_count ? 
_numa_used_node_list[n] : -1); 2641 } 2642 2643 } numa_node_list_holder; 2644 2645 2646 2647 static size_t _large_page_size = 0; 2648 2649 static bool resolve_functions_for_large_page_init() { 2650 return os::Kernel32Dll::GetLargePageMinimumAvailable() && 2651 os::Advapi32Dll::AdvapiAvailable(); 2652 } 2653 2654 static bool request_lock_memory_privilege() { 2655 _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE, 2656 os::current_process_id()); 2657 2658 LUID luid; 2659 if (_hProcess != NULL && 2660 os::Advapi32Dll::OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) && 2661 os::Advapi32Dll::LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) { 2662 2663 TOKEN_PRIVILEGES tp; 2664 tp.PrivilegeCount = 1; 2665 tp.Privileges[0].Luid = luid; 2666 tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; 2667 2668 // AdjustTokenPrivileges() may return TRUE even when it couldn't change the 2669 // privilege. Check GetLastError() too. See MSDN document. 2670 if (os::Advapi32Dll::AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) && 2671 (GetLastError() == ERROR_SUCCESS)) { 2672 return true; 2673 } 2674 } 2675 2676 return false; 2677 } 2678 2679 static void cleanup_after_large_page_init() { 2680 if (_hProcess) CloseHandle(_hProcess); 2681 _hProcess = NULL; 2682 if (_hToken) CloseHandle(_hToken); 2683 _hToken = NULL; 2684 } 2685 2686 static bool numa_interleaving_init() { 2687 bool success = false; 2688 bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving); 2689 2690 // print a warning if UseNUMAInterleaving flag is specified on command line 2691 bool warn_on_failure = use_numa_interleaving_specified; 2692 # define WARN(msg) if (warn_on_failure) { warning(msg); } 2693 2694 // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages) 2695 size_t min_interleave_granularity = UseLargePages ? 
_large_page_size : os::vm_allocation_granularity(); 2696 NUMAInterleaveGranularity = align_size_up(NUMAInterleaveGranularity, min_interleave_granularity); 2697 2698 if (os::Kernel32Dll::NumaCallsAvailable()) { 2699 if (numa_node_list_holder.build()) { 2700 if (PrintMiscellaneous && Verbose) { 2701 tty->print("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count()); 2702 for (int i = 0; i < numa_node_list_holder.get_count(); i++) { 2703 tty->print("%d ", numa_node_list_holder.get_node_list_entry(i)); 2704 } 2705 tty->print("\n"); 2706 } 2707 success = true; 2708 } else { 2709 WARN("Process does not cover multiple NUMA nodes."); 2710 } 2711 } else { 2712 WARN("NUMA Interleaving is not supported by the operating system."); 2713 } 2714 if (!success) { 2715 if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag."); 2716 } 2717 return success; 2718 #undef WARN 2719 } 2720 2721 // this routine is used whenever we need to reserve a contiguous VA range 2722 // but we need to make separate VirtualAlloc calls for each piece of the range 2723 // Reasons for doing this: 2724 // * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise) 2725 // * UseNUMAInterleaving requires a separate node for each piece 2726 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags, DWORD prot, 2727 bool should_inject_error=false) { 2728 char * p_buf; 2729 // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size 2730 size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity(); 2731 size_t chunk_size = UseNUMAInterleaving ? 
NUMAInterleaveGranularity : page_size; 2732 2733 // first reserve enough address space in advance since we want to be 2734 // able to break a single contiguous virtual address range into multiple 2735 // large page commits but WS2003 does not allow reserving large page space 2736 // so we just use 4K pages for reserve, this gives us a legal contiguous 2737 // address space. then we will deallocate that reservation, and re alloc 2738 // using large pages 2739 const size_t size_of_reserve = bytes + chunk_size; 2740 if (bytes > size_of_reserve) { 2741 // Overflowed. 2742 return NULL; 2743 } 2744 p_buf = (char *) VirtualAlloc(addr, 2745 size_of_reserve, // size of Reserve 2746 MEM_RESERVE, 2747 PAGE_READWRITE); 2748 // If reservation failed, return NULL 2749 if (p_buf == NULL) return NULL; 2750 2751 os::release_memory(p_buf, bytes + chunk_size); 2752 2753 // we still need to round up to a page boundary (in case we are using large pages) 2754 // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size) 2755 // instead we handle this in the bytes_to_rq computation below 2756 p_buf = (char *) align_size_up((size_t)p_buf, page_size); 2757 2758 // now go through and allocate one chunk at a time until all bytes are 2759 // allocated 2760 size_t bytes_remaining = bytes; 2761 // An overflow of align_size_up() would have been caught above 2762 // in the calculation of size_of_reserve. 
2763 char * next_alloc_addr = p_buf; 2764 HANDLE hProc = GetCurrentProcess(); 2765 2766 #ifdef ASSERT 2767 // Variable for the failure injection 2768 long ran_num = os::random(); 2769 size_t fail_after = ran_num % bytes; 2770 #endif 2771 2772 int count=0; 2773 while (bytes_remaining) { 2774 // select bytes_to_rq to get to the next chunk_size boundary 2775 2776 size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size)); 2777 // Note allocate and commit 2778 char * p_new; 2779 2780 #ifdef ASSERT 2781 bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after); 2782 #else 2783 const bool inject_error_now = false; 2784 #endif 2785 2786 if (inject_error_now) { 2787 p_new = NULL; 2788 } else { 2789 if (!UseNUMAInterleaving) { 2790 p_new = (char *) VirtualAlloc(next_alloc_addr, 2791 bytes_to_rq, 2792 flags, 2793 prot); 2794 } else { 2795 // get the next node to use from the used_node_list 2796 assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected"); 2797 DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count()); 2798 p_new = (char *)os::Kernel32Dll::VirtualAllocExNuma(hProc, 2799 next_alloc_addr, 2800 bytes_to_rq, 2801 flags, 2802 prot, 2803 node); 2804 } 2805 } 2806 2807 if (p_new == NULL) { 2808 // Free any allocated pages 2809 if (next_alloc_addr > p_buf) { 2810 // Some memory was committed so release it. 
2811 size_t bytes_to_release = bytes - bytes_remaining; 2812 os::release_memory(p_buf, bytes_to_release); 2813 } 2814 #ifdef ASSERT 2815 if (should_inject_error) { 2816 if (TracePageSizes && Verbose) { 2817 tty->print_cr("Reserving pages individually failed."); 2818 } 2819 } 2820 #endif 2821 return NULL; 2822 } 2823 bytes_remaining -= bytes_to_rq; 2824 next_alloc_addr += bytes_to_rq; 2825 count++; 2826 } 2827 // made it this far, success 2828 return p_buf; 2829 } 2830 2831 2832 2833 void os::large_page_init() { 2834 if (!UseLargePages) return; 2835 2836 // print a warning if any large page related flag is specified on command line 2837 bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) || 2838 !FLAG_IS_DEFAULT(LargePageSizeInBytes); 2839 bool success = false; 2840 2841 # define WARN(msg) if (warn_on_failure) { warning(msg); } 2842 if (resolve_functions_for_large_page_init()) { 2843 if (request_lock_memory_privilege()) { 2844 size_t s = os::Kernel32Dll::GetLargePageMinimum(); 2845 if (s) { 2846 #if defined(IA32) || defined(AMD64) 2847 if (s > 4*M || LargePageSizeInBytes > 4*M) { 2848 WARN("JVM cannot use large pages bigger than 4mb."); 2849 } else { 2850 #endif 2851 if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) { 2852 _large_page_size = LargePageSizeInBytes; 2853 } else { 2854 _large_page_size = s; 2855 } 2856 success = true; 2857 #if defined(IA32) || defined(AMD64) 2858 } 2859 #endif 2860 } else { 2861 WARN("Large page is not supported by the processor."); 2862 } 2863 } else { 2864 WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory."); 2865 } 2866 } else { 2867 WARN("Large page is not supported by the operating system."); 2868 } 2869 #undef WARN 2870 2871 const size_t default_page_size = (size_t) vm_page_size(); 2872 if (success && _large_page_size > default_page_size) { 2873 _page_sizes[0] = _large_page_size; 2874 _page_sizes[1] = default_page_size; 2875 _page_sizes[2] = 0; 2876 } 2877 2878 
cleanup_after_large_page_init(); 2879 UseLargePages = success; 2880 } 2881 2882 // On win32, one cannot release just a part of reserved memory, it's an 2883 // all or nothing deal. When we split a reservation, we must break the 2884 // reservation into two reservations. 2885 void os::pd_split_reserved_memory(char *base, size_t size, size_t split, 2886 bool realloc) { 2887 if (size > 0) { 2888 release_memory(base, size); 2889 if (realloc) { 2890 reserve_memory(split, base); 2891 } 2892 if (size != split) { 2893 reserve_memory(size - split, base + split); 2894 } 2895 } 2896 } 2897 2898 char* os::reserve_memory_aligned(size_t size, size_t alignment) { 2899 assert(alignment & (os::vm_allocation_granularity() - 1) == 0, 2900 "Alignment must be a multiple of allocation granularity (page size)"); 2901 assert(size & (alignment -1) == 0, "size must be 'alignment' aligned"); 2902 size_t extra_size = size + alignment; 2903 char* aligned_base = NULL; 2904 2905 do { 2906 char* extra_base = os::reserve_memory(extra_size, NULL, alignment); 2907 if (extra_base == NULL) { 2908 return NULL; 2909 } 2910 // Do manual alignment 2911 aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment); 2912 2913 os::release_memory(extra_base, extra_size); 2914 2915 aligned_base = os::reserve_memory(size, aligned_base); 2916 2917 } while (aligned_base == NULL); 2918 2919 return aligned_base; 2920 } 2921 2922 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) { 2923 assert((size_t)addr % os::vm_allocation_granularity() == 0, 2924 "reserve alignment"); 2925 assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size"); 2926 char* res; 2927 // note that if UseLargePages is on, all the areas that require interleaving 2928 // will go thru reserve_memory_special rather than thru here. 
  // NUMA interleaving only applies on this path when large pages are off;
  // with large pages on, interleaved areas go through reserve_memory_special.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if( Verbose && PrintMiscellaneous ) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if( Verbose && PrintMiscellaneous ) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took %ld ms (%ld ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available, so a plain reserve doubles as an "attempt at" here.
  return reserve_memory(bytes, requested_addr);
}

// Large page size chosen by large_page_init(); 0 before that runs.
size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

bool os::can_execute_large_page_memory() {
  return true;
}

// Reserve AND commit 'bytes' of large-page memory (large pages cannot be
// committed lazily on Windows - see can_commit_large_page_memory()).
char* os::reserve_memory_special(size_t bytes, char* addr, bool exec) {

  const DWORD prot = exec ?
PAGE_EXECUTE_READWRITE : PAGE_READWRITE; 2979 const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 2980 2981 // with large pages, there are two cases where we need to use Individual Allocation 2982 // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003) 2983 // 2) NUMA Interleaving is enabled, in which case we use a different node for each page 2984 if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) { 2985 if (TracePageSizes && Verbose) { 2986 tty->print_cr("Reserving large pages individually."); 2987 } 2988 char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError); 2989 if (p_buf == NULL) { 2990 // give an appropriate warning message 2991 if (UseNUMAInterleaving) { 2992 warning("NUMA large page allocation failed, UseLargePages flag ignored"); 2993 } 2994 if (UseLargePagesIndividualAllocation) { 2995 warning("Individually allocated large pages failed, " 2996 "use -XX:-UseLargePagesIndividualAllocation to turn off"); 2997 } 2998 return NULL; 2999 } 3000 3001 return p_buf; 3002 3003 } else { 3004 // normal policy just allocate it all at once 3005 DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 3006 char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot); 3007 return res; 3008 } 3009 } 3010 3011 bool os::release_memory_special(char* base, size_t bytes) { 3012 return release_memory(base, bytes); 3013 } 3014 3015 void os::print_statistics() { 3016 } 3017 3018 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) { 3019 if (bytes == 0) { 3020 // Don't bother the OS with noops. 3021 return true; 3022 } 3023 assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries"); 3024 assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks"); 3025 // Don't attempt to print anything if the OS call fails. We're 3026 // probably low on resources, so the print itself may cause crashes. 

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) return false;
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) return false;
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      // Commit at most one underlying reservation's worth per iteration.
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, PAGE_READWRITE) == NULL)
        return false;
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq, PAGE_EXECUTE_READWRITE, &oldprot))
          return false;
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}

// The alignment hint is unused on Windows; delegate to the plain commit.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  return commit_memory(addr, size, exec);
}

bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
  return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
}

bool os::pd_release_memory(char* addr, size_t bytes) {
  // MEM_RELEASE frees the entire reservation that 'addr' came from;
  // Windows requires dwSize == 0 for that, so 'bytes' is intentionally
  // not passed to VirtualFree here.
  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
}

// Stack guard pages: on Windows these are plain committed pages whose
// protection is flipped via os::protect_memory() by shared code.
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}

// Set protections specified
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  // Translate the platform-independent ProtType to a Win32 page protection.
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
  case MEM_PROT_READ: p = PAGE_READONLY; break;
  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
  default:
    ShouldNotReachHere();
  }

  DWORD old_status;

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed && !commit_memory(addr, bytes, prot == MEM_PROT_RWX)) {
    fatal("cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard page
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
  //
  // Pages in the region become guard pages. Any attempt to access a guard page
  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
  // the guard page status. Guard pages thus act as a one-time access alarm.
  return VirtualProtect(addr, bytes, p, &old_status) != 0;
}

// Arm 'bytes' at 'addr' as one-shot Win32 guard pages (see note above).
bool os::guard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}

bool os::unguard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}

// NUMA / paging hints with no Windows implementation - intentionally empty.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes)    { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
bool os::numa_topology_changed()                       { return false; }
// Always reports at least one group, even on UMA machines.
size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id()                            { return 0; }
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // Provide an answer for UMA systems
    ids[0] = 0;
    return 1;
  } else {
    // check for size bigger than actual groups_num
    size = MIN2(size, numa_get_groups_num());
    for (int i = 0; i < (int)size; i++) {
      ids[i] = numa_node_list_holder.get_node_list_entry(i);
    }
    return size;
  }
}

bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}

#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL

// Resume a thread created suspended by pd_create_thread().
void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}

// RAII helper: raises the system timer resolution to 1 ms for the lifetime
// of the object when the requested sleep is not a multiple of the default.
class HighResolutionInterval {
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
private:
  jlong resolution;
public:
  HighResolutionInterval(jlong ms) {
    // Only raise the resolution when ms is not a multiple of the
    // default 10 ms timer granularity.
    resolution = ms % 10L;
    if (resolution != 0) {
      MMRESULT result = timeBeginPeriod(1L);
    }
  }
  ~HighResolutionInterval() {
    if (resolution != 0) {
      MMRESULT result = timeEndPeriod(1L);
    }
    resolution = 0L;
  }
};

// Sleep for 'ms' milliseconds; interruptable sleeps wait on the thread's
// interrupt event and return OS_INTRPT when it fires, OS_TIMEOUT otherwise.
int os::sleep(Thread* thread, jlong ms, bool interruptable) {
  jlong limit = (jlong) MAXDWORD;

  // Win32 wait primitives take a DWORD timeout, so very long sleeps are
  // performed in MAXDWORD-sized slices.
  while(ms > limit) {
    int res;
    if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT)
      return res;
    ms -= limit;
  }

  assert(thread == Thread::current(),  "thread consistency check");
  OSThread* osthread = thread->osthread();
  OSThreadWaitState osts(osthread, false /* not Object.wait() */);
  int result;
  if (interruptable) {
    assert(thread->is_Java_thread(), "must be java thread");
    JavaThread *jt = (JavaThread *) thread;
    ThreadBlockInVM tbivm(jt);

    jt->set_suspend_equivalent();
    // cleared by handle_special_suspend_equivalent_condition() or
    // java_suspend_self() via check_and_wait_while_suspended()

    HANDLE events[1];
    events[0] = osthread->interrupt_event();
    HighResolutionInterval *phri=NULL;
    if(!ForceTimeHighResolution)
      phri = new HighResolutionInterval( ms );
    if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) {
      result = OS_TIMEOUT;
    } else {
      ResetEvent(osthread->interrupt_event());
      osthread->set_interrupted(false);
      result = OS_INTRPT;
    }
    delete phri; //if it is NULL, harmless

    // were we externally suspended while we were waiting?
    jt->check_and_wait_while_suspended();
  } else {
    assert(!thread->is_Java_thread(), "must not be java thread");
    Sleep((long) ms);
    result = OS_TIMEOUT;
  }
  return result;
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    Sleep(100000);  // ... 100 seconds at a time
  }
}

typedef BOOL (WINAPI * STTSignature)(void) ;

os::YieldResult os::NakedYield() {
  // Use either SwitchToThread() or Sleep(0)
  // Consider passing back the return value from SwitchToThread().
  if (os::Kernel32Dll::SwitchToThreadAvailable()) {
    return SwitchToThread() ? os::YIELD_SWITCHED : os::YIELD_NONEREADY ;
  } else {
    // SwitchToThread() is unavailable (pre-NT); Sleep(0) yields the
    // remainder of the time slice but gives no switched/not-switched info.
    Sleep(0);
  }
  return os::YIELD_UNKNOWN ;
}

void os::yield() {  os::NakedYield(); }

void os::yield_all(int attempts) {
  // Yields to all threads, including threads with lower priorities
  Sleep(1);
}

// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.

// Default mapping from Java thread priorities (index) to Win32 priorities.
int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};

// Alternate mapping selected by -XX:ThreadPriorityPolicy=1: spreads the
// Java range wider and uses TIME_CRITICAL at the top.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};

static int prio_init() {
  // If ThreadPriorityPolicy is 1, switch tables
  if (ThreadPriorityPolicy == 1) {
    int i;
    for (i = 0; i < CriticalPriority + 1; i++) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  if (UseCriticalJavaThreadPriority) {
    // Let MaxPriority threads run at the critical-priority OS level too.
    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority] ;
  }
  return 0;
}

OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) return OS_OK;
  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
  return ret ?
    OS_OK : OS_ERR;
}

OSReturn os::get_native_priority(const Thread* const thread, int* priority_ptr) {
  if ( !UseThreadPriorities ) {
    // Priorities disabled: report the normal priority unconditionally.
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}


// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
void os::hint_no_preempt() {}

void os::interrupt(Thread* thread) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread())
    ((JavaThread*)thread)->parker()->unpark();

  ParkEvent * ev = thread->_ParkEvent ;
  if (ev != NULL) ev->unpark() ;

}


bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  assert(!thread->is_Java_thread() || Thread::current() == thread || Threads_lock->owned_by_self(),
         "possibility of dangling Thread pointer");

  OSThread* osthread = thread->osthread();
  bool interrupted = osthread->interrupted();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here.
  // It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}

// Gets a pc (hint) for a running thread. Currently used only for profiling.
ExtendedPC os::get_thread_pc(Thread* thread) {
  CONTEXT context;
  context.ContextFlags = CONTEXT_CONTROL;
  HANDLE handle = thread->osthread()->thread_handle();
#ifdef _M_IA64
  assert(0, "Fix get_thread_pc");
  return ExtendedPC(NULL);
#else
  if (GetThreadContext(handle, &context)) {
#ifdef _M_AMD64
    return ExtendedPC((address) context.Rip);
#else
    return ExtendedPC((address) context.Eip);
#endif
  } else {
    // GetThreadContext failed; caller treats NULL as "no pc available".
    return ExtendedPC(NULL);
  }
#endif
}

// GetCurrentThreadId() returns DWORD
intx os::current_thread_id()  { return GetCurrentThreadId(); }

// Pid captured once in os::init(); 0 until then.
static int _initial_pid = 0;

int os::current_process_id()
{
  return (_initial_pid ?
          _initial_pid : _getpid());
}

int    os::win32::_vm_page_size              = 0;
int    os::win32::_vm_allocation_granularity = 0;
int    os::win32::_processor_type            = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int    os::win32::_processor_level           = 0;
julong os::win32::_physical_memory           = 0;
size_t os::win32::_default_stack_size        = 0;

intx   os::win32::_os_thread_limit           = 0;
volatile intx os::win32::_os_thread_count    = 0;

bool   os::win32::_is_nt                     = false;
bool   os::win32::_is_windows_2003           = false;
bool   os::win32::_is_windows_server         = false;

// Query basic host parameters (page size, allocation granularity, processor
// count, physical memory, OS flavor) and cache them in the statics above.
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size    = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type  = si.dwProcessorType;
  _processor_level = si.wProcessorLevel;
  set_processor_count(si.dwNumberOfProcessors);

  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);

  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatusEx(&ms);
  _physical_memory = ms.ullTotalPhys;

  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch(oi.dwPlatformId) {
    case VER_PLATFORM_WIN32_WINDOWS: _is_nt = false; break;
    case VER_PLATFORM_WIN32_NT:
      _is_nt = true;
      {
        // 5002 == major 5, minor 2 == Windows Server 2003 / XP x64.
        int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
        if (os_vers == 5002) {
          _is_windows_2003 = true;
        }
        if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
            oi.wProductType == VER_NT_SERVER) {
          _is_windows_server = true;
        }
      }
      break;
    default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
    "stack size not a multiple of page size");

  initialize_performance_counter();

  // Win95/Win98 scheduler bug work-around. The Win95/98 scheduler is
  // known to deadlock the system, if the VM issues thread operations with
  // a too high frequency, e.g., such as changing the priorities.
  // The 6000 seems to work well - no deadlocks have been noticed on the test
  // programs that we have seen experience this problem.
  if (!os::win32::is_nt()) {
    StarvationMonitorInterval = 6000;
  }
}


// Load a system DLL by bare name from the System/Windows directories only
// (never from the application directory or PATH), to avoid DLL preloading
// attacks.
HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf, int ebuflen) {
  char path[MAX_PATH];
  DWORD size;
  DWORD pathLen = (DWORD)sizeof(path);
  HINSTANCE result = NULL;

  // only allow library name without path component
  assert(strchr(name, '\\') == NULL, "path not allowed");
  assert(strchr(name, ':') == NULL, "path not allowed");
  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
    jio_snprintf(ebuf, ebuflen,
      "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
    return NULL;
  }

  // search system directory
  // NOTE(review): the strcat calls assume dir + '\\' + name fits in
  // MAX_PATH; callers pass short well-known DLL names - confirm before
  // reusing this with arbitrary names.
  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
    strcat(path, "\\");
    strcat(path, name);
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // try Windows directory
  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
    strcat(path, "\\");
    strcat(path, name);
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  jio_snprintf(ebuf, ebuflen,
    "os::win32::load_windows_dll() cannot load %s from system directories.", name);
  return NULL;
}

// Put the standard streams into binary mode so VM output is not subject
// to CRLF translation.
void os::win32::setmode_streams() {
  _setmode(_fileno(stdin), _O_BINARY);
  _setmode(_fileno(stdout),
           _O_BINARY);
  _setmode(_fileno(stderr), _O_BINARY);
}


bool os::is_debugger_attached() {
  return IsDebuggerPresent() ? true : false;
}


void os::wait_for_keypress_at_exit(void) {
  if (PauseAtExit) {
    fprintf(stderr, "Press any key to continue...\n");
    fgetc(stdin);
  }
}


// Modal yes/no dialog; returns nonzero iff the user clicked "Yes".
int os::message_box(const char* title, const char* message) {
  int result = MessageBox(NULL, message, title,
                          MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
  return result == IDYES;
}

// Thread-local storage: thin wrappers over the Win32 TLS API.
int os::allocate_thread_local_storage() {
  return TlsAlloc();
}


void os::free_thread_local_storage(int index) {
  TlsFree(index);
}


void os::thread_local_storage_at_put(int index, void* value) {
  TlsSetValue(index, value);
  assert(thread_local_storage_at(index) == value, "Just checking");
}


void* os::thread_local_storage_at(int index) {
  return TlsGetValue(index);
}


#ifndef PRODUCT
#ifndef _WIN64
// Helpers to check whether NX protection is enabled
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      pex->ExceptionRecord->NumberParameters > 0 &&
      pex->ExceptionRecord->ExceptionInformation[0] ==
      EXCEPTION_INFO_EXEC_VIOLATION) {
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 }; // ret
  void *code_ptr = (void *)code;
  __try {
    __asm call code_ptr
  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
    tty->print_raw_cr("NX protection detected.");
  }
}
#endif // _WIN64
#endif // PRODUCT

// this is called _before_ the global arguments
// have been parsed
void os::init(void) {
  _initial_pid = _getpid();

  init_random(1234567);

  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // For better scalability on MP systems (must be called after initialize_system_info)
#ifndef PRODUCT
  if (is_MP()) {
    NoYieldsInMicrolock = true;
  }
#endif
  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation,
    os::win32::is_windows_2003());

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  // GetCurrentThread() is also a pseudo handle; duplicate it to get a real
  // handle usable from other threads.
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();
}

// To install functions for atexit processing
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // Allocate a single page and mark it as readable for safepoint polling
  address polling_page = (address)VirtualAlloc(NULL, os::vm_page_size(), MEM_RESERVE, PAGE_READONLY);
  guarantee( polling_page != NULL, "Reserve Failed for polling page");

  address return_page  = (address)VirtualAlloc(polling_page, os::vm_page_size(), MEM_COMMIT, PAGE_READONLY);
  guarantee( return_page != NULL, "Commit Failed for polling page");

  os::set_polling_page( polling_page );

#ifndef PRODUCT
  if( Verbose && PrintMiscellaneous )
    tty->print("[SafePoint Polling address: " INTPTR_FORMAT "]\n", (intptr_t)polling_page);
#endif

  if (!UseMembar) {
    address mem_serialize_page = (address)VirtualAlloc(NULL, os::vm_page_size(),
                                                       MEM_RESERVE, PAGE_READWRITE);
    guarantee( mem_serialize_page != NULL, "Reserve Failed for memory serialize page");

    return_page = (address)VirtualAlloc(mem_serialize_page, os::vm_page_size(), MEM_COMMIT, PAGE_READWRITE);
    guarantee( return_page != NULL, "Commit Failed for memory serialize page");

    os::set_memory_serialize_page( mem_serialize_page );

#ifndef PRODUCT
    if(Verbose && PrintMiscellaneous)
      tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
#endif
  }

  os::large_page_init();

  // Setup Windows Exceptions

  // for debugging float code generation bugs
  if (ForceFloatExceptions) {
#ifndef  _WIN64
    static long fp_control_word = 0;
    __asm { fstcw fp_control_word }
    // see Intel PPro Manual, Vol. 2, p 7-16
    const long precision = 0x20;
    const long underflow = 0x10;
    const long overflow  = 0x08;
    const long zero_div  = 0x04;
    const long denorm    = 0x02;
    const long invalid   = 0x01;
    // unmask only the 'invalid operation' x87 FP exception
    fp_control_word |= invalid;
    __asm { fldcw fp_control_word }
#endif
  }

  // If stack_commit_size is 0, windows will reserve the default size,
  // but only commit a small portion of it.
  size_t stack_commit_size = round_to(ThreadStackSize*K, os::vm_page_size());
  size_t default_reserve_size = os::win32::default_stack_size();
  size_t actual_reserve_size = stack_commit_size;
  if (stack_commit_size < default_reserve_size) {
    // If stack_commit_size == 0, we want this too
    actual_reserve_size = default_reserve_size;
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size.  Add a page for compiler2 recursion in main thread.
3717 // Add in 2*BytesPerWord times page size to account for VM stack during 3718 // class initialization depending on 32 or 64 bit VM. 3719 size_t min_stack_allowed = 3720 (size_t)(StackYellowPages+StackRedPages+StackShadowPages+ 3721 2*BytesPerWord COMPILER2_PRESENT(+1)) * os::vm_page_size(); 3722 if (actual_reserve_size < min_stack_allowed) { 3723 tty->print_cr("\nThe stack size specified is too small, " 3724 "Specify at least %dk", 3725 min_stack_allowed / K); 3726 return JNI_ERR; 3727 } 3728 3729 JavaThread::set_stack_size_at_create(stack_commit_size); 3730 3731 // Calculate theoretical max. size of Threads to guard gainst artifical 3732 // out-of-memory situations, where all available address-space has been 3733 // reserved by thread stacks. 3734 assert(actual_reserve_size != 0, "Must have a stack"); 3735 3736 // Calculate the thread limit when we should start doing Virtual Memory 3737 // banging. Currently when the threads will have used all but 200Mb of space. 3738 // 3739 // TODO: consider performing a similar calculation for commit size instead 3740 // as reserve size, since on a 64-bit platform we'll run into that more 3741 // often than running out of virtual memory space. We can use the 3742 // lower value of the two calculations as the os_thread_limit. 3743 size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K); 3744 win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size); 3745 3746 // at exit methods are called in the reverse order of their registration. 3747 // there is no limit to the number of functions registered. atexit does 3748 // not set errno. 3749 3750 if (PerfAllowAtExitRegistration) { 3751 // only register atexit functions if PerfAllowAtExitRegistration is set. 3752 // atexit functions can be delayed until process exit time, which 3753 // can be problematic for embedded VM situations. Embedded VMs should 3754 // call DestroyJavaVM() to assure that VM resources are released. 
3755 3756 // note: perfMemory_exit_helper atexit function may be removed in 3757 // the future if the appropriate cleanup code can be added to the 3758 // VM_Exit VMOperation's doit method. 3759 if (atexit(perfMemory_exit_helper) != 0) { 3760 warning("os::init_2 atexit(perfMemory_exit_helper) failed"); 3761 } 3762 } 3763 3764 #ifndef _WIN64 3765 // Print something if NX is enabled (win32 on AMD64) 3766 NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection()); 3767 #endif 3768 3769 // initialize thread priority policy 3770 prio_init(); 3771 3772 if (UseNUMA && !ForceNUMA) { 3773 UseNUMA = false; // We don't fully support this yet 3774 } 3775 3776 if (UseNUMAInterleaving) { 3777 // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag 3778 bool success = numa_interleaving_init(); 3779 if (!success) UseNUMAInterleaving = false; 3780 } 3781 3782 return JNI_OK; 3783 } 3784 3785 void os::init_3(void) { 3786 return; 3787 } 3788 3789 // Mark the polling page as unreadable 3790 void os::make_polling_page_unreadable(void) { 3791 DWORD old_status; 3792 if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_NOACCESS, &old_status) ) 3793 fatal("Could not disable polling page"); 3794 }; 3795 3796 // Mark the polling page as readable 3797 void os::make_polling_page_readable(void) { 3798 DWORD old_status; 3799 if( !VirtualProtect((char *)_polling_page, os::vm_page_size(), PAGE_READONLY, &old_status) ) 3800 fatal("Could not enable polling page"); 3801 }; 3802 3803 3804 int os::stat(const char *path, struct stat *sbuf) { 3805 char pathbuf[MAX_PATH]; 3806 if (strlen(path) > MAX_PATH - 1) { 3807 errno = ENAMETOOLONG; 3808 return -1; 3809 } 3810 os::native_path(strcpy(pathbuf, path)); 3811 int ret = ::stat(pathbuf, sbuf); 3812 if (sbuf != NULL && UseUTCFileTimestamp) { 3813 // Fix for 6539723. 
st_mtime returned from stat() is dependent on 3814 // the system timezone and so can return different values for the 3815 // same file if/when daylight savings time changes. This adjustment 3816 // makes sure the same timestamp is returned regardless of the TZ. 3817 // 3818 // See: 3819 // http://msdn.microsoft.com/library/ 3820 // default.asp?url=/library/en-us/sysinfo/base/ 3821 // time_zone_information_str.asp 3822 // and 3823 // http://msdn.microsoft.com/library/default.asp?url= 3824 // /library/en-us/sysinfo/base/settimezoneinformation.asp 3825 // 3826 // NOTE: there is a insidious bug here: If the timezone is changed 3827 // after the call to stat() but before 'GetTimeZoneInformation()', then 3828 // the adjustment we do here will be wrong and we'll return the wrong 3829 // value (which will likely end up creating an invalid class data 3830 // archive). Absent a better API for this, or some time zone locking 3831 // mechanism, we'll have to live with this risk. 3832 TIME_ZONE_INFORMATION tz; 3833 DWORD tzid = GetTimeZoneInformation(&tz); 3834 int daylightBias = 3835 (tzid == TIME_ZONE_ID_DAYLIGHT) ? tz.DaylightBias : tz.StandardBias; 3836 sbuf->st_mtime += (tz.Bias + daylightBias) * 60; 3837 } 3838 return ret; 3839 } 3840 3841 3842 #define FT2INT64(ft) \ 3843 ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime)) 3844 3845 3846 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) 3847 // are used by JVM M&M and JVMTI to get user+sys or user CPU time 3848 // of a thread. 3849 // 3850 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns 3851 // the fast estimate available on the platform. 
3852 3853 // current_thread_cpu_time() is not optimized for Windows yet 3854 jlong os::current_thread_cpu_time() { 3855 // return user + sys since the cost is the same 3856 return os::thread_cpu_time(Thread::current(), true /* user+sys */); 3857 } 3858 3859 jlong os::thread_cpu_time(Thread* thread) { 3860 // consistent with what current_thread_cpu_time() returns. 3861 return os::thread_cpu_time(thread, true /* user+sys */); 3862 } 3863 3864 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 3865 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 3866 } 3867 3868 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) { 3869 // This code is copy from clasic VM -> hpi::sysThreadCPUTime 3870 // If this function changes, os::is_thread_cpu_time_supported() should too 3871 if (os::win32::is_nt()) { 3872 FILETIME CreationTime; 3873 FILETIME ExitTime; 3874 FILETIME KernelTime; 3875 FILETIME UserTime; 3876 3877 if ( GetThreadTimes(thread->osthread()->thread_handle(), 3878 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0) 3879 return -1; 3880 else 3881 if (user_sys_cpu_time) { 3882 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100; 3883 } else { 3884 return FT2INT64(UserTime) * 100; 3885 } 3886 } else { 3887 return (jlong) timeGetTime() * 1000000; 3888 } 3889 } 3890 3891 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 3892 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 3893 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 3894 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 3895 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 3896 } 3897 3898 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 3899 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 3900 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 3901 info_ptr->may_skip_forward = false; // GetThreadTimes 
returns absolute time 3902 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 3903 } 3904 3905 bool os::is_thread_cpu_time_supported() { 3906 // see os::thread_cpu_time 3907 if (os::win32::is_nt()) { 3908 FILETIME CreationTime; 3909 FILETIME ExitTime; 3910 FILETIME KernelTime; 3911 FILETIME UserTime; 3912 3913 if ( GetThreadTimes(GetCurrentThread(), 3914 &CreationTime, &ExitTime, &KernelTime, &UserTime) == 0) 3915 return false; 3916 else 3917 return true; 3918 } else { 3919 return false; 3920 } 3921 } 3922 3923 // Windows does't provide a loadavg primitive so this is stubbed out for now. 3924 // It does have primitives (PDH API) to get CPU usage and run queue length. 3925 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length" 3926 // If we wanted to implement loadavg on Windows, we have a few options: 3927 // 3928 // a) Query CPU usage and run queue length and "fake" an answer by 3929 // returning the CPU usage if it's under 100%, and the run queue 3930 // length otherwise. It turns out that querying is pretty slow 3931 // on Windows, on the order of 200 microseconds on a fast machine. 3932 // Note that on the Windows the CPU usage value is the % usage 3933 // since the last time the API was called (and the first call 3934 // returns 100%), so we'd have to deal with that as well. 3935 // 3936 // b) Sample the "fake" answer using a sampling thread and store 3937 // the answer in a global variable. The call to loadavg would 3938 // just return the value of the global, avoiding the slow query. 3939 // 3940 // c) Sample a better answer using exponential decay to smooth the 3941 // value. This is basically the algorithm used by UNIX kernels. 3942 // 3943 // Note that sampling thread starvation could affect both (b) and (c). 
3944 int os::loadavg(double loadavg[], int nelem) { 3945 return -1; 3946 } 3947 3948 3949 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield() 3950 bool os::dont_yield() { 3951 return DontYieldALot; 3952 } 3953 3954 // This method is a slightly reworked copy of JDK's sysOpen 3955 // from src/windows/hpi/src/sys_api_md.c 3956 3957 int os::open(const char *path, int oflag, int mode) { 3958 char pathbuf[MAX_PATH]; 3959 3960 if (strlen(path) > MAX_PATH - 1) { 3961 errno = ENAMETOOLONG; 3962 return -1; 3963 } 3964 os::native_path(strcpy(pathbuf, path)); 3965 return ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); 3966 } 3967 3968 // Is a (classpath) directory empty? 3969 bool os::dir_is_empty(const char* path) { 3970 WIN32_FIND_DATA fd; 3971 HANDLE f = FindFirstFile(path, &fd); 3972 if (f == INVALID_HANDLE_VALUE) { 3973 return true; 3974 } 3975 FindClose(f); 3976 return false; 3977 } 3978 3979 // create binary file, rewriting existing file if required 3980 int os::create_binary_file(const char* path, bool rewrite_existing) { 3981 int oflags = _O_CREAT | _O_WRONLY | _O_BINARY; 3982 if (!rewrite_existing) { 3983 oflags |= _O_EXCL; 3984 } 3985 return ::open(path, oflags, _S_IREAD | _S_IWRITE); 3986 } 3987 3988 // return current position of file pointer 3989 jlong os::current_file_offset(int fd) { 3990 return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR); 3991 } 3992 3993 // move file pointer to the specified offset 3994 jlong os::seek_to_file_offset(int fd, jlong offset) { 3995 return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET); 3996 } 3997 3998 3999 jlong os::lseek(int fd, jlong offset, int whence) { 4000 return (jlong) ::_lseeki64(fd, offset, whence); 4001 } 4002 4003 // This method is a slightly reworked copy of JDK's sysNativePath 4004 // from src/windows/hpi/src/path_md.c 4005 4006 /* Convert a pathname to native format. 
On win32, this involves forcing all 4007 separators to be '\\' rather than '/' (both are legal inputs, but Win95 4008 sometimes rejects '/') and removing redundant separators. The input path is 4009 assumed to have been converted into the character encoding used by the local 4010 system. Because this might be a double-byte encoding, care is taken to 4011 treat double-byte lead characters correctly. 4012 4013 This procedure modifies the given path in place, as the result is never 4014 longer than the original. There is no error return; this operation always 4015 succeeds. */ 4016 char * os::native_path(char *path) { 4017 char *src = path, *dst = path, *end = path; 4018 char *colon = NULL; /* If a drive specifier is found, this will 4019 point to the colon following the drive 4020 letter */ 4021 4022 /* Assumption: '/', '\\', ':', and drive letters are never lead bytes */ 4023 assert(((!::IsDBCSLeadByte('/')) 4024 && (!::IsDBCSLeadByte('\\')) 4025 && (!::IsDBCSLeadByte(':'))), 4026 "Illegal lead byte"); 4027 4028 /* Check for leading separators */ 4029 #define isfilesep(c) ((c) == '/' || (c) == '\\') 4030 while (isfilesep(*src)) { 4031 src++; 4032 } 4033 4034 if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') { 4035 /* Remove leading separators if followed by drive specifier. This 4036 hack is necessary to support file URLs containing drive 4037 specifiers (e.g., "file://c:/path"). As a side effect, 4038 "/c:/path" can be used as an alternative to "c:/path". */ 4039 *dst++ = *src++; 4040 colon = dst; 4041 *dst++ = ':'; 4042 src++; 4043 } else { 4044 src = path; 4045 if (isfilesep(src[0]) && isfilesep(src[1])) { 4046 /* UNC pathname: Retain first separator; leave src pointed at 4047 second separator so that further separators will be collapsed 4048 into the second separator. The result will be a pathname 4049 beginning with "\\\\" followed (most likely) by a host name. 
*/ 4050 src = dst = path + 1; 4051 path[0] = '\\'; /* Force first separator to '\\' */ 4052 } 4053 } 4054 4055 end = dst; 4056 4057 /* Remove redundant separators from remainder of path, forcing all 4058 separators to be '\\' rather than '/'. Also, single byte space 4059 characters are removed from the end of the path because those 4060 are not legal ending characters on this operating system. 4061 */ 4062 while (*src != '\0') { 4063 if (isfilesep(*src)) { 4064 *dst++ = '\\'; src++; 4065 while (isfilesep(*src)) src++; 4066 if (*src == '\0') { 4067 /* Check for trailing separator */ 4068 end = dst; 4069 if (colon == dst - 2) break; /* "z:\\" */ 4070 if (dst == path + 1) break; /* "\\" */ 4071 if (dst == path + 2 && isfilesep(path[0])) { 4072 /* "\\\\" is not collapsed to "\\" because "\\\\" marks the 4073 beginning of a UNC pathname. Even though it is not, by 4074 itself, a valid UNC pathname, we leave it as is in order 4075 to be consistent with the path canonicalizer as well 4076 as the win32 APIs, which treat this case as an invalid 4077 UNC pathname rather than as an alias for the root 4078 directory of the current drive. */ 4079 break; 4080 } 4081 end = --dst; /* Path does not denote a root directory, so 4082 remove trailing separator */ 4083 break; 4084 } 4085 end = dst; 4086 } else { 4087 if (::IsDBCSLeadByte(*src)) { /* Copy a double-byte character */ 4088 *dst++ = *src++; 4089 if (*src) *dst++ = *src++; 4090 end = dst; 4091 } else { /* Copy a single-byte character */ 4092 char c = *src++; 4093 *dst++ = c; 4094 /* Space is not a legal ending character */ 4095 if (c != ' ') end = dst; 4096 } 4097 } 4098 } 4099 4100 *end = '\0'; 4101 4102 /* For "z:", add "." 
to work around a bug in the C runtime library */ 4103 if (colon == dst - 1) { 4104 path[2] = '.'; 4105 path[3] = '\0'; 4106 } 4107 4108 #ifdef DEBUG 4109 jio_fprintf(stderr, "sysNativePath: %s\n", path); 4110 #endif DEBUG 4111 return path; 4112 } 4113 4114 // This code is a copy of JDK's sysSetLength 4115 // from src/windows/hpi/src/sys_api_md.c 4116 4117 int os::ftruncate(int fd, jlong length) { 4118 HANDLE h = (HANDLE)::_get_osfhandle(fd); 4119 long high = (long)(length >> 32); 4120 DWORD ret; 4121 4122 if (h == (HANDLE)(-1)) { 4123 return -1; 4124 } 4125 4126 ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN); 4127 if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) { 4128 return -1; 4129 } 4130 4131 if (::SetEndOfFile(h) == FALSE) { 4132 return -1; 4133 } 4134 4135 return 0; 4136 } 4137 4138 4139 // This code is a copy of JDK's sysSync 4140 // from src/windows/hpi/src/sys_api_md.c 4141 // except for the legacy workaround for a bug in Win 98 4142 4143 int os::fsync(int fd) { 4144 HANDLE handle = (HANDLE)::_get_osfhandle(fd); 4145 4146 if ( (!::FlushFileBuffers(handle)) && 4147 (GetLastError() != ERROR_ACCESS_DENIED) ) { 4148 /* from winerror.h */ 4149 return -1; 4150 } 4151 return 0; 4152 } 4153 4154 static int nonSeekAvailable(int, long *); 4155 static int stdinAvailable(int, long *); 4156 4157 #define S_ISCHR(mode) (((mode) & _S_IFCHR) == _S_IFCHR) 4158 #define S_ISFIFO(mode) (((mode) & _S_IFIFO) == _S_IFIFO) 4159 4160 // This code is a copy of JDK's sysAvailable 4161 // from src/windows/hpi/src/sys_api_md.c 4162 4163 int os::available(int fd, jlong *bytes) { 4164 jlong cur, end; 4165 struct _stati64 stbuf64; 4166 4167 if (::_fstati64(fd, &stbuf64) >= 0) { 4168 int mode = stbuf64.st_mode; 4169 if (S_ISCHR(mode) || S_ISFIFO(mode)) { 4170 int ret; 4171 long lpbytes; 4172 if (fd == 0) { 4173 ret = stdinAvailable(fd, &lpbytes); 4174 } else { 4175 ret = nonSeekAvailable(fd, &lpbytes); 4176 } 4177 (*bytes) = (jlong)(lpbytes); 4178 return ret; 4179 } 
4180 if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) { 4181 return FALSE; 4182 } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) { 4183 return FALSE; 4184 } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) { 4185 return FALSE; 4186 } 4187 *bytes = end - cur; 4188 return TRUE; 4189 } else { 4190 return FALSE; 4191 } 4192 } 4193 4194 // This code is a copy of JDK's nonSeekAvailable 4195 // from src/windows/hpi/src/sys_api_md.c 4196 4197 static int nonSeekAvailable(int fd, long *pbytes) { 4198 /* This is used for available on non-seekable devices 4199 * (like both named and anonymous pipes, such as pipes 4200 * connected to an exec'd process). 4201 * Standard Input is a special case. 4202 * 4203 */ 4204 HANDLE han; 4205 4206 if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) { 4207 return FALSE; 4208 } 4209 4210 if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) { 4211 /* PeekNamedPipe fails when at EOF. In that case we 4212 * simply make *pbytes = 0 which is consistent with the 4213 * behavior we get on Solaris when an fd is at EOF. 4214 * The only alternative is to raise an Exception, 4215 * which isn't really warranted. 
4216 */ 4217 if (::GetLastError() != ERROR_BROKEN_PIPE) { 4218 return FALSE; 4219 } 4220 *pbytes = 0; 4221 } 4222 return TRUE; 4223 } 4224 4225 #define MAX_INPUT_EVENTS 2000 4226 4227 // This code is a copy of JDK's stdinAvailable 4228 // from src/windows/hpi/src/sys_api_md.c 4229 4230 static int stdinAvailable(int fd, long *pbytes) { 4231 HANDLE han; 4232 DWORD numEventsRead = 0; /* Number of events read from buffer */ 4233 DWORD numEvents = 0; /* Number of events in buffer */ 4234 DWORD i = 0; /* Loop index */ 4235 DWORD curLength = 0; /* Position marker */ 4236 DWORD actualLength = 0; /* Number of bytes readable */ 4237 BOOL error = FALSE; /* Error holder */ 4238 INPUT_RECORD *lpBuffer; /* Pointer to records of input events */ 4239 4240 if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) { 4241 return FALSE; 4242 } 4243 4244 /* Construct an array of input records in the console buffer */ 4245 error = ::GetNumberOfConsoleInputEvents(han, &numEvents); 4246 if (error == 0) { 4247 return nonSeekAvailable(fd, pbytes); 4248 } 4249 4250 /* lpBuffer must fit into 64K or else PeekConsoleInput fails */ 4251 if (numEvents > MAX_INPUT_EVENTS) { 4252 numEvents = MAX_INPUT_EVENTS; 4253 } 4254 4255 lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal); 4256 if (lpBuffer == NULL) { 4257 return FALSE; 4258 } 4259 4260 error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead); 4261 if (error == 0) { 4262 os::free(lpBuffer, mtInternal); 4263 return FALSE; 4264 } 4265 4266 /* Examine input records for the number of bytes available */ 4267 for(i=0; i<numEvents; i++) { 4268 if (lpBuffer[i].EventType == KEY_EVENT) { 4269 4270 KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *) 4271 &(lpBuffer[i].Event); 4272 if (keyRecord->bKeyDown == TRUE) { 4273 CHAR *keyPressed = (CHAR *) &(keyRecord->uChar); 4274 curLength++; 4275 if (*keyPressed == '\r') { 4276 actualLength = curLength; 4277 } 4278 } 4279 } 4280 } 4281 4282 if(lpBuffer != 
NULL) { 4283 os::free(lpBuffer, mtInternal); 4284 } 4285 4286 *pbytes = (long) actualLength; 4287 return TRUE; 4288 } 4289 4290 // Map a block of memory. 4291 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 4292 char *addr, size_t bytes, bool read_only, 4293 bool allow_exec) { 4294 HANDLE hFile; 4295 char* base; 4296 4297 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL, 4298 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); 4299 if (hFile == NULL) { 4300 if (PrintMiscellaneous && Verbose) { 4301 DWORD err = GetLastError(); 4302 tty->print_cr("CreateFile() failed: GetLastError->%ld."); 4303 } 4304 return NULL; 4305 } 4306 4307 if (allow_exec) { 4308 // CreateFileMapping/MapViewOfFileEx can't map executable memory 4309 // unless it comes from a PE image (which the shared archive is not.) 4310 // Even VirtualProtect refuses to give execute access to mapped memory 4311 // that was not previously executable. 4312 // 4313 // Instead, stick the executable region in anonymous memory. Yuck. 4314 // Penalty is that ~4 pages will not be shareable - in the future 4315 // we might consider DLLizing the shared archive with a proper PE 4316 // header so that mapping executable + sharing is possible. 4317 4318 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE, 4319 PAGE_READWRITE); 4320 if (base == NULL) { 4321 if (PrintMiscellaneous && Verbose) { 4322 DWORD err = GetLastError(); 4323 tty->print_cr("VirtualAlloc() failed: GetLastError->%ld.", err); 4324 } 4325 CloseHandle(hFile); 4326 return NULL; 4327 } 4328 4329 DWORD bytes_read; 4330 OVERLAPPED overlapped; 4331 overlapped.Offset = (DWORD)file_offset; 4332 overlapped.OffsetHigh = 0; 4333 overlapped.hEvent = NULL; 4334 // ReadFile guarantees that if the return value is true, the requested 4335 // number of bytes were read before returning. 
4336 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; 4337 if (!res) { 4338 if (PrintMiscellaneous && Verbose) { 4339 DWORD err = GetLastError(); 4340 tty->print_cr("ReadFile() failed: GetLastError->%ld.", err); 4341 } 4342 release_memory(base, bytes); 4343 CloseHandle(hFile); 4344 return NULL; 4345 } 4346 } else { 4347 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, 4348 NULL /*file_name*/); 4349 if (hMap == NULL) { 4350 if (PrintMiscellaneous && Verbose) { 4351 DWORD err = GetLastError(); 4352 tty->print_cr("CreateFileMapping() failed: GetLastError->%ld."); 4353 } 4354 CloseHandle(hFile); 4355 return NULL; 4356 } 4357 4358 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; 4359 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, 4360 (DWORD)bytes, addr); 4361 if (base == NULL) { 4362 if (PrintMiscellaneous && Verbose) { 4363 DWORD err = GetLastError(); 4364 tty->print_cr("MapViewOfFileEx() failed: GetLastError->%ld.", err); 4365 } 4366 CloseHandle(hMap); 4367 CloseHandle(hFile); 4368 return NULL; 4369 } 4370 4371 if (CloseHandle(hMap) == 0) { 4372 if (PrintMiscellaneous && Verbose) { 4373 DWORD err = GetLastError(); 4374 tty->print_cr("CloseHandle(hMap) failed: GetLastError->%ld.", err); 4375 } 4376 CloseHandle(hFile); 4377 return base; 4378 } 4379 } 4380 4381 if (allow_exec) { 4382 DWORD old_protect; 4383 DWORD exec_access = read_only ? 
PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; 4384 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; 4385 4386 if (!res) { 4387 if (PrintMiscellaneous && Verbose) { 4388 DWORD err = GetLastError(); 4389 tty->print_cr("VirtualProtect() failed: GetLastError->%ld.", err); 4390 } 4391 // Don't consider this a hard error, on IA32 even if the 4392 // VirtualProtect fails, we should still be able to execute 4393 CloseHandle(hFile); 4394 return base; 4395 } 4396 } 4397 4398 if (CloseHandle(hFile) == 0) { 4399 if (PrintMiscellaneous && Verbose) { 4400 DWORD err = GetLastError(); 4401 tty->print_cr("CloseHandle(hFile) failed: GetLastError->%ld.", err); 4402 } 4403 return base; 4404 } 4405 4406 return base; 4407 } 4408 4409 4410 // Remap a block of memory. 4411 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4412 char *addr, size_t bytes, bool read_only, 4413 bool allow_exec) { 4414 // This OS does not allow existing memory maps to be remapped so we 4415 // have to unmap the memory before we remap it. 4416 if (!os::unmap_memory(addr, bytes)) { 4417 return NULL; 4418 } 4419 4420 // There is a very small theoretical window between the unmap_memory() 4421 // call above and the map_memory() call below where a thread in native 4422 // code may be able to access an address that is no longer mapped. 4423 4424 return os::map_memory(fd, file_name, file_offset, addr, bytes, 4425 read_only, allow_exec); 4426 } 4427 4428 4429 // Unmap a block of memory. 4430 // Returns true=success, otherwise false. 
4431 4432 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4433 BOOL result = UnmapViewOfFile(addr); 4434 if (result == 0) { 4435 if (PrintMiscellaneous && Verbose) { 4436 DWORD err = GetLastError(); 4437 tty->print_cr("UnmapViewOfFile() failed: GetLastError->%ld.", err); 4438 } 4439 return false; 4440 } 4441 return true; 4442 } 4443 4444 void os::pause() { 4445 char filename[MAX_PATH]; 4446 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4447 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4448 } else { 4449 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4450 } 4451 4452 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4453 if (fd != -1) { 4454 struct stat buf; 4455 ::close(fd); 4456 while (::stat(filename, &buf) == 0) { 4457 Sleep(100); 4458 } 4459 } else { 4460 jio_fprintf(stderr, 4461 "Could not open pause file '%s', continuing immediately.\n", filename); 4462 } 4463 } 4464 4465 // An Event wraps a win32 "CreateEvent" kernel handle. 4466 // 4467 // We have a number of choices regarding "CreateEvent" win32 handle leakage: 4468 // 4469 // 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle 4470 // field, and call CloseHandle() on the win32 event handle. Unpark() would 4471 // need to be modified to tolerate finding a NULL (invalid) win32 event handle. 4472 // In addition, an unpark() operation might fetch the handle field, but the 4473 // event could recycle between the fetch and the SetEvent() operation. 4474 // SetEvent() would either fail because the handle was invalid, or inadvertently work, 4475 // as the win32 handle value had been recycled. In an ideal world calling SetEvent() 4476 // on an stale but recycled handle would be harmless, but in practice this might 4477 // confuse other non-Sun code, so it's not a viable approach. 4478 // 4479 // 2: Once a win32 event handle is associated with an Event, it remains associated 4480 // with the Event. 
The event handle is never closed. This could be construed 4481 // as handle leakage, but only up to the maximum # of threads that have been extant 4482 // at any one time. This shouldn't be an issue, as windows platforms typically 4483 // permit a process to have hundreds of thousands of open handles. 4484 // 4485 // 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList 4486 // and release unused handles. 4487 // 4488 // 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle. 4489 // It's not clear, however, that we wouldn't be trading one type of leak for another. 4490 // 4491 // 5. Use an RCU-like mechanism (Read-Copy Update). 4492 // Or perhaps something similar to Maged Michael's "Hazard pointers". 4493 // 4494 // We use (2). 4495 // 4496 // TODO-FIXME: 4497 // 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation. 4498 // 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks 4499 // to recover from (or at least detect) the dreaded Windows 841176 bug. 4500 // 3. Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent 4501 // into a single win32 CreateEvent() handle. 4502 // 4503 // _Event transitions in park() 4504 // -1 => -1 : illegal 4505 // 1 => 0 : pass - return immediately 4506 // 0 => -1 : block 4507 // 4508 // _Event serves as a restricted-range semaphore : 4509 // -1 : thread is blocked 4510 // 0 : neutral - thread is running or ready 4511 // 1 : signaled - thread is running or ready 4512 // 4513 // Another possible encoding of _Event would be 4514 // with explicit "PARKED" and "SIGNALED" bits. 4515 4516 int os::PlatformEvent::park (jlong Millis) { 4517 guarantee (_ParkHandle != NULL , "Invariant") ; 4518 guarantee (Millis > 0 , "Invariant") ; 4519 int v ; 4520 4521 // CONSIDER: defer assigning a CreateEvent() handle to the Event until 4522 // the initial park() operation. 
4523 4524 for (;;) { 4525 v = _Event ; 4526 if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ; 4527 } 4528 guarantee ((v == 0) || (v == 1), "invariant") ; 4529 if (v != 0) return OS_OK ; 4530 4531 // Do this the hard way by blocking ... 4532 // TODO: consider a brief spin here, gated on the success of recent 4533 // spin attempts by this thread. 4534 // 4535 // We decompose long timeouts into series of shorter timed waits. 4536 // Evidently large timo values passed in WaitForSingleObject() are problematic on some 4537 // versions of Windows. See EventWait() for details. This may be superstition. Or not. 4538 // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time 4539 // with os::javaTimeNanos(). Furthermore, we assume that spurious returns from 4540 // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend 4541 // to happen early in the wait interval. Specifically, after a spurious wakeup (rv == 4542 // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate 4543 // for the already waited time. This policy does not admit any new outcomes. 4544 // In the future, however, we might want to track the accumulated wait time and 4545 // adjust Millis accordingly if we encounter a spurious wakeup. 4546 4547 const int MAXTIMEOUT = 0x10000000 ; 4548 DWORD rv = WAIT_TIMEOUT ; 4549 while (_Event < 0 && Millis > 0) { 4550 DWORD prd = Millis ; // set prd = MAX (Millis, MAXTIMEOUT) 4551 if (Millis > MAXTIMEOUT) { 4552 prd = MAXTIMEOUT ; 4553 } 4554 rv = ::WaitForSingleObject (_ParkHandle, prd) ; 4555 assert (rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed") ; 4556 if (rv == WAIT_TIMEOUT) { 4557 Millis -= prd ; 4558 } 4559 } 4560 v = _Event ; 4561 _Event = 0 ; 4562 OrderAccess::fence() ; 4563 // If we encounter a nearly simultanous timeout expiry and unpark() 4564 // we return OS_OK indicating we awoke via unpark(). 
4565 // Implementor's license -- returning OS_TIMEOUT would be equally valid, however. 4566 return (v >= 0) ? OS_OK : OS_TIMEOUT ; 4567 } 4568 4569 void os::PlatformEvent::park () { 4570 guarantee (_ParkHandle != NULL, "Invariant") ; 4571 // Invariant: Only the thread associated with the Event/PlatformEvent 4572 // may call park(). 4573 int v ; 4574 for (;;) { 4575 v = _Event ; 4576 if (Atomic::cmpxchg (v-1, &_Event, v) == v) break ; 4577 } 4578 guarantee ((v == 0) || (v == 1), "invariant") ; 4579 if (v != 0) return ; 4580 4581 // Do this the hard way by blocking ... 4582 // TODO: consider a brief spin here, gated on the success of recent 4583 // spin attempts by this thread. 4584 while (_Event < 0) { 4585 DWORD rv = ::WaitForSingleObject (_ParkHandle, INFINITE) ; 4586 assert (rv == WAIT_OBJECT_0, "WaitForSingleObject failed") ; 4587 } 4588 4589 // Usually we'll find _Event == 0 at this point, but as 4590 // an optional optimization we clear it, just in case can 4591 // multiple unpark() operations drove _Event up to 1. 4592 _Event = 0 ; 4593 OrderAccess::fence() ; 4594 guarantee (_Event >= 0, "invariant") ; 4595 } 4596 4597 void os::PlatformEvent::unpark() { 4598 guarantee (_ParkHandle != NULL, "Invariant") ; 4599 int v ; 4600 for (;;) { 4601 v = _Event ; // Increment _Event if it's < 1. 4602 if (v > 0) { 4603 // If it's already signaled just return. 4604 // The LD of _Event could have reordered or be satisfied 4605 // by a read-aside from this processor's write buffer. 4606 // To avoid problems execute a barrier and then 4607 // ratify the value. A degenerate CAS() would also work. 4608 // Viz., CAS (v+0, &_Event, v) == v). 
4609 OrderAccess::fence() ; 4610 if (_Event == v) return ; 4611 continue ; 4612 } 4613 if (Atomic::cmpxchg (v+1, &_Event, v) == v) break ; 4614 } 4615 if (v < 0) { 4616 ::SetEvent (_ParkHandle) ; 4617 } 4618 } 4619 4620 4621 // JSR166 4622 // ------------------------------------------------------- 4623 4624 /* 4625 * The Windows implementation of Park is very straightforward: Basic 4626 * operations on Win32 Events turn out to have the right semantics to 4627 * use them directly. We opportunistically resuse the event inherited 4628 * from Monitor. 4629 */ 4630 4631 4632 void Parker::park(bool isAbsolute, jlong time) { 4633 guarantee (_ParkEvent != NULL, "invariant") ; 4634 // First, demultiplex/decode time arguments 4635 if (time < 0) { // don't wait 4636 return; 4637 } 4638 else if (time == 0 && !isAbsolute) { 4639 time = INFINITE; 4640 } 4641 else if (isAbsolute) { 4642 time -= os::javaTimeMillis(); // convert to relative time 4643 if (time <= 0) // already elapsed 4644 return; 4645 } 4646 else { // relative 4647 time /= 1000000; // Must coarsen from nanos to millis 4648 if (time == 0) // Wait for the minimal time unit if zero 4649 time = 1; 4650 } 4651 4652 JavaThread* thread = (JavaThread*)(Thread::current()); 4653 assert(thread->is_Java_thread(), "Must be JavaThread"); 4654 JavaThread *jt = (JavaThread *)thread; 4655 4656 // Don't wait if interrupted or already triggered 4657 if (Thread::is_interrupted(thread, false) || 4658 WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) { 4659 ResetEvent(_ParkEvent); 4660 return; 4661 } 4662 else { 4663 ThreadBlockInVM tbivm(jt); 4664 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */); 4665 jt->set_suspend_equivalent(); 4666 4667 WaitForSingleObject(_ParkEvent, time); 4668 ResetEvent(_ParkEvent); 4669 4670 // If externally suspended while waiting, re-suspend 4671 if (jt->handle_special_suspend_equivalent_condition()) { 4672 jt->java_suspend_self(); 4673 } 4674 } 4675 } 4676 4677 void 
Parker::unpark() {
  guarantee (_ParkEvent != NULL, "invariant") ;
  // Signal the per-Parker Win32 event; a thread blocked in park() (or the
  // next one to call it) observes the signaled state and returns.
  SetEvent(_ParkEvent);
}

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't create a new process).
// Blocks the caller until the child exits.
int os::fork_and_exec(char* cmd) {
  STARTUPINFO si;
  PROCESS_INFORMATION pi;

  memset(&si, 0, sizeof(si));
  si.cb = sizeof(si);
  memset(&pi, 0, sizeof(pi));
  BOOL rslt = CreateProcess(NULL,   // executable name - use command line
                            cmd,    // command line
                            NULL,   // process security attribute
                            NULL,   // thread security attribute
                            TRUE,   // inherits system handles
                            0,      // no creation flags
                            NULL,   // use parent's environment block
                            NULL,   // use parent's starting directory
                            &si,    // (in) startup information
                            &pi);   // (out) process information

  if (rslt) {
    // Wait until child process exits.
    WaitForSingleObject(pi.hProcess, INFINITE);

    DWORD exit_code;
    GetExitCodeProcess(pi.hProcess, &exit_code);

    // Close process and thread handles.
    CloseHandle(pi.hProcess);
    CloseHandle(pi.hThread);

    return (int)exit_code;
  } else {
    return -1;
  }
}

//--------------------------------------------------------------------------------------------------
// Non-product code

// Counters gating how often check_heap() actually walks the C heap,
// controlled by the MallocVerifyStart / MallocVerifyInterval flags.
static int mallocDebugIntervalCounter = 0;
static int mallocDebugCounter = 0;

// Walk the process heap and validate every busy block; calls fatal() on
// corruption.  Always returns true when it returns at all.
bool os::check_heap(bool force) {
  if (++mallocDebugCounter < MallocVerifyStart && !force) return true;
  if (++mallocDebugIntervalCounter >= MallocVerifyInterval || force) {
    // Note: HeapValidate executes two hardware breakpoints when it finds something
    // wrong; at these points, eax contains the address of the offending block (I think).
    // To get to the explicit error message(s) below, just continue twice.
    HANDLE heap = GetProcessHeap();
    { HeapLock(heap);
    // lpData == NULL tells HeapWalk to start at the first heap entry.
    PROCESS_HEAP_ENTRY phe;
    phe.lpData = NULL;
    while (HeapWalk(heap, &phe) != 0) {
      if ((phe.wFlags & PROCESS_HEAP_ENTRY_BUSY) &&
         !HeapValidate(heap, 0, phe.lpData)) {
        tty->print_cr("C heap has been corrupted (time: %d allocations)", mallocDebugCounter);
        // NOTE(review): %#x with a pointer truncates on 64-bit -- consider
        // a pointer-sized format.  (fatal() follows, so only diagnostics are affected.)
        tty->print_cr("corrupted block near address %#x, length %d", phe.lpData, phe.cbData);
        fatal("corrupted C heap");
      }
    }
    // HeapWalk reports normal end-of-heap via ERROR_NO_MORE_ITEMS.
    DWORD err = GetLastError();
    if (err != ERROR_NO_MORE_ITEMS && err != ERROR_CALL_NOT_IMPLEMENTED) {
      fatal(err_msg("heap walk aborted with error %d", err));
    }
    HeapUnlock(heap);
    }
    mallocDebugIntervalCounter = 0;
  }
  return true;
}


// Placeholder: symbol lookup for an arbitrary address is not implemented
// on Windows; always reports failure.
bool os::find(address addr, outputStream* st) {
  // Nothing yet
  return false;
}

// Structured-exception filter used for the memory-serialize page: if an
// access violation hits that page, resume execution; otherwise keep
// searching for another handler.
LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) {
  DWORD exception_code = e->ExceptionRecord->ExceptionCode;

  if ( exception_code == EXCEPTION_ACCESS_VIOLATION ) {
    JavaThread* thread = (JavaThread*)ThreadLocalStorage::get_thread_slow();
    PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord;
    // ExceptionInformation[1] is the faulting address for access violations.
    address addr = (address) exceptionRecord->ExceptionInformation[1];

    if (os::is_memory_serialize_page(thread, addr))
      return EXCEPTION_CONTINUE_EXECUTION;
  }

  return EXCEPTION_CONTINUE_SEARCH;
}

// We don't build a headless jre for Windows
bool os::is_headless_jre() { return false; }


// Thin wrappers mapping the portable mutex_t vocabulary onto Win32
// critical sections (used only for sockFnTableMutex below).
typedef CRITICAL_SECTION mutex_t;
#define mutexInit(m)    InitializeCriticalSection(m)
#define mutexDestroy(m) DeleteCriticalSection(m)
#define mutexLock(m)    EnterCriticalSection(m)
#define mutexUnlock(m)  LeaveCriticalSection(m)

static bool sock_initialized = FALSE;
static mutex_t sockFnTableMutex;

// One-time WSAStartup for the process, guarded by sockFnTableMutex.
static void initSock() {
  WSADATA wsadata;

  if
(!os::WinSock2Dll::WinSock2Available()) {
    jio_fprintf(stderr, "Could not load Winsock 2 (error: %d)\n",
      ::GetLastError());
    return;
  }
  // NOTE(review): the sock_initialized check precedes the lock, and
  // mutexInit() runs on every call that reaches it -- two threads racing
  // here could both initialize the critical section.  Confirm callers are
  // effectively single-threaded at first use.
  if (sock_initialized == TRUE) return;

  ::mutexInit(&sockFnTableMutex);
  ::mutexLock(&sockFnTableMutex);
  if (os::WinSock2Dll::WSAStartup(MAKEWORD(1,1), &wsadata) != 0) {
    jio_fprintf(stderr, "Could not initialize Winsock\n");
  }
  sock_initialized = TRUE;
  ::mutexUnlock(&sockFnTableMutex);
}

// Lazily initializes Winsock on first use; returns NULL if Winsock 2
// could not be loaded.
struct hostent* os::get_host_by_name(char* name) {
  if (!sock_initialized) {
    initSock();
  }
  if (!os::WinSock2Dll::WinSock2Available()) {
    return NULL;
  }
  return (struct hostent*)os::WinSock2Dll::gethostbyname(name);
}

// Sockets are not file descriptors on Windows: use closesocket, not close.
int os::socket_close(int fd) {
  return ::closesocket(fd);
}

// Stores the number of readable bytes into *pbytes via FIONREAD.
// Returns 1 on success, 0 if ioctlsocket failed.
int os::socket_available(int fd, jint *pbytes) {
  int ret = ::ioctlsocket(fd, FIONREAD, (u_long*)pbytes);
  return (ret < 0) ? 0 : 1;
}

int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}

int os::listen(int fd, int count) {
  return ::listen(fd, count);
}

int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  return ::connect(fd, him, len);
}

int os::accept(int fd, struct sockaddr* him, socklen_t* len) {
  return ::accept(fd, him, len);
}

// Winsock takes int lengths, hence the (int) casts on size_t below.
int os::sendto(int fd, char* buf, size_t len, uint flags,
               struct sockaddr* to, socklen_t tolen) {

  return ::sendto(fd, buf, (int)len, flags, to, tolen);
}

int os::recvfrom(int fd, char *buf, size_t nBytes, uint flags,
                 sockaddr* from, socklen_t* fromlen) {

  return ::recvfrom(fd, buf, (int)nBytes, flags, from, fromlen);
}

int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  return ::recv(fd, buf, (int)nBytes, flags);
}

int os::send(int fd, char* buf, size_t nBytes, uint flags)
{
  return ::send(fd, buf, (int)nBytes, flags);
}

// Same as send(); on Windows there is no separate "raw" path.
int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}

// Wait up to 'timeout' milliseconds for fd to become readable.
// Returns the result of select(): >0 ready, 0 timed out, <0 error.
int os::timeout(int fd, long timeout) {
  fd_set tbl;
  struct timeval t;

  t.tv_sec  = timeout / 1000;
  t.tv_usec = (timeout % 1000) * 1000;

  // Winsock's fd_set is a counted array, so it can be filled directly
  // instead of via FD_ZERO/FD_SET.  The first select() argument is
  // ignored on Windows.
  tbl.fd_count    = 1;
  tbl.fd_array[0] = fd;

  return ::select(1, &tbl, 0, 0, &t);
}

int os::get_host_name(char* name, int namelen) {
  return ::gethostname(name, namelen);
}

int os::socket_shutdown(int fd, int howto) {
  return ::shutdown(fd, howto);
}

int os::bind(int fd, struct sockaddr* him, socklen_t len) {
  return ::bind(fd, him, len);
}

int os::get_sock_name(int fd, struct sockaddr* him, socklen_t* len) {
  return ::getsockname(fd, him, len);
}

int os::get_sock_opt(int fd, int level, int optname,
                     char* optval, socklen_t* optlen) {
  return ::getsockopt(fd, level, optname, optval, optlen);
}

int os::set_sock_opt(int fd, int level, int optname,
                     const char* optval, socklen_t optlen) {
  return ::setsockopt(fd, level, optname, optval, optlen);
}


// Kernel32 API
// Function-pointer types for Kernel32 entry points that may be absent on
// older Windows versions; resolved lazily via GetProcAddress.
typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
typedef LPVOID (WINAPI *VirtualAllocExNuma_Fn) (HANDLE, LPVOID, SIZE_T, DWORD, DWORD, DWORD);
typedef BOOL (WINAPI *GetNumaHighestNodeNumber_Fn) (PULONG);
typedef BOOL (WINAPI *GetNumaNodeProcessorMask_Fn) (UCHAR, PULONGLONG);
typedef USHORT (WINAPI* RtlCaptureStackBackTrace_Fn)(ULONG, ULONG, PVOID*, PULONG);

// NULL until initializeCommon() resolves them; NULL afterwards means the
// running OS does not export the function.
GetLargePageMinimum_Fn os::Kernel32Dll::_GetLargePageMinimum = NULL;
VirtualAllocExNuma_Fn os::Kernel32Dll::_VirtualAllocExNuma = NULL;
GetNumaHighestNodeNumber_Fn os::Kernel32Dll::_GetNumaHighestNodeNumber = NULL;
GetNumaNodeProcessorMask_Fn os::Kernel32Dll::_GetNumaNodeProcessorMask = NULL;
RtlCaptureStackBackTrace_Fn os::Kernel32Dll::_RtlCaptureStackBackTrace = NULL;


BOOL os::Kernel32Dll::initialized = FALSE;

// Caller must check GetLargePageMinimumAvailable() first; that call also
// triggers lazy resolution of the function pointer.
SIZE_T os::Kernel32Dll::GetLargePageMinimum() {
  assert(initialized && _GetLargePageMinimum != NULL,
    "GetLargePageMinimumAvailable() not yet called");
  return _GetLargePageMinimum();
}

BOOL os::Kernel32Dll::GetLargePageMinimumAvailable() {
  if (!initialized) {
    initialize();
  }
  return _GetLargePageMinimum != NULL;
}

// True when the OS exports VirtualAllocExNuma (proxy for the whole NUMA
// API group resolved in initializeCommon()).
BOOL os::Kernel32Dll::NumaCallsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _VirtualAllocExNuma != NULL;
}

LPVOID os::Kernel32Dll::VirtualAllocExNuma(HANDLE hProc, LPVOID addr, SIZE_T bytes, DWORD flags, DWORD prot, DWORD node) {
  assert(initialized && _VirtualAllocExNuma != NULL,
    "NUMACallsAvailable() not yet called");

  return _VirtualAllocExNuma(hProc, addr, bytes, flags, prot, node);
}

BOOL os::Kernel32Dll::GetNumaHighestNodeNumber(PULONG ptr_highest_node_number) {
  assert(initialized && _GetNumaHighestNodeNumber != NULL,
    "NUMACallsAvailable() not yet called");

  return _GetNumaHighestNodeNumber(ptr_highest_node_number);
}

BOOL os::Kernel32Dll::GetNumaNodeProcessorMask(UCHAR node, PULONGLONG proc_mask) {
  assert(initialized && _GetNumaNodeProcessorMask != NULL,
    "NUMACallsAvailable() not yet called");

  return _GetNumaNodeProcessorMask(node, proc_mask);
}

// Unlike the wrappers above this one degrades gracefully: it returns 0
// (no frames captured) when the OS lacks RtlCaptureStackBackTrace.
USHORT os::Kernel32Dll::RtlCaptureStackBackTrace(ULONG FrameToSkip,
  ULONG FrameToCapture, PVOID* BackTrace, PULONG BackTraceHash) {
  if (!initialized) {
    initialize();
  }

  if (_RtlCaptureStackBackTrace != NULL) {
    return _RtlCaptureStackBackTrace(FrameToSkip, FrameToCapture,
      BackTrace, BackTraceHash);
  } else {
    return 0;
  }
}

// Resolve the Kernel32 entry points needed regardless of JDK version.
void os::Kernel32Dll::initializeCommon() {
  if (!initialized) {
// (continuation of os::Kernel32Dll::initializeCommon)
    // Kernel32 is always loaded in a Win32 process, so GetModuleHandle
    // (not LoadLibrary) suffices; a NULL GetProcAddress result simply
    // means the API is absent on this OS version.
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");
    _GetLargePageMinimum = (GetLargePageMinimum_Fn)::GetProcAddress(handle, "GetLargePageMinimum");
    _VirtualAllocExNuma = (VirtualAllocExNuma_Fn)::GetProcAddress(handle, "VirtualAllocExNuma");
    _GetNumaHighestNodeNumber = (GetNumaHighestNodeNumber_Fn)::GetProcAddress(handle, "GetNumaHighestNodeNumber");
    _GetNumaNodeProcessorMask = (GetNumaNodeProcessorMask_Fn)::GetProcAddress(handle, "GetNumaNodeProcessorMask");
    _RtlCaptureStackBackTrace = (RtlCaptureStackBackTrace_Fn)::GetProcAddress(handle, "RtlCaptureStackBackTrace");
    initialized = TRUE;
  }
}



#ifndef JDK6_OR_EARLIER
// Post-JDK6 builds may assume a minimum Windows version on which all of
// the APIs below exist, so the wrappers call them directly and the
// *Available() probes return true unconditionally.

void os::Kernel32Dll::initialize() {
  initializeCommon();
}


// Kernel32 API
inline BOOL os::Kernel32Dll::SwitchToThread() {
  return ::SwitchToThread();
}

inline BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  return true;
}

// Help tools
inline BOOL os::Kernel32Dll::HelpToolsAvailable() {
  return true;
}

inline HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
  return ::CreateToolhelp32Snapshot(dwFlags, th32ProcessId);
}

inline BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32First(hSnapshot, lpme);
}

inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) {
  return ::Module32Next(hSnapshot, lpme);
}


inline BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() {
  return true;
}

inline void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) {
  ::GetNativeSystemInfo(lpSystemInfo);
}

// PSAPI API
inline BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
  return
::EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

inline DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
  return ::GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

inline BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
  return ::GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

// PSAPI is linked directly in this configuration, so it is always present.
inline BOOL os::PSApiDll::PSApiAvailable() {
  return true;
}


// WinSock2 API
inline BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
  return ::WSAStartup(wVersionRequested, lpWSAData);
}

inline struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  return ::gethostbyname(name);
}

inline BOOL os::WinSock2Dll::WinSock2Available() {
  return true;
}

// Advapi API
inline BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
   BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
   PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
  return ::AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
    BufferLength, PreviousState, ReturnLength);
}

inline BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
   PHANDLE TokenHandle) {
  return ::OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}

inline BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
  return ::LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}

inline BOOL os::Advapi32Dll::AdvapiAvailable() {
  return true;
}

#else
// JDK6-and-earlier builds must run on Windows versions that may lack the
// APIs below, so every entry point is resolved dynamically and callers
// must probe the *Available() methods first.

// Kernel32 API
typedef BOOL (WINAPI* SwitchToThread_Fn)(void);
typedef HANDLE (WINAPI* CreateToolhelp32Snapshot_Fn)(DWORD,DWORD);
typedef BOOL (WINAPI*
Module32First_Fn)(HANDLE,LPMODULEENTRY32);
typedef BOOL (WINAPI* Module32Next_Fn)(HANDLE,LPMODULEENTRY32);
typedef void (WINAPI* GetNativeSystemInfo_Fn)(LPSYSTEM_INFO);

// NULL until initialize() runs; NULL afterwards means the running OS does
// not export the function.
SwitchToThread_Fn os::Kernel32Dll::_SwitchToThread = NULL;
CreateToolhelp32Snapshot_Fn os::Kernel32Dll::_CreateToolhelp32Snapshot = NULL;
Module32First_Fn os::Kernel32Dll::_Module32First = NULL;
Module32Next_Fn os::Kernel32Dll::_Module32Next = NULL;
GetNativeSystemInfo_Fn os::Kernel32Dll::_GetNativeSystemInfo = NULL;

// Resolve the JDK6-only optional entry points, then the common set.
void os::Kernel32Dll::initialize() {
  if (!initialized) {
    HMODULE handle = ::GetModuleHandle("Kernel32.dll");
    assert(handle != NULL, "Just check");

    _SwitchToThread = (SwitchToThread_Fn)::GetProcAddress(handle, "SwitchToThread");
    _CreateToolhelp32Snapshot = (CreateToolhelp32Snapshot_Fn)
      ::GetProcAddress(handle, "CreateToolhelp32Snapshot");
    _Module32First = (Module32First_Fn)::GetProcAddress(handle, "Module32First");
    _Module32Next = (Module32Next_Fn)::GetProcAddress(handle, "Module32Next");
    _GetNativeSystemInfo = (GetNativeSystemInfo_Fn)::GetProcAddress(handle, "GetNativeSystemInfo");
    initializeCommon();  // resolve the functions that always need resolving

    initialized = TRUE;
  }
}

BOOL os::Kernel32Dll::SwitchToThread() {
  assert(initialized && _SwitchToThread != NULL,
    "SwitchToThreadAvailable() not yet called");
  return _SwitchToThread();
}


BOOL os::Kernel32Dll::SwitchToThreadAvailable() {
  if (!initialized) {
    initialize();
  }
  return _SwitchToThread != NULL;
}

// Help tools
// Available only when the whole Toolhelp32 trio resolved.
BOOL os::Kernel32Dll::HelpToolsAvailable() {
  if (!initialized) {
    initialize();
  }
  return _CreateToolhelp32Snapshot != NULL &&
         _Module32First != NULL &&
         _Module32Next != NULL;
}

HANDLE os::Kernel32Dll::CreateToolhelp32Snapshot(DWORD dwFlags,DWORD th32ProcessId) {
assert(initialized && _CreateToolhelp32Snapshot != NULL, 5141 "HelpToolsAvailable() not yet called"); 5142 5143 return _CreateToolhelp32Snapshot(dwFlags, th32ProcessId); 5144 } 5145 5146 BOOL os::Kernel32Dll::Module32First(HANDLE hSnapshot,LPMODULEENTRY32 lpme) { 5147 assert(initialized && _Module32First != NULL, 5148 "HelpToolsAvailable() not yet called"); 5149 5150 return _Module32First(hSnapshot, lpme); 5151 } 5152 5153 inline BOOL os::Kernel32Dll::Module32Next(HANDLE hSnapshot,LPMODULEENTRY32 lpme) { 5154 assert(initialized && _Module32Next != NULL, 5155 "HelpToolsAvailable() not yet called"); 5156 5157 return _Module32Next(hSnapshot, lpme); 5158 } 5159 5160 5161 BOOL os::Kernel32Dll::GetNativeSystemInfoAvailable() { 5162 if (!initialized) { 5163 initialize(); 5164 } 5165 return _GetNativeSystemInfo != NULL; 5166 } 5167 5168 void os::Kernel32Dll::GetNativeSystemInfo(LPSYSTEM_INFO lpSystemInfo) { 5169 assert(initialized && _GetNativeSystemInfo != NULL, 5170 "GetNativeSystemInfoAvailable() not yet called"); 5171 5172 _GetNativeSystemInfo(lpSystemInfo); 5173 } 5174 5175 // PSAPI API 5176 5177 5178 typedef BOOL (WINAPI *EnumProcessModules_Fn)(HANDLE, HMODULE *, DWORD, LPDWORD); 5179 typedef BOOL (WINAPI *GetModuleFileNameEx_Fn)(HANDLE, HMODULE, LPTSTR, DWORD);; 5180 typedef BOOL (WINAPI *GetModuleInformation_Fn)(HANDLE, HMODULE, LPMODULEINFO, DWORD); 5181 5182 EnumProcessModules_Fn os::PSApiDll::_EnumProcessModules = NULL; 5183 GetModuleFileNameEx_Fn os::PSApiDll::_GetModuleFileNameEx = NULL; 5184 GetModuleInformation_Fn os::PSApiDll::_GetModuleInformation = NULL; 5185 BOOL os::PSApiDll::initialized = FALSE; 5186 5187 void os::PSApiDll::initialize() { 5188 if (!initialized) { 5189 HMODULE handle = os::win32::load_Windows_dll("PSAPI.DLL", NULL, 0); 5190 if (handle != NULL) { 5191 _EnumProcessModules = (EnumProcessModules_Fn)::GetProcAddress(handle, 5192 "EnumProcessModules"); 5193 _GetModuleFileNameEx = (GetModuleFileNameEx_Fn)::GetProcAddress(handle, 5194 
"GetModuleFileNameExA");
      _GetModuleInformation = (GetModuleInformation_Fn)::GetProcAddress(handle,
        "GetModuleInformation");
    }
    initialized = TRUE;
  }
}



BOOL os::PSApiDll::EnumProcessModules(HANDLE hProcess, HMODULE *lpModule, DWORD cb, LPDWORD lpcbNeeded) {
  assert(initialized && _EnumProcessModules != NULL,
    "PSApiAvailable() not yet called");
  return _EnumProcessModules(hProcess, lpModule, cb, lpcbNeeded);
}

DWORD os::PSApiDll::GetModuleFileNameEx(HANDLE hProcess, HMODULE hModule, LPTSTR lpFilename, DWORD nSize) {
  assert(initialized && _GetModuleFileNameEx != NULL,
    "PSApiAvailable() not yet called");
  return _GetModuleFileNameEx(hProcess, hModule, lpFilename, nSize);
}

BOOL os::PSApiDll::GetModuleInformation(HANDLE hProcess, HMODULE hModule, LPMODULEINFO lpmodinfo, DWORD cb) {
  assert(initialized && _GetModuleInformation != NULL,
    "PSApiAvailable() not yet called");
  return _GetModuleInformation(hProcess, hModule, lpmodinfo, cb);
}

// True only when all three PSAPI entry points resolved.
BOOL os::PSApiDll::PSApiAvailable() {
  if (!initialized) {
    initialize();
  }
  return _EnumProcessModules != NULL &&
    _GetModuleFileNameEx != NULL &&
    _GetModuleInformation != NULL;
}


// WinSock2 API
typedef int (PASCAL FAR* WSAStartup_Fn)(WORD, LPWSADATA);
// NOTE(review): the varargs '(...)' signature here does not match the real
// gethostbyname(const char*) prototype -- presumably tolerated because the
// wrapper below passes a single pointer; confirm against the calling
// convention before reusing this typedef.
typedef struct hostent *(PASCAL FAR *gethostbyname_Fn)(...);

// NULL until initialize() runs; NULL afterwards means ws2_32.dll was not
// found or lacks the entry point.
WSAStartup_Fn os::WinSock2Dll::_WSAStartup = NULL;
gethostbyname_Fn os::WinSock2Dll::_gethostbyname = NULL;
BOOL os::WinSock2Dll::initialized = FALSE;

// Load ws2_32.dll and resolve its entry points; attempted only once.
void os::WinSock2Dll::initialize() {
  if (!initialized) {
    HMODULE handle = os::win32::load_Windows_dll("ws2_32.dll", NULL, 0);
    if (handle != NULL) {
      _WSAStartup = (WSAStartup_Fn)::GetProcAddress(handle, "WSAStartup");
      _gethostbyname = (gethostbyname_Fn)::GetProcAddress(handle, "gethostbyname");
    }
    initialized = TRUE;
  }
}
BOOL os::WinSock2Dll::WSAStartup(WORD wVersionRequested, LPWSADATA lpWSAData) {
  assert(initialized && _WSAStartup != NULL,
    "WinSock2Available() not yet called");
  return _WSAStartup(wVersionRequested, lpWSAData);
}

struct hostent* os::WinSock2Dll::gethostbyname(const char *name) {
  assert(initialized && _gethostbyname != NULL,
    "WinSock2Available() not yet called");
  return _gethostbyname(name);
}

// True only when both WinSock2 entry points resolved; also triggers lazy
// resolution on first call.
BOOL os::WinSock2Dll::WinSock2Available() {
  if (!initialized) {
    initialize();
  }
  return _WSAStartup != NULL &&
    _gethostbyname != NULL;
}

// Advapi32 API -- resolved dynamically like the groups above.
typedef BOOL (WINAPI *AdjustTokenPrivileges_Fn)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD);
typedef BOOL (WINAPI *OpenProcessToken_Fn)(HANDLE, DWORD, PHANDLE);
typedef BOOL (WINAPI *LookupPrivilegeValue_Fn)(LPCTSTR, LPCTSTR, PLUID);

// NULL until initialize() runs; NULL afterwards means advapi32.dll was not
// found or lacks the entry point.
AdjustTokenPrivileges_Fn os::Advapi32Dll::_AdjustTokenPrivileges = NULL;
OpenProcessToken_Fn os::Advapi32Dll::_OpenProcessToken = NULL;
LookupPrivilegeValue_Fn os::Advapi32Dll::_LookupPrivilegeValue = NULL;
BOOL os::Advapi32Dll::initialized = FALSE;

// Load advapi32.dll and resolve its entry points; attempted only once.
void os::Advapi32Dll::initialize() {
  if (!initialized) {
    HMODULE handle = os::win32::load_Windows_dll("advapi32.dll", NULL, 0);
    if (handle != NULL) {
      _AdjustTokenPrivileges = (AdjustTokenPrivileges_Fn)::GetProcAddress(handle,
        "AdjustTokenPrivileges");
      _OpenProcessToken = (OpenProcessToken_Fn)::GetProcAddress(handle,
        "OpenProcessToken");
      _LookupPrivilegeValue = (LookupPrivilegeValue_Fn)::GetProcAddress(handle,
        "LookupPrivilegeValueA");
    }
    initialized = TRUE;
  }
}

BOOL os::Advapi32Dll::AdjustTokenPrivileges(HANDLE TokenHandle,
   BOOL DisableAllPrivileges, PTOKEN_PRIVILEGES NewState, DWORD BufferLength,
   PTOKEN_PRIVILEGES PreviousState, PDWORD ReturnLength) {
  assert(initialized && _AdjustTokenPrivileges !=
NULL,
    "AdvapiAvailable() not yet called");
  return _AdjustTokenPrivileges(TokenHandle, DisableAllPrivileges, NewState,
    BufferLength, PreviousState, ReturnLength);
}

BOOL os::Advapi32Dll::OpenProcessToken(HANDLE ProcessHandle, DWORD DesiredAccess,
  PHANDLE TokenHandle) {
  assert(initialized && _OpenProcessToken != NULL,
    "AdvapiAvailable() not yet called");
  return _OpenProcessToken(ProcessHandle, DesiredAccess, TokenHandle);
}

BOOL os::Advapi32Dll::LookupPrivilegeValue(LPCTSTR lpSystemName, LPCTSTR lpName, PLUID lpLuid) {
  assert(initialized && _LookupPrivilegeValue != NULL,
    "AdvapiAvailable() not yet called");
  return _LookupPrivilegeValue(lpSystemName, lpName, lpLuid);
}

// True only when all three Advapi32 entry points resolved; also triggers
// lazy resolution on first call.
BOOL os::Advapi32Dll::AdvapiAvailable() {
  if (!initialized) {
    initialize();
  }
  return _AdjustTokenPrivileges != NULL &&
    _OpenProcessToken != NULL &&
    _LookupPrivilegeValue != NULL;
}

#endif