1 /* 2 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce 26 #define _WIN32_WINNT 0x0600 27 28 // no precompiled headers 29 #include "jvm.h" 30 #include "classfile/classLoader.hpp" 31 #include "classfile/systemDictionary.hpp" 32 #include "classfile/vmSymbols.hpp" 33 #include "code/icBuffer.hpp" 34 #include "code/vtableStubs.hpp" 35 #include "compiler/compileBroker.hpp" 36 #include "compiler/disassembler.hpp" 37 #include "interpreter/interpreter.hpp" 38 #include "logging/log.hpp" 39 #include "memory/allocation.inline.hpp" 40 #include "memory/filemap.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "os_share_windows.hpp" 43 #include "os_windows.inline.hpp" 44 #include "prims/jniFastGetField.hpp" 45 #include "prims/jvm_misc.hpp" 46 #include "runtime/arguments.hpp" 47 #include "runtime/atomic.hpp" 48 #include "runtime/extendedPC.hpp" 49 #include "runtime/globals.hpp" 50 #include "runtime/interfaceSupport.inline.hpp" 51 #include "runtime/java.hpp" 52 #include "runtime/javaCalls.hpp" 53 #include "runtime/mutexLocker.hpp" 54 #include "runtime/objectMonitor.hpp" 55 #include "runtime/orderAccess.hpp" 56 #include "runtime/osThread.hpp" 57 #include "runtime/perfMemory.hpp" 58 #include "runtime/sharedRuntime.hpp" 59 #include "runtime/statSampler.hpp" 60 #include "runtime/stubRoutines.hpp" 61 #include "runtime/thread.inline.hpp" 62 #include "runtime/threadCritical.hpp" 63 #include "runtime/timer.hpp" 64 #include "runtime/vm_version.hpp" 65 #include "services/attachListener.hpp" 66 #include "services/memTracker.hpp" 67 #include "services/runtimeService.hpp" 68 #include "utilities/align.hpp" 69 #include "utilities/decoder.hpp" 70 #include "utilities/defaultStream.hpp" 71 #include "utilities/events.hpp" 72 #include "utilities/growableArray.hpp" 73 #include "utilities/macros.hpp" 74 #include "utilities/vmError.hpp" 75 #include "symbolengine.hpp" 76 #include "windbghelp.hpp" 77 78 79 #ifdef _DEBUG 80 #include <crtdbg.h> 81 #endif 82 83 84 
#include <windows.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/timeb.h>
#include <objidl.h>
#include <shlobj.h>

#include <malloc.h>
#include <signal.h>
#include <direct.h>
#include <errno.h>
#include <fcntl.h>
#include <io.h>
#include <process.h>              // For _beginthreadex(), _endthreadex()
#include <imagehlp.h>             // For os::dll_address_to_function_name
// for enumerating dll libraries
#include <vdmdbg.h>
#include <psapi.h>
#include <mmsystem.h>
#include <winsock2.h>

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(-1)

// For DLL loading/load error detection
// Values of PE COFF
#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
#define IMAGE_FILE_SIGNATURE_LENGTH 4

// Handles/ids of the process and primordial thread, captured during startup
// and used later (e.g. create_attached_thread duplicates main_process).
static HANDLE main_process;
static HANDLE main_thread;
static int main_thread_id;

// Process CPU-time snapshots as reported by GetProcessTimes().
static FILETIME process_creation_time;
static FILETIME process_exit_time;
static FILETIME process_user_time;
static FILETIME process_kernel_time;

#ifdef _M_AMD64
#define __CPU__ amd64
#else
#define __CPU__ i486
#endif

// save DLL module handle, used by GetModuleFileName

HINSTANCE vm_lib_handle;

// DLL entry point for jvm.dll. On attach, remembers the module handle,
// optionally raises the OS timer resolution to 1ms (ForceTimeHighResolution),
// and pre-initializes the dbghelp/symbol-engine wrappers; on detach it
// restores the timer period. 'reserved' is unused.
BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
  switch (reason) {
  case DLL_PROCESS_ATTACH:
    vm_lib_handle = hinst;
    if (ForceTimeHighResolution) {
      timeBeginPeriod(1L);  // matched by timeEndPeriod on detach
    }
    WindowsDbgHelp::pre_initialize();
    SymbolEngine::pre_initialize();
    break;
  case DLL_PROCESS_DETACH:
    if (ForceTimeHighResolution) {
      timeEndPeriod(1L);
    }
    break;
  default:
    break;
  }
  return true;
}

// Converts a FILETIME (64-bit count of 100ns intervals, split across two
// 32-bit halves) into seconds as a double: low/1e7 + high * (2^32-1)/1e7.
static inline double fileTimeAsDouble(FILETIME* time) {
  const double high  = (double) ((unsigned int) ~0);
  const double split = 10000000.0;
  double result = (time->dwLowDateTime / split) +
                   time->dwHighDateTime * (high/split);
  return result;
}

// Implementation of os

// Removes 'name' from the process environment. Returns true on success.
bool os::unsetenv(const char* name) {
  assert(name != NULL, "Null pointer");
  // SetEnvironmentVariable with a NULL value deletes the variable.
  return (SetEnvironmentVariable(name, NULL) == TRUE);
}

// No setuid programs under Windows.
bool os::have_special_privileges() {
  return false;
}


// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI, we can add any periodic checks here.
// For Windows at the moment does nothing
void os::run_periodic_checks() {
  return;
}

// previous UnhandledExceptionFilter, if there is one
static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;

LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);

// Computes and publishes java.home, the dll dir, the boot class path,
// java.library.path and the default extensions dirs, all derived from the
// location of jvm.dll (or the _ALT_JAVA_HOME_DIR override).
void os::init_system_properties_values() {
  // sysclasspath, java_home, dll_dir
  {
    char *home_path;
    char *dll_path;
    char *pslash;
    char *bin = "\\bin";
    char home_dir[MAX_PATH + 1];
    char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");

    if (alt_home_dir != NULL) {
      // Copy at most MAX_PATH+1 chars; explicitly NUL-terminate because
      // strncpy does not when the source fills the buffer.
      strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
      home_dir[MAX_PATH] = '\0';
    } else {
      os::jvm_path(home_dir, sizeof(home_dir));
      // Found the full path to jvm.dll.
      // Now cut the path to <java_home>/jre if we can.
      *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
      pslash = strrchr(home_dir, '\\');
      if (pslash != NULL) {
        *pslash = '\0';                   // get rid of \{client|server}
        pslash = strrchr(home_dir, '\\');
        if (pslash != NULL) {
          *pslash = '\0';                 // get rid of \bin
        }
      }
    }

    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
    if (home_path == NULL) {
      return;
    }
    strcpy(home_path, home_dir);
    // set_java_home copies the string, so the local buffer can be freed.
    Arguments::set_java_home(home_path);
    FREE_C_HEAP_ARRAY(char, home_path);

    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
                                mtInternal);
    if (dll_path == NULL) {
      return;
    }
    strcpy(dll_path, home_dir);
    strcat(dll_path, bin);
    Arguments::set_dll_dir(dll_path);
    FREE_C_HEAP_ARRAY(char, dll_path);

    if (!set_boot_path('\\', ';')) {
      vm_exit_during_initialization("Failed setting boot class path.", NULL);
    }
  }

  // library_path
#define EXT_DIR "\\lib\\ext"
#define BIN_DIR "\\bin"
#define PACKAGE_DIR "\\Sun\\Java"
  {
    // Win32 library search order (See the documentation for LoadLibrary):
    //
    // 1. The directory from which application is loaded.
    // 2. The system wide Java Extensions directory (Java only)
    // 3. System directory (GetSystemDirectory)
    // 4. Windows directory (GetWindowsDirectory)
    // 5. The PATH environment variable
    // 6. The current directory

    char *library_path;
    char tmp[MAX_PATH];
    char *path_str = ::getenv("PATH");

    // Size budget: up to 5 MAX_PATH-sized components, the two fixed suffix
    // macros, the PATH value, plus separators/terminator slack (+10).
    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
                                    sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);

    library_path[0] = '\0';

    // 1. Directory containing the executable.
    GetModuleFileName(NULL, tmp, sizeof(tmp));
    *(strrchr(tmp, '\\')) = '\0';  // strip the executable name
    strcat(library_path, tmp);

    // 2. System-wide Java extensions directory (<windir>\Sun\Java\bin).
    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);
    strcat(library_path, PACKAGE_DIR BIN_DIR);

    // 3. System directory.
    GetSystemDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    // 4. Windows directory.
    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    // 5. PATH environment variable, if set.
    if (path_str) {
      strcat(library_path, ";");
      strcat(library_path, path_str);
    }

    // 6. Current directory.
    strcat(library_path, ";.");

    Arguments::set_library_path(library_path);
    FREE_C_HEAP_ARRAY(char, library_path);
  }

  // Default extensions directory
  {
    char path[MAX_PATH];
    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
    GetWindowsDirectory(path, MAX_PATH);
    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
            path, PACKAGE_DIR, EXT_DIR);
    Arguments::set_ext_dirs(buf);
  }
#undef EXT_DIR
#undef BIN_DIR
#undef PACKAGE_DIR

#ifndef _WIN64
  // set our UnhandledExceptionFilter and save any previous one
  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif

  // Done
  return;
}

void os::breakpoint() {
  DebugBreak();
}

// Invoked from the BREAKPOINT Macro
extern "C" void breakpoint() {
  os::breakpoint();
}

// RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
// So far, this method is only used by Native Memory Tracking, which is
// only supported on Windows XP or later.
//
// Captures up to 'frames' return addresses of the current thread into
// 'stack', skipping 'toSkip' frames plus this function itself; unused
// slots are NULL-filled. Returns the number of frames captured.
int os::get_native_stack(address* stack, int frames, int toSkip) {
  int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
  for (int index = captured; index < frames; index ++) {
    stack[index] = NULL;
  }
  return captured;
}


// os::current_stack_base()
//
// Returns the base of the stack, which is the stack's
// starting address. This function must be called
// while running on the stack of the thread being queried.

address os::current_stack_base() {
  MEMORY_BASIC_INFORMATION minfo;
  address stack_bottom;
  size_t stack_size;

  // Query the region containing 'minfo' itself, i.e. a current stack address.
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  stack_bottom = (address)minfo.AllocationBase;
  stack_size = minfo.RegionSize;

  // Add up the sizes of all the regions with the same
  // AllocationBase.
  while (1) {
    VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
    if (stack_bottom == (address)minfo.AllocationBase) {
      stack_size += minfo.RegionSize;
    } else {
      break;
    }
  }
  // The base is the highest address: bottom of the allocation plus its size.
  return stack_bottom + stack_size;
}

// Distance from the stack base down to the lowest address of the current
// stack allocation. Must run on the stack being measured.
size_t os::current_stack_size() {
  size_t sz;
  MEMORY_BASIC_INFORMATION minfo;
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
  return sz;
}

// Scans [start, start+size) and reports the first committed sub-range:
// committed_start/committed_size describe the first maximal committed run.
// Returns false if no byte in the range is committed.
bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
  MEMORY_BASIC_INFORMATION minfo;
  committed_start = NULL;
  committed_size = 0;
  address top = start + size;
  const address start_addr = start;
  while (start < top) {
    VirtualQuery(start, &minfo, sizeof(minfo));
    if ((minfo.State & MEM_COMMIT) == 0) {  // not committed
      if (committed_start != NULL) {
        // A committed run was already found and has now ended.
        break;
      }
    } else {  // committed
      if (committed_start == NULL) {
        committed_start = start;
      }
      // 'start' may lie inside the region; only count from 'start' onward.
      size_t offset = start - (address)minfo.BaseAddress;
      committed_size += minfo.RegionSize - offset;
    }
    start = (address)minfo.BaseAddress + minfo.RegionSize;
  }

  if (committed_start == NULL) {
    assert(committed_size == 0, "Sanity");
    return false;
  } else {
    assert(committed_start >= start_addr && committed_start < top, "Out of range");
    // current region may go beyond the limit, trim to the limit
    committed_size = MIN2(committed_size, size_t(top - committed_start));
    return true;
  }
}

// Thread-safe wrapper over localtime(): copies the result into the
// caller-supplied 'res'. Returns NULL if conversion fails.
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  const struct tm* time_struct_ptr = localtime(clock);
  if (time_struct_ptr != NULL) {
    *res = *time_struct_ptr;
    return res;
  }
  return NULL;
}

// Like localtime_pd but for UTC via gmtime().
struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
  const struct tm* time_struct_ptr = gmtime(clock);
  if (time_struct_ptr != NULL) {
    *res = *time_struct_ptr;
    return res;
  }
  return NULL;
}

LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);

// Thread start routine for all newly created threads
static unsigned __stdcall thread_native_entry(Thread* thread) {

  thread->record_stack_base_and_size();

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  // Shift this frame by 0..7 * 128 bytes depending on pid and a counter.
  _alloca(((pid ^ counter++) & 7) * 128);

  thread->initialize_thread_current();

  OSThread* osthr = thread->osthread();
  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Diagnostic code to investigate JDK-6573254
  int res = 30115;  // non-java thread
  if (thread->is_Java_thread()) {
    res = 20115;    // java thread
  }

  // Install a win32 structured exception handler around every thread created
  // by VM, so VM can generate error dump when an exception occurred in non-
  // Java thread (e.g. VM thread).
  __try {
    thread->call_run();
  } __except(topLevelExceptionFilter(
                                     (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::add code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec(&os::win32::_os_thread_count);
  }

  // If a thread has not deleted itself ("delete this") as part of its
  // termination sequence, we have to ensure thread-local-storage is
  // cleared before we actually terminate. No threads should ever be
  // deleted asynchronously with respect to their termination.
  if (Thread::current_or_null_safe() != NULL) {
    assert(Thread::current_or_null_safe() == thread, "current thread is wrong");
    thread->clear_thread_current();
  }

  // Thread must not return from exit_process_or_thread(), but if it does,
  // let it proceed to exit normally
  return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
}

// Builds an OSThread wrapping an already-existing Win32 thread (handle + id).
// Returns NULL on allocation or event-creation failure; the new OSThread is
// left in state INITIALIZED.
static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
                                  int thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Initialize support for Java interrupts
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return NULL;
  }
  osthread->set_interrupt_event(interrupt_event);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}


// Attaches the calling (externally created) thread to the VM: duplicates its
// thread handle, builds an OSThread for it and marks it RUNNABLE.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  HANDLE thread_h;
  if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
                       &thread_h, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  OSThread* osthread = create_os_thread(thread, thread_h,
                                        (int)current_thread_id());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
                       os::current_thread_id());

  return true;
}


bool os::create_main_thread(JavaThread* thread) { 543 #ifdef ASSERT 544 thread->verify_not_published(); 545 #endif 546 if (_starting_thread == NULL) { 547 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 548 if (_starting_thread == NULL) { 549 return false; 550 } 551 } 552 553 // The primordial thread is runnable from the start) 554 _starting_thread->set_state(RUNNABLE); 555 556 thread->set_osthread(_starting_thread); 557 return true; 558 } 559 560 // Helper function to trace _beginthreadex attributes, 561 // similar to os::Posix::describe_pthread_attr() 562 static char* describe_beginthreadex_attributes(char* buf, size_t buflen, 563 size_t stacksize, unsigned initflag) { 564 stringStream ss(buf, buflen); 565 if (stacksize == 0) { 566 ss.print("stacksize: default, "); 567 } else { 568 ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024); 569 } 570 ss.print("flags: "); 571 #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " "); 572 #define ALL(X) \ 573 X(CREATE_SUSPENDED) \ 574 X(STACK_SIZE_PARAM_IS_A_RESERVATION) 575 ALL(PRINT_FLAG) 576 #undef ALL 577 #undef PRINT_FLAG 578 return buf; 579 } 580 581 // Allocate and initialize a new OSThread 582 bool os::create_thread(Thread* thread, ThreadType thr_type, 583 size_t stack_size) { 584 unsigned thread_id; 585 586 // Allocate the OSThread object 587 OSThread* osthread = new OSThread(NULL, NULL); 588 if (osthread == NULL) { 589 return false; 590 } 591 592 // Initialize support for Java interrupts 593 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 594 if (interrupt_event == NULL) { 595 delete osthread; 596 return NULL; 597 } 598 osthread->set_interrupt_event(interrupt_event); 599 osthread->set_interrupted(false); 600 601 thread->set_osthread(osthread); 602 603 if (stack_size == 0) { 604 switch (thr_type) { 605 case os::java_thread: 606 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 607 if (JavaThread::stack_size_at_create() > 0) { 
608 stack_size = JavaThread::stack_size_at_create(); 609 } 610 break; 611 case os::compiler_thread: 612 if (CompilerThreadStackSize > 0) { 613 stack_size = (size_t)(CompilerThreadStackSize * K); 614 break; 615 } // else fall through: 616 // use VMThreadStackSize if CompilerThreadStackSize is not defined 617 case os::vm_thread: 618 case os::pgc_thread: 619 case os::cgc_thread: 620 case os::watcher_thread: 621 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 622 break; 623 } 624 } 625 626 // Create the Win32 thread 627 // 628 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() 629 // does not specify stack size. Instead, it specifies the size of 630 // initially committed space. The stack size is determined by 631 // PE header in the executable. If the committed "stack_size" is larger 632 // than default value in the PE header, the stack is rounded up to the 633 // nearest multiple of 1MB. For example if the launcher has default 634 // stack size of 320k, specifying any size less than 320k does not 635 // affect the actual stack size at all, it only affects the initial 636 // commitment. On the other hand, specifying 'stack_size' larger than 637 // default value may cause significant increase in memory usage, because 638 // not only the stack space will be rounded up to MB, but also the 639 // entire space is committed upfront. 640 // 641 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' 642 // for CreateThread() that can treat 'stack_size' as stack size. However we 643 // are not supposed to call CreateThread() directly according to MSDN 644 // document because JVM uses C runtime library. The good news is that the 645 // flag appears to work with _beginthredex() as well. 
646 647 const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION; 648 HANDLE thread_handle = 649 (HANDLE)_beginthreadex(NULL, 650 (unsigned)stack_size, 651 (unsigned (__stdcall *)(void*)) thread_native_entry, 652 thread, 653 initflag, 654 &thread_id); 655 656 char buf[64]; 657 if (thread_handle != NULL) { 658 log_info(os, thread)("Thread started (tid: %u, attributes: %s)", 659 thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag)); 660 } else { 661 log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.", 662 os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag)); 663 } 664 665 if (thread_handle == NULL) { 666 // Need to clean up stuff we've allocated so far 667 CloseHandle(osthread->interrupt_event()); 668 thread->set_osthread(NULL); 669 delete osthread; 670 return NULL; 671 } 672 673 Atomic::inc(&os::win32::_os_thread_count); 674 675 // Store info on the Win32 thread into the OSThread 676 osthread->set_thread_handle(thread_handle); 677 osthread->set_thread_id(thread_id); 678 679 // Initial thread state is INITIALIZED, not SUSPENDED 680 osthread->set_state(INITIALIZED); 681 682 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 683 return true; 684 } 685 686 687 // Free Win32 resources related to the OSThread 688 void os::free_thread(OSThread* osthread) { 689 assert(osthread != NULL, "osthread not set"); 690 691 // We are told to free resources of the argument thread, 692 // but we can only really operate on the current thread. 
693 assert(Thread::current()->osthread() == osthread, 694 "os::free_thread but not current thread"); 695 696 CloseHandle(osthread->thread_handle()); 697 CloseHandle(osthread->interrupt_event()); 698 delete osthread; 699 } 700 701 static jlong first_filetime; 702 static jlong initial_performance_count; 703 static jlong performance_frequency; 704 705 706 jlong as_long(LARGE_INTEGER x) { 707 jlong result = 0; // initialization to avoid warning 708 set_high(&result, x.HighPart); 709 set_low(&result, x.LowPart); 710 return result; 711 } 712 713 714 jlong os::elapsed_counter() { 715 LARGE_INTEGER count; 716 QueryPerformanceCounter(&count); 717 return as_long(count) - initial_performance_count; 718 } 719 720 721 jlong os::elapsed_frequency() { 722 return performance_frequency; 723 } 724 725 726 julong os::available_memory() { 727 return win32::available_memory(); 728 } 729 730 julong os::win32::available_memory() { 731 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 732 // value if total memory is larger than 4GB 733 MEMORYSTATUSEX ms; 734 ms.dwLength = sizeof(ms); 735 GlobalMemoryStatusEx(&ms); 736 737 return (julong)ms.ullAvailPhys; 738 } 739 740 julong os::physical_memory() { 741 return win32::physical_memory(); 742 } 743 744 bool os::has_allocatable_memory_limit(julong* limit) { 745 MEMORYSTATUSEX ms; 746 ms.dwLength = sizeof(ms); 747 GlobalMemoryStatusEx(&ms); 748 #ifdef _LP64 749 *limit = (julong)ms.ullAvailVirtual; 750 return true; 751 #else 752 // Limit to 1400m because of the 2gb address space wall 753 *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual); 754 return true; 755 #endif 756 } 757 758 int os::active_processor_count() { 759 // User has overridden the number of active processors 760 if (ActiveProcessorCount > 0) { 761 log_trace(os)("active_processor_count: " 762 "active processor count set by user : %d", 763 ActiveProcessorCount); 764 return ActiveProcessorCount; 765 } 766 767 DWORD_PTR lpProcessAffinityMask = 0; 768 
DWORD_PTR lpSystemAffinityMask = 0; 769 int proc_count = processor_count(); 770 if (proc_count <= sizeof(UINT_PTR) * BitsPerByte && 771 GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) { 772 // Nof active processors is number of bits in process affinity mask 773 int bitcount = 0; 774 while (lpProcessAffinityMask != 0) { 775 lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1); 776 bitcount++; 777 } 778 return bitcount; 779 } else { 780 return proc_count; 781 } 782 } 783 784 void os::set_native_thread_name(const char *name) { 785 786 // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx 787 // 788 // Note that unfortunately this only works if the process 789 // is already attached to a debugger; debugger must observe 790 // the exception below to show the correct name. 791 792 // If there is no debugger attached skip raising the exception 793 if (!IsDebuggerPresent()) { 794 return; 795 } 796 797 const DWORD MS_VC_EXCEPTION = 0x406D1388; 798 struct { 799 DWORD dwType; // must be 0x1000 800 LPCSTR szName; // pointer to name (in user addr space) 801 DWORD dwThreadID; // thread ID (-1=caller thread) 802 DWORD dwFlags; // reserved for future use, must be zero 803 } info; 804 805 info.dwType = 0x1000; 806 info.szName = name; 807 info.dwThreadID = -1; 808 info.dwFlags = 0; 809 810 __try { 811 RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info ); 812 } __except(EXCEPTION_EXECUTE_HANDLER) {} 813 } 814 815 bool os::distribute_processes(uint length, uint* distribution) { 816 // Not yet implemented. 817 return false; 818 } 819 820 bool os::bind_to_processor(uint processor_id) { 821 // Not yet implemented. 
822 return false; 823 } 824 825 void os::win32::initialize_performance_counter() { 826 LARGE_INTEGER count; 827 QueryPerformanceFrequency(&count); 828 performance_frequency = as_long(count); 829 QueryPerformanceCounter(&count); 830 initial_performance_count = as_long(count); 831 } 832 833 834 double os::elapsedTime() { 835 return (double) elapsed_counter() / (double) elapsed_frequency(); 836 } 837 838 839 // Windows format: 840 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601. 841 // Java format: 842 // Java standards require the number of milliseconds since 1/1/1970 843 844 // Constant offset - calculated using offset() 845 static jlong _offset = 116444736000000000; 846 // Fake time counter for reproducible results when debugging 847 static jlong fake_time = 0; 848 849 #ifdef ASSERT 850 // Just to be safe, recalculate the offset in debug mode 851 static jlong _calculated_offset = 0; 852 static int _has_calculated_offset = 0; 853 854 jlong offset() { 855 if (_has_calculated_offset) return _calculated_offset; 856 SYSTEMTIME java_origin; 857 java_origin.wYear = 1970; 858 java_origin.wMonth = 1; 859 java_origin.wDayOfWeek = 0; // ignored 860 java_origin.wDay = 1; 861 java_origin.wHour = 0; 862 java_origin.wMinute = 0; 863 java_origin.wSecond = 0; 864 java_origin.wMilliseconds = 0; 865 FILETIME jot; 866 if (!SystemTimeToFileTime(&java_origin, &jot)) { 867 fatal("Error = %d\nWindows error", GetLastError()); 868 } 869 _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime); 870 _has_calculated_offset = 1; 871 assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal"); 872 return _calculated_offset; 873 } 874 #else 875 jlong offset() { 876 return _offset; 877 } 878 #endif 879 880 jlong windows_to_java_time(FILETIME wt) { 881 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 882 return (a - offset()) / 10000; 883 } 884 885 // Returns time ticks in 
(10th of micro seconds) 886 jlong windows_to_time_ticks(FILETIME wt) { 887 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 888 return (a - offset()); 889 } 890 891 FILETIME java_to_windows_time(jlong l) { 892 jlong a = (l * 10000) + offset(); 893 FILETIME result; 894 result.dwHighDateTime = high(a); 895 result.dwLowDateTime = low(a); 896 return result; 897 } 898 899 bool os::supports_vtime() { return true; } 900 bool os::enable_vtime() { return false; } 901 bool os::vtime_enabled() { return false; } 902 903 double os::elapsedVTime() { 904 FILETIME created; 905 FILETIME exited; 906 FILETIME kernel; 907 FILETIME user; 908 if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) { 909 // the resolution of windows_to_java_time() should be sufficient (ms) 910 return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS; 911 } else { 912 return elapsedTime(); 913 } 914 } 915 916 jlong os::javaTimeMillis() { 917 if (UseFakeTimers) { 918 return fake_time++; 919 } else { 920 FILETIME wt; 921 GetSystemTimeAsFileTime(&wt); 922 return windows_to_java_time(wt); 923 } 924 } 925 926 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) { 927 FILETIME wt; 928 GetSystemTimeAsFileTime(&wt); 929 jlong ticks = windows_to_time_ticks(wt); // 10th of micros 930 jlong secs = jlong(ticks / 10000000); // 10000 * 1000 931 seconds = secs; 932 nanos = jlong(ticks - (secs*10000000)) * 100; 933 } 934 935 jlong os::javaTimeNanos() { 936 LARGE_INTEGER current_count; 937 QueryPerformanceCounter(¤t_count); 938 double current = as_long(current_count); 939 double freq = performance_frequency; 940 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC); 941 return time; 942 } 943 944 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 945 jlong freq = performance_frequency; 946 if (freq < NANOSECS_PER_SEC) { 947 // the performance counter is 64 bits and we will 948 // be multiplying it -- so no wrap in 64 bits 949 info_ptr->max_value 
= ALL_64_BITS; 950 } else if (freq > NANOSECS_PER_SEC) { 951 // use the max value the counter can reach to 952 // determine the max value which could be returned 953 julong max_counter = (julong)ALL_64_BITS; 954 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC)); 955 } else { 956 // the performance counter is 64 bits and we will 957 // be using it directly -- so no wrap in 64 bits 958 info_ptr->max_value = ALL_64_BITS; 959 } 960 961 // using a counter, so no skipping 962 info_ptr->may_skip_backward = false; 963 info_ptr->may_skip_forward = false; 964 965 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time 966 } 967 968 char* os::local_time_string(char *buf, size_t buflen) { 969 SYSTEMTIME st; 970 GetLocalTime(&st); 971 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 972 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); 973 return buf; 974 } 975 976 bool os::getTimesSecs(double* process_real_time, 977 double* process_user_time, 978 double* process_system_time) { 979 HANDLE h_process = GetCurrentProcess(); 980 FILETIME create_time, exit_time, kernel_time, user_time; 981 BOOL result = GetProcessTimes(h_process, 982 &create_time, 983 &exit_time, 984 &kernel_time, 985 &user_time); 986 if (result != 0) { 987 FILETIME wt; 988 GetSystemTimeAsFileTime(&wt); 989 jlong rtc_millis = windows_to_java_time(wt); 990 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS); 991 *process_user_time = 992 (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS); 993 *process_system_time = 994 (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS); 995 return true; 996 } else { 997 return false; 998 } 999 } 1000 1001 void os::shutdown() { 1002 // allow PerfMemory to attempt cleanup of any persistent resources 1003 perfMemory_exit(); 1004 1005 // flush buffered output, finish log files 1006 ostream_abort(); 1007 1008 // Check for abort hook 1009 
// Tail of the enclosing shutdown routine (its opening lines are outside this
// chunk): run the user-installed abort hook, if any, before going down.
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}


// Handle of the minidump file; created eagerly by check_dump_limit() and
// written (then closed) by os::abort().
static HANDLE dumpFile = NULL;

// Check if dump file can be created.
void os::check_dump_limit(char* buffer, size_t buffsz) {
  bool status = true;
  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
    jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
    status = false;
  }

#ifndef ASSERT
  // In product builds, minidumps are off by default on client (workstation)
  // editions of Windows unless explicitly requested.
  if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
    jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
    status = false;
  }
#endif

  if (status) {
    const char* cwd = get_current_directory(NULL, 0);
    int pid = current_process_id();
    // Dump path is <cwd>\hs_err_pid<pid>.mdmp, falling back to a relative
    // path if the current directory cannot be determined.
    if (cwd != NULL) {
      jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
    } else {
      jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
    }

    // Create the file up front so any failure is reported now rather than
    // at crash time.
    if (dumpFile == NULL &&
       (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
                 == INVALID_HANDLE_VALUE) {
      jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
      status = false;
    }
  }
  VMError::record_coredump_status(buffer, status);
}

// Abort the VM, optionally writing a minidump first. Never returns.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  MINIDUMP_TYPE dumpType;

  shutdown();
  if (!dump_core || dumpFile == NULL) {
    if (dumpFile != NULL) {
      CloseHandle(dumpFile);
    }
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
                             MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);

  // If exception details are available, pass them along so the dump records
  // the faulting thread and context; otherwise dump without them.
  if (siginfo != NULL && context != NULL) {
    ep.ContextRecord = (PCONTEXT) context;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }

  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
      !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
    jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
  }
  CloseHandle(dumpFile);
  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
//  * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

// Caller must have already run dirname through JVM_NativePath, which removes
// duplicate slashes and converts all instances of '/' into '\\'.

// Open a directory stream over 'dirname' using FindFirstFile/FindNextFile.
// Returns NULL with errno set (ENOMEM/ENOENT/ENOTDIR/EACCES) on failure.
DIR * os::opendir(const char *dirname) {
  assert(dirname != NULL, "just checking");      // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
  DWORD fattr;                                   // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == 0) {
    errno = ENOMEM;
    return 0;
  }

  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
  // as a directory in FindFirstFile().  We detect this case here and
  // prepend the current drive name.
  //
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    alt_dirname[0] = _getdrive() + 'A' - 1;
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  // +5 leaves room for the "\\*.*" wildcard suffix appended below plus NUL.
  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dirp->path == 0) {
    free(dirp);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dirp->path, dirname);

  fattr = GetFileAttributes(dirp->path);
  if (fattr == 0xffffffff) {
    free(dirp->path);
    free(dirp);
    errno = ENOENT;
    return 0;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path);
    free(dirp);
    errno = ENOTDIR;
    return 0;
  }

  // Append "*.*", or possibly "\\*.*", to path
  if (dirp->path[1] == ':' &&
      (dirp->path[2] == '\0' ||
      (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    // No '\\' needed for cases like "Z:" or "Z:\"
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    // ERROR_FILE_NOT_FOUND means an empty directory: keep the DIR alive so
    // readdir() simply reports end-of-stream; any other error is fatal here.
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path);
      free(dirp);
      errno = EACCES;
      return 0;
    }
  }
  return dirp;
}

// Return the next entry of the directory stream, or NULL when exhausted.
// Note the one-entry lookahead: find_data always holds the entry to return
// next; FindNextFile prefetches the one after it.
struct dirent * os::readdir(DIR *dirp) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    return NULL;
  }

  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  // On the final entry close the handle so the next call returns NULL.
  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return NULL;
    }
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}

// Close a directory stream opened by os::opendir() and release its storage.
int os::closedir(DIR *dirp) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path);
  free(dirp);
  return 0;
}

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf) > 0) {
    return path_buf;
  } else {
    path_buf[0] = '\0';
    return path_buf;
  }
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
const char* os::get_current_directory(char *buf, size_t buflen) {
  // _getcwd takes an int; clamp oversized size_t lengths.
  int n = static_cast<int>(buflen);
  if (buflen > INT_MAX)  n = INT_MAX;
  return _getcwd(buf, n);
}

//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if address in
// within the NTDLL address space.
//
// Returns true iff 'addr' lies inside the loaded image of NTDLL.DLL.
static bool _addr_in_ntdll(address addr) {
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if (hmod == NULL) return false;
  if (!GetModuleInformation(GetCurrentProcess(), hmod,
                            &minfo, sizeof(MODULEINFO))) {
    return false;
  }

  if ((addr >= minfo.lpBaseOfDll) &&
      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
    return true;
  } else {
    return false;
  }
}
#endif

// Parameter block for _locate_module_by_addr().
struct _modinfo {
  address addr;
  char*   full_path;  // point to a char buffer
  int     buflen;     // size of the buffer
  address base_addr;
};

// Module-enumeration callback: records the module containing pmod->addr.
// Returns 1 (stop enumeration) on a hit, 0 to continue, -1 on bad param.
static int _locate_module_by_addr(const char * mod_fname, address base_addr,
                                  address top_address, void * param) {
  struct _modinfo *pmod = (struct _modinfo *)param;
  if (!pmod) return -1;

  if (base_addr <= pmod->addr &&
      top_address > pmod->addr) {
    // if a buffer is provided, copy path name to the buffer
    if (pmod->full_path) {
      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
    }
    pmod->base_addr = base_addr;
    return 1;
  }
  return 0;
}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
  //       return the full path to the DLL file, sometimes it returns path
  //       to the corresponding PDB file (debug info); sometimes it only
  //       returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr      = addr;
  mi.full_path = buf;
  mi.buflen    = buflen;
  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
    return true;
  }
  if (offset != NULL)  *offset  = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
// (the address of this function itself is used to identify jvm.dll)
static int _locate_jvm_dll(const char* mod_fname, address base_addr,
                           address top_address, void * param) {
  if (!param) return -1;

  if (base_addr <= (address)_locate_jvm_dll &&
      top_address > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = top_address;
    return 1;
  }
  return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
bool os::address_is_in_vm(address addr) {
  // Lazily resolve and cache jvm.dll's address range on first use.
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(const char* fname, address base_address,
                         address top_address, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n",
            base_address, top_address, fname);
  return 0;
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
  void * result = LoadLibrary(name);
  if (result != NULL) {
    // Recalculate pdb search path if a DLL was loaded successfully.
    SymbolEngine::recalc_search_path();
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
    ebuf[ebuflen - 1] = '\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen - 1] = '\0';
  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
  if (fd < 0) {
    return NULL;
  }

  // Parse the PE header by hand to find the COFF machine type the DLL was
  // built for.
  uint32_t signature_offset;
  uint16_t lib_arch = 0;
  bool failed_to_get_lib_arch =
    ( // Go to position 3c in the dll
     (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
     ||
     // Read location of signature
     (sizeof(signature_offset) !=
     (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
     ||
     // Go to COFF File Header in dll
     // that is located after "signature" (4 bytes long)
     (os::seek_to_file_offset(fd,
     signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
     ||
     // Read field that contains code of architecture
     // that dll was built for
     (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
    );

  ::close(fd);
  if (failed_to_get_lib_arch) {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  typedef struct {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[] = {
    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"}
  };
#if (defined _M_AMD64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
         is defined :_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str = NULL, *lib_arch_str = NULL;
  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (lib_arch == arch_array[i].arch_code) {
      lib_arch_str = arch_array[i].arch_name;
    }
    if (running_arch == arch_array[i].arch_code) {
      running_arch_str = arch_array[i].arch_name;
    }
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch) {
    return NULL;
  }

  if (lib_arch_str != NULL) {
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str, running_arch_str);
  } else {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch, running_arch_str);
  }

  return NULL;
}

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  get_loaded_modules_info(_print_module, (void *)st);
}

// Enumerate the modules loaded in the current process, invoking 'callback'
// for each; stops early when the callback returns non-zero and returns that
// value (0 if all modules were visited or enumeration failed).
// NOTE: 'filename' is a static buffer, so this is not reentrant.
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  HANDLE   hProcess;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[MAX_PATH];
  int         result = 0;

  int pid = os::current_process_id();
  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  // size_needed may exceed our buffer; visit at most MAX_NUM_MODULES.
  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = callback(filename, (address)modinfo.lpBaseOfDll,
                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
1515 } 1516 1517 bool os::get_host_name(char* buf, size_t buflen) { 1518 DWORD size = (DWORD)buflen; 1519 return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE); 1520 } 1521 1522 void os::get_summary_os_info(char* buf, size_t buflen) { 1523 stringStream sst(buf, buflen); 1524 os::win32::print_windows_version(&sst); 1525 // chop off newline character 1526 char* nl = strchr(buf, '\n'); 1527 if (nl != NULL) *nl = '\0'; 1528 } 1529 1530 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) { 1531 #if _MSC_VER >= 1900 1532 // Starting with Visual Studio 2015, vsnprint is C99 compliant. 1533 int result = ::vsnprintf(buf, len, fmt, args); 1534 // If an encoding error occurred (result < 0) then it's not clear 1535 // whether the buffer is NUL terminated, so ensure it is. 1536 if ((result < 0) && (len > 0)) { 1537 buf[len - 1] = '\0'; 1538 } 1539 return result; 1540 #else 1541 // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use 1542 // _vsnprintf, whose behavior seems to be *mostly* consistent across 1543 // versions. However, when len == 0, avoid _vsnprintf too, and just 1544 // go straight to _vscprintf. The output is going to be truncated in 1545 // that case, except in the unusual case of empty output. More 1546 // importantly, the documentation for various versions of Visual Studio 1547 // are inconsistent about the behavior of _vsnprintf when len == 0, 1548 // including it possibly being an error. 1549 int result = -1; 1550 if (len > 0) { 1551 result = _vsnprintf(buf, len, fmt, args); 1552 // If output (including NUL terminator) is truncated, the buffer 1553 // won't be NUL terminated. Add the trailing NUL specified by C99. 
1554 if ((result < 0) || ((size_t)result >= len)) { 1555 buf[len - 1] = '\0'; 1556 } 1557 } 1558 if (result < 0) { 1559 result = _vscprintf(fmt, args); 1560 } 1561 return result; 1562 #endif // _MSC_VER dispatch 1563 } 1564 1565 static inline time_t get_mtime(const char* filename) { 1566 struct stat st; 1567 int ret = os::stat(filename, &st); 1568 assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno)); 1569 return st.st_mtime; 1570 } 1571 1572 int os::compare_file_modified_times(const char* file1, const char* file2) { 1573 time_t t1 = get_mtime(file1); 1574 time_t t2 = get_mtime(file2); 1575 return t1 - t2; 1576 } 1577 1578 void os::print_os_info_brief(outputStream* st) { 1579 os::print_os_info(st); 1580 } 1581 1582 void os::print_os_info(outputStream* st) { 1583 #ifdef ASSERT 1584 char buffer[1024]; 1585 st->print("HostName: "); 1586 if (get_host_name(buffer, sizeof(buffer))) { 1587 st->print("%s ", buffer); 1588 } else { 1589 st->print("N/A "); 1590 } 1591 #endif 1592 st->print("OS:"); 1593 os::win32::print_windows_version(st); 1594 } 1595 1596 void os::win32::print_windows_version(outputStream* st) { 1597 OSVERSIONINFOEX osvi; 1598 VS_FIXEDFILEINFO *file_info; 1599 TCHAR kernel32_path[MAX_PATH]; 1600 UINT len, ret; 1601 1602 // Use the GetVersionEx information to see if we're on a server or 1603 // workstation edition of Windows. Starting with Windows 8.1 we can't 1604 // trust the OS version information returned by this API. 1605 ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX)); 1606 osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); 1607 if (!GetVersionEx((OSVERSIONINFO *)&osvi)) { 1608 st->print_cr("Call to GetVersionEx failed"); 1609 return; 1610 } 1611 bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION); 1612 1613 // Get the full path to \Windows\System32\kernel32.dll and use that for 1614 // determining what version of Windows we're running on. 
1615 len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1; 1616 ret = GetSystemDirectory(kernel32_path, len); 1617 if (ret == 0 || ret > len) { 1618 st->print_cr("Call to GetSystemDirectory failed"); 1619 return; 1620 } 1621 strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret); 1622 1623 DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL); 1624 if (version_size == 0) { 1625 st->print_cr("Call to GetFileVersionInfoSize failed"); 1626 return; 1627 } 1628 1629 LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal); 1630 if (version_info == NULL) { 1631 st->print_cr("Failed to allocate version_info"); 1632 return; 1633 } 1634 1635 if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) { 1636 os::free(version_info); 1637 st->print_cr("Call to GetFileVersionInfo failed"); 1638 return; 1639 } 1640 1641 if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) { 1642 os::free(version_info); 1643 st->print_cr("Call to VerQueryValue failed"); 1644 return; 1645 } 1646 1647 int major_version = HIWORD(file_info->dwProductVersionMS); 1648 int minor_version = LOWORD(file_info->dwProductVersionMS); 1649 int build_number = HIWORD(file_info->dwProductVersionLS); 1650 int build_minor = LOWORD(file_info->dwProductVersionLS); 1651 int os_vers = major_version * 1000 + minor_version; 1652 os::free(version_info); 1653 1654 st->print(" Windows "); 1655 switch (os_vers) { 1656 1657 case 6000: 1658 if (is_workstation) { 1659 st->print("Vista"); 1660 } else { 1661 st->print("Server 2008"); 1662 } 1663 break; 1664 1665 case 6001: 1666 if (is_workstation) { 1667 st->print("7"); 1668 } else { 1669 st->print("Server 2008 R2"); 1670 } 1671 break; 1672 1673 case 6002: 1674 if (is_workstation) { 1675 st->print("8"); 1676 } else { 1677 st->print("Server 2012"); 1678 } 1679 break; 1680 1681 case 6003: 1682 if (is_workstation) { 1683 st->print("8.1"); 1684 } else { 1685 st->print("Server 2012 R2"); 1686 } 1687 break; 1688 1689 case 10000: 
1690 if (is_workstation) { 1691 st->print("10"); 1692 } else { 1693 st->print("Server 2016"); 1694 } 1695 break; 1696 1697 default: 1698 // Unrecognized windows, print out its major and minor versions 1699 st->print("%d.%d", major_version, minor_version); 1700 break; 1701 } 1702 1703 // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could 1704 // find out whether we are running on 64 bit processor or not 1705 SYSTEM_INFO si; 1706 ZeroMemory(&si, sizeof(SYSTEM_INFO)); 1707 GetNativeSystemInfo(&si); 1708 if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) { 1709 st->print(" , 64 bit"); 1710 } 1711 1712 st->print(" Build %d", build_number); 1713 st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor); 1714 st->cr(); 1715 } 1716 1717 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) { 1718 // Nothing to do for now. 1719 } 1720 1721 void os::get_summary_cpu_info(char* buf, size_t buflen) { 1722 HKEY key; 1723 DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE, 1724 "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key); 1725 if (status == ERROR_SUCCESS) { 1726 DWORD size = (DWORD)buflen; 1727 status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size); 1728 if (status != ERROR_SUCCESS) { 1729 strncpy(buf, "## __CPU__", buflen); 1730 } 1731 RegCloseKey(key); 1732 } else { 1733 // Put generic cpu info to return 1734 strncpy(buf, "## __CPU__", buflen); 1735 } 1736 } 1737 1738 void os::print_memory_info(outputStream* st) { 1739 st->print("Memory:"); 1740 st->print(" %dk page", os::vm_page_size()>>10); 1741 1742 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 1743 // value if total memory is larger than 4GB 1744 MEMORYSTATUSEX ms; 1745 ms.dwLength = sizeof(ms); 1746 int r1 = GlobalMemoryStatusEx(&ms); 1747 1748 if (r1 != 0) { 1749 st->print(", system-wide physical " INT64_FORMAT "M ", 1750 (int64_t) ms.ullTotalPhys >> 20); 1751 st->print("(" 
INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20); 1752 1753 st->print("TotalPageFile size " INT64_FORMAT "M ", 1754 (int64_t) ms.ullTotalPageFile >> 20); 1755 st->print("(AvailPageFile size " INT64_FORMAT "M)", 1756 (int64_t) ms.ullAvailPageFile >> 20); 1757 1758 // on 32bit Total/AvailVirtual are interesting (show us how close we get to 2-4 GB per process borders) 1759 #if defined(_M_IX86) 1760 st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ", 1761 (int64_t) ms.ullTotalVirtual >> 20); 1762 st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20); 1763 #endif 1764 } else { 1765 st->print(", GlobalMemoryStatusEx did not succeed so we miss some memory values."); 1766 } 1767 1768 // extended memory statistics for a process 1769 PROCESS_MEMORY_COUNTERS_EX pmex; 1770 ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX)); 1771 pmex.cb = sizeof(pmex); 1772 int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex)); 1773 1774 if (r2 != 0) { 1775 st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ", 1776 (int64_t) pmex.WorkingSetSize >> 20); 1777 st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20); 1778 1779 st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ", 1780 (int64_t) pmex.PrivateUsage >> 20); 1781 st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20); 1782 } else { 1783 st->print("\nGetProcessMemoryInfo did not succeed so we miss some memory values."); 1784 } 1785 1786 st->cr(); 1787 } 1788 1789 void os::print_siginfo(outputStream *st, const void* siginfo) { 1790 const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo; 1791 st->print("siginfo:"); 1792 1793 char tmp[64]; 1794 if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) { 1795 strcpy(tmp, "EXCEPTION_??"); 1796 } 1797 st->print(" %s (0x%x)", tmp, er->ExceptionCode); 
1798 1799 if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION || 1800 er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) && 1801 er->NumberParameters >= 2) { 1802 switch (er->ExceptionInformation[0]) { 1803 case 0: st->print(", reading address"); break; 1804 case 1: st->print(", writing address"); break; 1805 case 8: st->print(", data execution prevention violation at address"); break; 1806 default: st->print(", ExceptionInformation=" INTPTR_FORMAT, 1807 er->ExceptionInformation[0]); 1808 } 1809 st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]); 1810 } else { 1811 int num = er->NumberParameters; 1812 if (num > 0) { 1813 st->print(", ExceptionInformation="); 1814 for (int i = 0; i < num; i++) { 1815 st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]); 1816 } 1817 } 1818 } 1819 st->cr(); 1820 } 1821 1822 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) { 1823 // do nothing 1824 } 1825 1826 static char saved_jvm_path[MAX_PATH] = {0}; 1827 1828 // Find the full path to the current module, jvm.dll 1829 void os::jvm_path(char *buf, jint buflen) { 1830 // Error checking. 1831 if (buflen < MAX_PATH) { 1832 assert(false, "must use a large-enough buffer"); 1833 buf[0] = '\0'; 1834 return; 1835 } 1836 // Lazy resolve the path to current module. 1837 if (saved_jvm_path[0] != 0) { 1838 strcpy(buf, saved_jvm_path); 1839 return; 1840 } 1841 1842 buf[0] = '\0'; 1843 if (Arguments::sun_java_launcher_is_altjvm()) { 1844 // Support for the java launcher's '-XXaltjvm=<path>' option. Check 1845 // for a JAVA_HOME environment variable and fix up the path so it 1846 // looks like jvm.dll is installed there (append a fake suffix 1847 // hotspot/jvm.dll). 
1848 char* java_home_var = ::getenv("JAVA_HOME"); 1849 if (java_home_var != NULL && java_home_var[0] != 0 && 1850 strlen(java_home_var) < (size_t)buflen) { 1851 strncpy(buf, java_home_var, buflen); 1852 1853 // determine if this is a legacy image or modules image 1854 // modules image doesn't have "jre" subdirectory 1855 size_t len = strlen(buf); 1856 char* jrebin_p = buf + len; 1857 jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\"); 1858 if (0 != _access(buf, 0)) { 1859 jio_snprintf(jrebin_p, buflen-len, "\\bin\\"); 1860 } 1861 len = strlen(buf); 1862 jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll"); 1863 } 1864 } 1865 1866 if (buf[0] == '\0') { 1867 GetModuleFileName(vm_lib_handle, buf, buflen); 1868 } 1869 strncpy(saved_jvm_path, buf, MAX_PATH); 1870 saved_jvm_path[MAX_PATH - 1] = '\0'; 1871 } 1872 1873 1874 void os::print_jni_name_prefix_on(outputStream* st, int args_size) { 1875 #ifndef _WIN64 1876 st->print("_"); 1877 #endif 1878 } 1879 1880 1881 void os::print_jni_name_suffix_on(outputStream* st, int args_size) { 1882 #ifndef _WIN64 1883 st->print("@%d", args_size * sizeof(int)); 1884 #endif 1885 } 1886 1887 // This method is a copy of JDK's sysGetLastErrorString 1888 // from src/windows/hpi/src/system_md.c 1889 1890 size_t os::lasterror(char* buf, size_t len) { 1891 DWORD errval; 1892 1893 if ((errval = GetLastError()) != 0) { 1894 // DOS error 1895 size_t n = (size_t)FormatMessage( 1896 FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS, 1897 NULL, 1898 errval, 1899 0, 1900 buf, 1901 (DWORD)len, 1902 NULL); 1903 if (n > 3) { 1904 // Drop final '.', CR, LF 1905 if (buf[n - 1] == '\n') n--; 1906 if (buf[n - 1] == '\r') n--; 1907 if (buf[n - 1] == '.') n--; 1908 buf[n] = '\0'; 1909 } 1910 return n; 1911 } 1912 1913 if (errno != 0) { 1914 // C runtime error that has no corresponding DOS error code 1915 const char* s = os::strerror(errno); 1916 size_t n = strlen(s); 1917 if (n >= len) n = len - 1; 1918 strncpy(buf, s, n); 1919 buf[n] = '\0'; 1920 
return n; 1921 } 1922 1923 return 0; 1924 } 1925 1926 int os::get_last_error() { 1927 DWORD error = GetLastError(); 1928 if (error == 0) { 1929 error = errno; 1930 } 1931 return (int)error; 1932 } 1933 1934 // sun.misc.Signal 1935 // NOTE that this is a workaround for an apparent kernel bug where if 1936 // a signal handler for SIGBREAK is installed then that signal handler 1937 // takes priority over the console control handler for CTRL_CLOSE_EVENT. 1938 // See bug 4416763. 1939 static void (*sigbreakHandler)(int) = NULL; 1940 1941 static void UserHandler(int sig, void *siginfo, void *context) { 1942 os::signal_notify(sig); 1943 // We need to reinstate the signal handler each time... 1944 os::signal(sig, (void*)UserHandler); 1945 } 1946 1947 void* os::user_handler() { 1948 return (void*) UserHandler; 1949 } 1950 1951 void* os::signal(int signal_number, void* handler) { 1952 if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) { 1953 void (*oldHandler)(int) = sigbreakHandler; 1954 sigbreakHandler = (void (*)(int)) handler; 1955 return (void*) oldHandler; 1956 } else { 1957 return (void*)::signal(signal_number, (void (*)(int))handler); 1958 } 1959 } 1960 1961 void os::signal_raise(int signal_number) { 1962 raise(signal_number); 1963 } 1964 1965 // The Win32 C runtime library maps all console control events other than ^C 1966 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close, 1967 // logoff, and shutdown events. We therefore install our own console handler 1968 // that raises SIGTERM for the latter cases. 1969 // 1970 static BOOL WINAPI consoleHandler(DWORD event) { 1971 switch (event) { 1972 case CTRL_C_EVENT: 1973 if (VMError::is_error_reported()) { 1974 // Ctrl-C is pressed during error reporting, likely because the error 1975 // handler fails to abort. Let VM die immediately. 
      os::die();
  }

  os::signal_raise(SIGINT);
  return TRUE;
  break;
  case CTRL_BREAK_EVENT:
    // Forward Ctrl-Break to the registered SIGBREAK handler, if any
    // (used e.g. for the thread-dump mechanism).
    if (sigbreakHandler != NULL) {
      (*sigbreakHandler)(SIGBREAK);
    }
    return TRUE;
    break;
  case CTRL_LOGOFF_EVENT: {
    // Don't terminate JVM if it is running in a non-interactive session,
    // such as a service process.
    USEROBJECTFLAGS flags;
    HANDLE handle = GetProcessWindowStation();
    if (handle != NULL &&
        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
                                 sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let next handler to deal
      // with it.
      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
        return FALSE;
      }
    }
    // Interactive session: deliberately fall through and treat logoff
    // like close/shutdown below.
  }
  case CTRL_CLOSE_EVENT:
  case CTRL_SHUTDOWN_EVENT:
    os::signal_raise(SIGTERM);
    return TRUE;
    break;
  default:
    break;
  }
  return FALSE;
}

// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
static Semaphore* sig_sem = NULL;

static void jdk_misc_signal_init() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  sig_sem = new Semaphore();

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3. For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified. This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case. See bugs 4323062, 4345157, and related bugs.

  // Add a CTRL-C handler
  SetConsoleCtrlHandler(consoleHandler, TRUE);
}

// Record an occurrence of signal 'sig' and wake up the signal thread.
void os::signal_notify(int sig) {
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[sig]);
    sig_sem->signal();
  } else {
    // Signal thread is not created with ReduceSignalUsage and jdk_misc_signal_init
    // initialization isn't called.
    assert(ReduceSignalUsage, "signal semaphore should be created");
  }
}

// Blocks until a signal is pending, then atomically claims one pending
// signal (decrements its counter via CAS) and returns its number.
static int check_pending_signals() {
  while (true) {
    // Scan all counters; the CAS ensures each pending occurrence is
    // consumed by exactly one caller.
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      sig_sem->wait();

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        sig_sem->signal();

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

int os::signal_wait() {
  return check_pending_signals();
}

// Implicit OS exception handling

// Redirect the faulting context to 'handler': saves the faulting pc in the
// current JavaThread (if any) and rewrites the context's instruction pointer.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
                      address handler) {
  JavaThread* thread = (JavaThread*) Thread::current_or_null();
  // Save pc in thread
#ifdef _M_AMD64
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
#else
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
#endif

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}


// Used for PostMortemDump
extern "C" void safepoints();
extern "C" void find(int x);
extern "C" void events();

// According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real world experience shows that occasionnaly
// the execution of an illegal instruction can generate the exception code 0xC000001E. This
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).

#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E

// From "Execution Protection in the Windows Operating System" draft 0.35
// Once a system header becomes available, the "real" define should be
// included or copied here.
2142 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08 2143 2144 // Windows Vista/2008 heap corruption check 2145 #define EXCEPTION_HEAP_CORRUPTION 0xC0000374 2146 2147 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual 2148 // C++ compiler contain this error code. Because this is a compiler-generated 2149 // error, the code is not listed in the Win32 API header files. 2150 // The code is actually a cryptic mnemonic device, with the initial "E" 2151 // standing for "exception" and the final 3 bytes (0x6D7363) representing the 2152 // ASCII values of "msc". 2153 2154 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363 2155 2156 #define def_excpt(val) { #val, (val) } 2157 2158 static const struct { char* name; uint number; } exceptlabels[] = { 2159 def_excpt(EXCEPTION_ACCESS_VIOLATION), 2160 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), 2161 def_excpt(EXCEPTION_BREAKPOINT), 2162 def_excpt(EXCEPTION_SINGLE_STEP), 2163 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), 2164 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND), 2165 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO), 2166 def_excpt(EXCEPTION_FLT_INEXACT_RESULT), 2167 def_excpt(EXCEPTION_FLT_INVALID_OPERATION), 2168 def_excpt(EXCEPTION_FLT_OVERFLOW), 2169 def_excpt(EXCEPTION_FLT_STACK_CHECK), 2170 def_excpt(EXCEPTION_FLT_UNDERFLOW), 2171 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO), 2172 def_excpt(EXCEPTION_INT_OVERFLOW), 2173 def_excpt(EXCEPTION_PRIV_INSTRUCTION), 2174 def_excpt(EXCEPTION_IN_PAGE_ERROR), 2175 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION), 2176 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2), 2177 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION), 2178 def_excpt(EXCEPTION_STACK_OVERFLOW), 2179 def_excpt(EXCEPTION_INVALID_DISPOSITION), 2180 def_excpt(EXCEPTION_GUARD_PAGE), 2181 def_excpt(EXCEPTION_INVALID_HANDLE), 2182 def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION), 2183 def_excpt(EXCEPTION_HEAP_CORRUPTION) 2184 }; 2185 2186 #undef def_excpt 2187 2188 const char* os::exception_name(int exception_code, char *buf, size_t 
size) { 2189 uint code = static_cast<uint>(exception_code); 2190 for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) { 2191 if (exceptlabels[i].number == code) { 2192 jio_snprintf(buf, size, "%s", exceptlabels[i].name); 2193 return buf; 2194 } 2195 } 2196 2197 return NULL; 2198 } 2199 2200 //----------------------------------------------------------------------------- 2201 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2202 // handle exception caused by idiv; should only happen for -MinInt/-1 2203 // (division by zero is handled explicitly) 2204 #ifdef _M_AMD64 2205 PCONTEXT ctx = exceptionInfo->ContextRecord; 2206 address pc = (address)ctx->Rip; 2207 assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode"); 2208 assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2209 if (pc[0] == 0xF7) { 2210 // set correct result values and continue after idiv instruction 2211 ctx->Rip = (DWORD64)pc + 2; // idiv reg, reg is 2 bytes 2212 } else { 2213 ctx->Rip = (DWORD64)pc + 3; // REX idiv reg, reg is 3 bytes 2214 } 2215 // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation) 2216 // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the 2217 // idiv opcode (0xF7). 
2218 ctx->Rdx = (DWORD)0; // remainder 2219 // Continue the execution 2220 #else 2221 PCONTEXT ctx = exceptionInfo->ContextRecord; 2222 address pc = (address)ctx->Eip; 2223 assert(pc[0] == 0xF7, "not an idiv opcode"); 2224 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2225 assert(ctx->Eax == min_jint, "unexpected idiv exception"); 2226 // set correct result values and continue after idiv instruction 2227 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2228 ctx->Eax = (DWORD)min_jint; // result 2229 ctx->Edx = (DWORD)0; // remainder 2230 // Continue the execution 2231 #endif 2232 return EXCEPTION_CONTINUE_EXECUTION; 2233 } 2234 2235 //----------------------------------------------------------------------------- 2236 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2237 PCONTEXT ctx = exceptionInfo->ContextRecord; 2238 #ifndef _WIN64 2239 // handle exception caused by native method modifying control word 2240 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2241 2242 switch (exception_code) { 2243 case EXCEPTION_FLT_DENORMAL_OPERAND: 2244 case EXCEPTION_FLT_DIVIDE_BY_ZERO: 2245 case EXCEPTION_FLT_INEXACT_RESULT: 2246 case EXCEPTION_FLT_INVALID_OPERATION: 2247 case EXCEPTION_FLT_OVERFLOW: 2248 case EXCEPTION_FLT_STACK_CHECK: 2249 case EXCEPTION_FLT_UNDERFLOW: 2250 jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std()); 2251 if (fp_control_word != ctx->FloatSave.ControlWord) { 2252 // Restore FPCW and mask out FLT exceptions 2253 ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0; 2254 // Mask out pending FLT exceptions 2255 ctx->FloatSave.StatusWord &= 0xffffff00; 2256 return EXCEPTION_CONTINUE_EXECUTION; 2257 } 2258 } 2259 2260 if (prev_uef_handler != NULL) { 2261 // We didn't handle this exception so pass it to the previous 2262 // UnhandledExceptionFilter. 
2263 return (prev_uef_handler)(exceptionInfo); 2264 } 2265 #else // !_WIN64 2266 // On Windows, the mxcsr control bits are non-volatile across calls 2267 // See also CR 6192333 2268 // 2269 jint MxCsr = INITIAL_MXCSR; 2270 // we can't use StubRoutines::addr_mxcsr_std() 2271 // because in Win64 mxcsr is not saved there 2272 if (MxCsr != ctx->MxCsr) { 2273 ctx->MxCsr = MxCsr; 2274 return EXCEPTION_CONTINUE_EXECUTION; 2275 } 2276 #endif // !_WIN64 2277 2278 return EXCEPTION_CONTINUE_SEARCH; 2279 } 2280 2281 static inline void report_error(Thread* t, DWORD exception_code, 2282 address addr, void* siginfo, void* context) { 2283 VMError::report_and_die(t, exception_code, addr, siginfo, context); 2284 2285 // If UseOsErrorReporting, this will return here and save the error file 2286 // somewhere where we can find it in the minidump. 2287 } 2288 2289 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread, 2290 struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) { 2291 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2292 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2293 if (Interpreter::contains(pc)) { 2294 *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord); 2295 if (!fr->is_first_java_frame()) { 2296 // get_frame_at_stack_banging_point() is only called when we 2297 // have well defined stacks so java_sender() calls do not need 2298 // to assert safe_for_sender() first. 
2299 *fr = fr->java_sender(); 2300 } 2301 } else { 2302 // more complex code with compiled code 2303 assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above"); 2304 CodeBlob* cb = CodeCache::find_blob(pc); 2305 if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) { 2306 // Not sure where the pc points to, fallback to default 2307 // stack overflow handling 2308 return false; 2309 } else { 2310 *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord); 2311 // in compiled code, the stack banging is performed just after the return pc 2312 // has been pushed on the stack 2313 *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp())); 2314 if (!fr->is_java_frame()) { 2315 // See java_sender() comment above. 2316 *fr = fr->java_sender(); 2317 } 2318 } 2319 } 2320 assert(fr->is_java_frame(), "Safety check"); 2321 return true; 2322 } 2323 2324 //----------------------------------------------------------------------------- 2325 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { 2326 if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH; 2327 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2328 #ifdef _M_AMD64 2329 address pc = (address) exceptionInfo->ContextRecord->Rip; 2330 #else 2331 address pc = (address) exceptionInfo->ContextRecord->Eip; 2332 #endif 2333 Thread* t = Thread::current_or_null_safe(); 2334 2335 // Handle SafeFetch32 and SafeFetchN exceptions. 2336 if (StubRoutines::is_safefetch_fault(pc)) { 2337 return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc)); 2338 } 2339 2340 #ifndef _WIN64 2341 // Execution protection violation - win32 running on AMD64 only 2342 // Handled first to avoid misdiagnosis as a "normal" access violation; 2343 // This is safe to do because we have a new/unique ExceptionInformation 2344 // code for this condition. 
2345 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2346 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2347 int exception_subcode = (int) exceptionRecord->ExceptionInformation[0]; 2348 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2349 2350 if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) { 2351 int page_size = os::vm_page_size(); 2352 2353 // Make sure the pc and the faulting address are sane. 2354 // 2355 // If an instruction spans a page boundary, and the page containing 2356 // the beginning of the instruction is executable but the following 2357 // page is not, the pc and the faulting address might be slightly 2358 // different - we still want to unguard the 2nd page in this case. 2359 // 2360 // 15 bytes seems to be a (very) safe value for max instruction size. 2361 bool pc_is_near_addr = 2362 (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15); 2363 bool instr_spans_page_boundary = 2364 (align_down((intptr_t) pc ^ (intptr_t) addr, 2365 (intptr_t) page_size) > 0); 2366 2367 if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) { 2368 static volatile address last_addr = 2369 (address) os::non_memory_address_word(); 2370 2371 // In conservative mode, don't unguard unless the address is in the VM 2372 if (UnguardOnExecutionViolation > 0 && addr != last_addr && 2373 (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) { 2374 2375 // Set memory to RWX and retry 2376 address page_start = align_down(addr, page_size); 2377 bool res = os::protect_memory((char*) page_start, page_size, 2378 os::MEM_PROT_RWX); 2379 2380 log_debug(os)("Execution protection violation " 2381 "at " INTPTR_FORMAT 2382 ", unguarding " INTPTR_FORMAT ": %s", p2i(addr), 2383 p2i(page_start), (res ? "success" : os::strerror(errno))); 2384 2385 // Set last_addr so if we fault again at the same address, we don't 2386 // end up in an endless loop. 2387 // 2388 // There are two potential complications here. 
Two threads trapping 2389 // at the same address at the same time could cause one of the 2390 // threads to think it already unguarded, and abort the VM. Likely 2391 // very rare. 2392 // 2393 // The other race involves two threads alternately trapping at 2394 // different addresses and failing to unguard the page, resulting in 2395 // an endless loop. This condition is probably even more unlikely 2396 // than the first. 2397 // 2398 // Although both cases could be avoided by using locks or thread 2399 // local last_addr, these solutions are unnecessary complication: 2400 // this handler is a best-effort safety net, not a complete solution. 2401 // It is disabled by default and should only be used as a workaround 2402 // in case we missed any no-execute-unsafe VM code. 2403 2404 last_addr = addr; 2405 2406 return EXCEPTION_CONTINUE_EXECUTION; 2407 } 2408 } 2409 2410 // Last unguard failed or not unguarding 2411 tty->print_raw_cr("Execution protection violation"); 2412 report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord, 2413 exceptionInfo->ContextRecord); 2414 return EXCEPTION_CONTINUE_SEARCH; 2415 } 2416 } 2417 #endif // _WIN64 2418 2419 // Check to see if we caught the safepoint code in the 2420 // process of write protecting the memory serialization page. 2421 // It write enables the page immediately after protecting it 2422 // so just return. 2423 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2424 if (t != NULL && t->is_Java_thread()) { 2425 JavaThread* thread = (JavaThread*) t; 2426 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2427 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2428 if (os::is_memory_serialize_page(thread, addr)) { 2429 // Block current thread until the memory serialize page permission restored. 
2430 os::block_on_serialize_page_trap(); 2431 return EXCEPTION_CONTINUE_EXECUTION; 2432 } 2433 } 2434 } 2435 2436 if ((exception_code == EXCEPTION_ACCESS_VIOLATION) && 2437 VM_Version::is_cpuinfo_segv_addr(pc)) { 2438 // Verify that OS save/restore AVX registers. 2439 return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr()); 2440 } 2441 2442 if (t != NULL && t->is_Java_thread()) { 2443 JavaThread* thread = (JavaThread*) t; 2444 bool in_java = thread->thread_state() == _thread_in_Java; 2445 2446 // Handle potential stack overflows up front. 2447 if (exception_code == EXCEPTION_STACK_OVERFLOW) { 2448 if (thread->stack_guards_enabled()) { 2449 if (in_java) { 2450 frame fr; 2451 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2452 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2453 if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) { 2454 assert(fr.is_java_frame(), "Must be a Java frame"); 2455 SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr); 2456 } 2457 } 2458 // Yellow zone violation. The o/s has unprotected the first yellow 2459 // zone page for us. Note: must call disable_stack_yellow_zone to 2460 // update the enabled status, even if the zone contains only one page. 2461 assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages"); 2462 thread->disable_stack_yellow_reserved_zone(); 2463 // If not in java code, return and hope for the best. 2464 return in_java 2465 ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)) 2466 : EXCEPTION_CONTINUE_EXECUTION; 2467 } else { 2468 // Fatal red zone violation. 
2469 thread->disable_stack_red_zone(); 2470 tty->print_raw_cr("An unrecoverable stack overflow has occurred."); 2471 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2472 exceptionInfo->ContextRecord); 2473 return EXCEPTION_CONTINUE_SEARCH; 2474 } 2475 } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2476 // Either stack overflow or null pointer exception. 2477 if (in_java) { 2478 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2479 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2480 address stack_end = thread->stack_end(); 2481 if (addr < stack_end && addr >= stack_end - os::vm_page_size()) { 2482 // Stack overflow. 2483 assert(!os::uses_stack_guard_pages(), 2484 "should be caught by red zone code above."); 2485 return Handle_Exception(exceptionInfo, 2486 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2487 } 2488 // Check for safepoint polling and implicit null 2489 // We only expect null pointers in the stubs (vtable) 2490 // the rest are checked explicitly now. 2491 CodeBlob* cb = CodeCache::find_blob(pc); 2492 if (cb != NULL) { 2493 if (os::is_poll_address(addr)) { 2494 address stub = SharedRuntime::get_poll_stub(pc); 2495 return Handle_Exception(exceptionInfo, stub); 2496 } 2497 } 2498 { 2499 #ifdef _WIN64 2500 // If it's a legal stack address map the entire region in 2501 // 2502 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2503 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2504 if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) { 2505 addr = (address)((uintptr_t)addr & 2506 (~((uintptr_t)os::vm_page_size() - (uintptr_t)1))); 2507 os::commit_memory((char *)addr, thread->stack_base() - addr, 2508 !ExecMem); 2509 return EXCEPTION_CONTINUE_EXECUTION; 2510 } else 2511 #endif 2512 { 2513 // Null pointer exception. 
2514 if (!MacroAssembler::needs_explicit_null_check((intptr_t)addr)) { 2515 address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); 2516 if (stub != NULL) return Handle_Exception(exceptionInfo, stub); 2517 } 2518 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2519 exceptionInfo->ContextRecord); 2520 return EXCEPTION_CONTINUE_SEARCH; 2521 } 2522 } 2523 } 2524 2525 #ifdef _WIN64 2526 // Special care for fast JNI field accessors. 2527 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks 2528 // in and the heap gets shrunk before the field access. 2529 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2530 address addr = JNI_FastGetField::find_slowcase_pc(pc); 2531 if (addr != (address)-1) { 2532 return Handle_Exception(exceptionInfo, addr); 2533 } 2534 } 2535 #endif 2536 2537 // Stack overflow or null pointer exception in native code. 2538 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2539 exceptionInfo->ContextRecord); 2540 return EXCEPTION_CONTINUE_SEARCH; 2541 } // /EXCEPTION_ACCESS_VIOLATION 2542 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 2543 2544 if (exception_code == EXCEPTION_IN_PAGE_ERROR) { 2545 CompiledMethod* nm = NULL; 2546 JavaThread* thread = (JavaThread*)t; 2547 if (in_java) { 2548 CodeBlob* cb = CodeCache::find_blob_unsafe(pc); 2549 nm = (cb != NULL) ? 
cb->as_compiled_method_or_null() : NULL; 2550 } 2551 if ((thread->thread_state() == _thread_in_vm && 2552 thread->doing_unsafe_access()) || 2553 (nm != NULL && nm->has_unsafe_access())) { 2554 return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, (address)Assembler::locate_next_instruction(pc))); 2555 } 2556 } 2557 2558 if (in_java) { 2559 switch (exception_code) { 2560 case EXCEPTION_INT_DIVIDE_BY_ZERO: 2561 return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO)); 2562 2563 case EXCEPTION_INT_OVERFLOW: 2564 return Handle_IDiv_Exception(exceptionInfo); 2565 2566 } // switch 2567 } 2568 if (((thread->thread_state() == _thread_in_Java) || 2569 (thread->thread_state() == _thread_in_native)) && 2570 exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) { 2571 LONG result=Handle_FLT_Exception(exceptionInfo); 2572 if (result==EXCEPTION_CONTINUE_EXECUTION) return result; 2573 } 2574 } 2575 2576 if (exception_code != EXCEPTION_BREAKPOINT) { 2577 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2578 exceptionInfo->ContextRecord); 2579 } 2580 return EXCEPTION_CONTINUE_SEARCH; 2581 } 2582 2583 #ifndef _WIN64 2584 // Special care for fast JNI accessors. 2585 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and 2586 // the heap gets shrunk before the field access. 2587 // Need to install our own structured exception handler since native code may 2588 // install its own. 
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    // If the fault pc belongs to a fast accessor, redirect execution to
    // its slow-case entry; otherwise let the search continue.
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Wraps a fast JNI field getter in a structured exception handler (SEH)
// that routes access violations through fastJNIAccessorExceptionFilter.
#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
  Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
                                                     jobject obj,           \
                                                     jfieldID fieldID) {    \
    __try {                                                                 \
      return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
                                                                 obj,       \
                                                                 fieldID);  \
    } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
                                              _exception_info())) {         \
    }                                                                       \
    return 0;                                                               \
  }

DEFINE_FAST_GETFIELD(jboolean, bool, Boolean)
DEFINE_FAST_GETFIELD(jbyte, byte, Byte)
DEFINE_FAST_GETFIELD(jchar, char, Char)
DEFINE_FAST_GETFIELD(jshort, short, Short)
DEFINE_FAST_GETFIELD(jint, int, Int)
DEFINE_FAST_GETFIELD(jlong, long, Long)
DEFINE_FAST_GETFIELD(jfloat, float, Float)
DEFINE_FAST_GETFIELD(jdouble, double, Double)

// Returns the SEH-wrapped fast getter entry point for the given field type.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
  case T_BYTE: return (address)jni_fast_GetByteField_wrapper;
  case T_CHAR: return (address)jni_fast_GetCharField_wrapper;
  case T_SHORT: return (address)jni_fast_GetShortField_wrapper;
  case T_INT: return (address)jni_fast_GetIntField_wrapper;
  case T_LONG: return (address)jni_fast_GetLongField_wrapper;
  case T_FLOAT: return (address)jni_fast_GetFloatField_wrapper;
  case T_DOUBLE: return (address)jni_fast_GetDoubleField_wrapper;
  default: ShouldNotReachHere();
  }
  return (address)-1;
}
#endif

// Virtual Memory

int os::vm_page_size() { return os::win32::vm_page_size(); }
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}

// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
//   + select Control Panel -> Administrative Tools -> Local Security Policy
//   + select Local Policies -> User Rights Assignment
//   + double click "Lock pages in memory", add users and/or groups
//   + reboot
// Note the above steps are needed for administrator as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment it only uses large page if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future, if so the code below needs to be revisited.

#ifndef MEM_LARGE_PAGES
#define MEM_LARGE_PAGES 0x20000000
#endif

static HANDLE _hProcess;
static HANDLE _hToken;

// Container for NUMA node list info
class NUMANodeListHolder {
 private:
  int *_numa_used_node_list;  // allocated below
  int _numa_used_node_count;

  void free_node_list() {
    if (_numa_used_node_list != NULL) {
      FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
    }
  }

 public:
  NUMANodeListHolder() {
    _numa_used_node_count = 0;
    _numa_used_node_list = NULL;
    // do rest of initialization in build routine (after function pointers are set up)
  }

  ~NUMANodeListHolder() {
    free_node_list();
  }

  // Collect the NUMA nodes whose processor masks intersect this process's
  // affinity mask. Returns true only if more than one node is usable.
  bool build() {
    DWORD_PTR proc_aff_mask;
    DWORD_PTR sys_aff_mask;
    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
    ULONG highest_node_number;
    if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
    free_node_list();
    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
    for (unsigned int i = 0; i <= highest_node_number; i++) {
      ULONGLONG proc_mask_numa_node;
      if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
      if ((proc_aff_mask & proc_mask_numa_node)!=0) {
        _numa_used_node_list[_numa_used_node_count++] = i;
      }
    }
    return (_numa_used_node_count > 1);
  }

  int get_count() { return _numa_used_node_count; }
  int get_node_list_entry(int n) {
    // for indexes out of range, returns -1
    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
  }

} numa_node_list_holder;



static size_t _large_page_size = 0;

// Try to enable SeLockMemoryPrivilege on the process token (required for
// large-page allocations). Leaves _hProcess/_hToken open for
// cleanup_after_large_page_init() to close.
static bool request_lock_memory_privilege() {
  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
                          os::current_process_id());

  LUID luid;
  if (_hProcess != NULL &&
      OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
      LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

    TOKEN_PRIVILEGES tp;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Luid = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See MSDN document.
    if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
        (GetLastError() == ERROR_SUCCESS)) {
      return true;
    }
  }

  return false;
}

static void cleanup_after_large_page_init() {
  if (_hProcess) CloseHandle(_hProcess);
  _hProcess = NULL;
  if (_hToken) CloseHandle(_hToken);
  _hToken = NULL;
}

static bool numa_interleaving_init() {
  bool success = false;
  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

  // print a warning if UseNUMAInterleaving flag is specified on command line
  bool warn_on_failure = use_numa_interleaving_specified;
#define WARN(msg) if (warn_on_failure) { warning(msg); }

  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);

  if (numa_node_list_holder.build()) {
    if (log_is_enabled(Debug, os, cpu)) {
      Log(os, cpu) log;
      log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
      for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
        log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
      }
    }
    success = true;
  } else {
    WARN("Process does not cover multiple NUMA nodes.");
  }
  if (!success) {
    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
  }
  return success;
#undef WARN
}

// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
//  * UseNUMAInterleaving requires a separate node for each piece
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
                                         DWORD prot,
                                         bool should_inject_error = false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) VirtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = align_up(p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t bytes_remaining = bytes;
  // An overflow of align_up() would have been caught above
  // in the calculation of size_of_reserve.
  char * next_alloc_addr = p_buf;
  HANDLE hProc = GetCurrentProcess();

#ifdef ASSERT
  // Variable for the failure injection
  int ran_num = os::random();
  size_t fail_after = ran_num % bytes;
#endif

  int count=0;
  while (bytes_remaining) {
    // select bytes_to_rq to get to the next chunk_size boundary

    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
    // Note allocate and commit
    char * p_new;

#ifdef ASSERT
    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
#else
    const bool inject_error_now = false;
#endif

    if (inject_error_now) {
      p_new = NULL;
    } else {
      if (!UseNUMAInterleaving) {
        p_new = (char *) VirtualAlloc(next_alloc_addr,
                                      bytes_to_rq,
                                      flags,
                                      prot);
      } else {
        // get the next node to use from the used_node_list
        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
        p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
      }
    }

    if (p_new == NULL) {
      // Free any allocated pages
      if (next_alloc_addr > p_buf) {
        // Some memory was committed so release it.
        size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // need to create a dummy 'reserve' record to match
        // the release.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        log_develop_debug(pagesize)("Reserving pages individually failed.");
      }
#endif
      return NULL;
    }

    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}



void os::large_page_init() {
  if (!UseLargePages) return;

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  bool success = false;

#define WARN(msg) if (warn_on_failure) { warning(msg); }
  if (request_lock_memory_privilege()) {
    size_t s = GetLargePageMinimum();
    if (s) {
#if defined(IA32) || defined(AMD64)
      if (s > 4*M || LargePageSizeInBytes > 4*M) {
        WARN("JVM cannot use large pages bigger than 4mb.");
      } else {
#endif
        if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
          _large_page_size = LargePageSizeInBytes;
        } else {
          _large_page_size = s;
        }
        success = true;
#if defined(IA32) || defined(AMD64)
      }
#endif
    } else {
      WARN("Large page is not supported by the processor.");
    }
  } else {
    WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
  }
#undef WARN

  const size_t default_page_size =
(size_t) vm_page_size(); 2943 if (success && _large_page_size > default_page_size) { 2944 _page_sizes[0] = _large_page_size; 2945 _page_sizes[1] = default_page_size; 2946 _page_sizes[2] = 0; 2947 } 2948 2949 cleanup_after_large_page_init(); 2950 UseLargePages = success; 2951 } 2952 2953 int os::create_file_for_heap(const char* dir) { 2954 2955 const char name_template[] = "/jvmheap.XXXXXX"; 2956 char *fullname = (char*)os::malloc((strlen(dir) + strlen(name_template) + 1), mtInternal); 2957 if (fullname == NULL) { 2958 vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno))); 2959 return -1; 2960 } 2961 2962 (void)strncpy(fullname, dir, strlen(dir)+1); 2963 (void)strncat(fullname, name_template, strlen(name_template)); 2964 2965 os::native_path(fullname); 2966 2967 char *path = _mktemp(fullname); 2968 if (path == NULL) { 2969 warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno)); 2970 os::free(fullname); 2971 return -1; 2972 } 2973 2974 int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD); 2975 2976 os::free(fullname); 2977 if (fd < 0) { 2978 warning("Problem opening file for heap (%s)", os::strerror(errno)); 2979 return -1; 2980 } 2981 return fd; 2982 } 2983 2984 // If 'base' is not NULL, function will return NULL if it cannot get 'base' 2985 char* os::map_memory_to_file(char* base, size_t size, int fd) { 2986 assert(fd != -1, "File descriptor is not valid"); 2987 2988 HANDLE fh = (HANDLE)_get_osfhandle(fd); 2989 #ifdef _LP64 2990 HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE, 2991 (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL); 2992 #else 2993 HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE, 2994 0, (DWORD)size, NULL); 2995 #endif 2996 if (fileMapping == NULL) { 2997 if (GetLastError() == ERROR_DISK_FULL) { 2998 vm_exit_during_initialization(err_msg("Could not allocate sufficient disk 
space for Java heap")); 2999 } 3000 else { 3001 vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory")); 3002 } 3003 3004 return NULL; 3005 } 3006 3007 LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base); 3008 3009 CloseHandle(fileMapping); 3010 3011 return (char*)addr; 3012 } 3013 3014 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) { 3015 assert(fd != -1, "File descriptor is not valid"); 3016 assert(base != NULL, "Base address cannot be NULL"); 3017 3018 release_memory(base, size); 3019 return map_memory_to_file(base, size, fd); 3020 } 3021 3022 // On win32, one cannot release just a part of reserved memory, it's an 3023 // all or nothing deal. When we split a reservation, we must break the 3024 // reservation into two reservations. 3025 void os::pd_split_reserved_memory(char *base, size_t size, size_t split, 3026 bool realloc) { 3027 if (size > 0) { 3028 release_memory(base, size); 3029 if (realloc) { 3030 reserve_memory(split, base); 3031 } 3032 if (size != split) { 3033 reserve_memory(size - split, base + split); 3034 } 3035 } 3036 } 3037 3038 // Multiple threads can race in this code but it's not possible to unmap small sections of 3039 // virtual space to get requested alignment, like posix-like os's. 3040 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe. 
// Reserves 'size' bytes aligned to 'alignment' by over-reserving, computing
// the aligned base, releasing, and re-reserving exactly at the aligned base.
// Retries until the racy release/re-reserve pair succeeds (see comment above).
char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;

  do {
    char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = align_up(extra_base, alignment);

    // Release the over-sized reservation; another thread may grab the range
    // before we re-reserve below, in which case we loop and try again.
    if (file_desc != -1) {
      os::unmap_memory(extra_base, extra_size);
    } else {
      os::release_memory(extra_base, extra_size);
    }

    aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);

  } while (aligned_base == NULL);

  return aligned_base;
}

// Reserves (but does not commit) 'bytes' of virtual address space, at 'addr'
// if non-NULL. With NUMA interleaving (and small pages) the reservation is
// built chunk by chunk across nodes via allocate_pages_individually().
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_page_size() == 0, "reserve page size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if (Verbose && PrintMiscellaneous) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if (Verbose && PrintMiscellaneous) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}

// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available.
  return reserve_memory(bytes, requested_addr);
}

// File-backed variant: maps the heap backing file at the requested address.
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
  assert(file_desc >= 0, "file_desc is not valid");
  return map_memory_to_file(requested_addr, bytes, file_desc);
}

// Returns the large page size selected by os::large_page_init().
size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
3124 return false; 3125 } 3126 3127 bool os::can_execute_large_page_memory() { 3128 return true; 3129 } 3130 3131 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, 3132 bool exec) { 3133 assert(UseLargePages, "only for large pages"); 3134 3135 if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) { 3136 return NULL; // Fallback to small pages. 3137 } 3138 3139 const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; 3140 const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 3141 3142 // with large pages, there are two cases where we need to use Individual Allocation 3143 // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003) 3144 // 2) NUMA Interleaving is enabled, in which case we use a different node for each page 3145 if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) { 3146 log_debug(pagesize)("Reserving large pages individually."); 3147 3148 char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError); 3149 if (p_buf == NULL) { 3150 // give an appropriate warning message 3151 if (UseNUMAInterleaving) { 3152 warning("NUMA large page allocation failed, UseLargePages flag ignored"); 3153 } 3154 if (UseLargePagesIndividualAllocation) { 3155 warning("Individually allocated large pages failed, " 3156 "use -XX:-UseLargePagesIndividualAllocation to turn off"); 3157 } 3158 return NULL; 3159 } 3160 3161 return p_buf; 3162 3163 } else { 3164 log_debug(pagesize)("Reserving large pages in a single large chunk."); 3165 3166 // normal policy just allocate it all at once 3167 DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 3168 char * res = (char *)VirtualAlloc(addr, bytes, flag, prot); 3169 if (res != NULL) { 3170 MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC); 3171 } 3172 3173 return res; 3174 } 3175 } 3176 3177 bool os::release_memory_special(char* base, size_t 
bytes) { 3178 assert(base != NULL, "Sanity check"); 3179 return release_memory(base, bytes); 3180 } 3181 3182 void os::print_statistics() { 3183 } 3184 3185 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) { 3186 int err = os::get_last_error(); 3187 char buf[256]; 3188 size_t buf_len = os::lasterror(buf, sizeof(buf)); 3189 warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT 3190 ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes, 3191 exec, buf_len != 0 ? buf : "<no_error_string>", err); 3192 } 3193 3194 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) { 3195 if (bytes == 0) { 3196 // Don't bother the OS with noops. 3197 return true; 3198 } 3199 assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries"); 3200 assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks"); 3201 // Don't attempt to print anything if the OS call fails. We're 3202 // probably low on resources, so the print itself may cause crashes. 3203 3204 // unless we have NUMAInterleaving enabled, the range of a commit 3205 // is always within a reserve covered by a single VirtualAlloc 3206 // in that case we can just do a single commit for the requested size 3207 if (!UseNUMAInterleaving) { 3208 if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) { 3209 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 3210 return false; 3211 } 3212 if (exec) { 3213 DWORD oldprot; 3214 // Windows doc says to use VirtualProtect to get execute permissions 3215 if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) { 3216 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 3217 return false; 3218 } 3219 } 3220 return true; 3221 } else { 3222 3223 // when NUMAInterleaving is enabled, the commit might cover a range that 3224 // came from multiple VirtualAlloc reserves (using allocate_pages_individually). 3225 // VirtualQuery can help us determine that. 
The RegionSize that VirtualQuery 3226 // returns represents the number of bytes that can be committed in one step. 3227 size_t bytes_remaining = bytes; 3228 char * next_alloc_addr = addr; 3229 while (bytes_remaining > 0) { 3230 MEMORY_BASIC_INFORMATION alloc_info; 3231 VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info)); 3232 size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3233 if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, 3234 PAGE_READWRITE) == NULL) { 3235 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 3236 exec);) 3237 return false; 3238 } 3239 if (exec) { 3240 DWORD oldprot; 3241 if (!VirtualProtect(next_alloc_addr, bytes_to_rq, 3242 PAGE_EXECUTE_READWRITE, &oldprot)) { 3243 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 3244 exec);) 3245 return false; 3246 } 3247 } 3248 bytes_remaining -= bytes_to_rq; 3249 next_alloc_addr += bytes_to_rq; 3250 } 3251 } 3252 // if we made it this far, return true 3253 return true; 3254 } 3255 3256 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, 3257 bool exec) { 3258 // alignment_hint is ignored on this OS 3259 return pd_commit_memory(addr, size, exec); 3260 } 3261 3262 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec, 3263 const char* mesg) { 3264 assert(mesg != NULL, "mesg must be specified"); 3265 if (!pd_commit_memory(addr, size, exec)) { 3266 warn_fail_commit_memory(addr, size, exec); 3267 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg); 3268 } 3269 } 3270 3271 void os::pd_commit_memory_or_exit(char* addr, size_t size, 3272 size_t alignment_hint, bool exec, 3273 const char* mesg) { 3274 // alignment_hint is ignored on this OS 3275 pd_commit_memory_or_exit(addr, size, exec, mesg); 3276 } 3277 3278 bool os::pd_uncommit_memory(char* addr, size_t bytes) { 3279 if (bytes == 0) { 3280 // Don't bother the OS with noops. 
3281 return true; 3282 } 3283 assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries"); 3284 assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks"); 3285 return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0); 3286 } 3287 3288 bool os::pd_release_memory(char* addr, size_t bytes) { 3289 return VirtualFree(addr, 0, MEM_RELEASE) != 0; 3290 } 3291 3292 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 3293 return os::commit_memory(addr, size, !ExecMem); 3294 } 3295 3296 bool os::remove_stack_guard_pages(char* addr, size_t size) { 3297 return os::uncommit_memory(addr, size); 3298 } 3299 3300 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) { 3301 uint count = 0; 3302 bool ret = false; 3303 size_t bytes_remaining = bytes; 3304 char * next_protect_addr = addr; 3305 3306 // Use VirtualQuery() to get the chunk size. 3307 while (bytes_remaining) { 3308 MEMORY_BASIC_INFORMATION alloc_info; 3309 if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) { 3310 return false; 3311 } 3312 3313 size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3314 // We used different API at allocate_pages_individually() based on UseNUMAInterleaving, 3315 // but we don't distinguish here as both cases are protected by same API. 
3316 ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0; 3317 warning("Failed protecting pages individually for chunk #%u", count); 3318 if (!ret) { 3319 return false; 3320 } 3321 3322 bytes_remaining -= bytes_to_protect; 3323 next_protect_addr += bytes_to_protect; 3324 count++; 3325 } 3326 return ret; 3327 } 3328 3329 // Set protections specified 3330 bool os::protect_memory(char* addr, size_t bytes, ProtType prot, 3331 bool is_committed) { 3332 unsigned int p = 0; 3333 switch (prot) { 3334 case MEM_PROT_NONE: p = PAGE_NOACCESS; break; 3335 case MEM_PROT_READ: p = PAGE_READONLY; break; 3336 case MEM_PROT_RW: p = PAGE_READWRITE; break; 3337 case MEM_PROT_RWX: p = PAGE_EXECUTE_READWRITE; break; 3338 default: 3339 ShouldNotReachHere(); 3340 } 3341 3342 DWORD old_status; 3343 3344 // Strange enough, but on Win32 one can change protection only for committed 3345 // memory, not a big deal anyway, as bytes less or equal than 64K 3346 if (!is_committed) { 3347 commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX, 3348 "cannot commit protection page"); 3349 } 3350 // One cannot use os::guard_memory() here, as on Win32 guard page 3351 // have different (one-shot) semantics, from MSDN on PAGE_GUARD: 3352 // 3353 // Pages in the region become guard pages. Any attempt to access a guard page 3354 // causes the system to raise a STATUS_GUARD_PAGE exception and turn off 3355 // the guard page status. Guard pages thus act as a one-time access alarm. 3356 bool ret; 3357 if (UseNUMAInterleaving) { 3358 // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time, 3359 // so we must protect the chunks individually. 
    ret = protect_pages_individually(addr, bytes, p, &old_status);
  } else {
    ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
  }
#ifdef ASSERT
  if (!ret) {
    int err = os::get_last_error();
    char buf[256];
    size_t buf_len = os::lasterror(buf, sizeof(buf));
    warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
            ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
            buf_len != 0 ? buf : "<no_error_string>", err);
  }
#endif
  return ret;
}

// Marks pages as one-shot guard pages (see PAGE_GUARD note above).
bool os::guard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}

// Restores plain read/write protection, removing any guard status.
bool os::unguard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}

// NUMA/paging hints below are no-ops or trivial on Windows.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
bool os::numa_topology_changed() { return false; }
size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id() { return 0; }

// Fills 'ids' with up to 'size' NUMA node ids; reports a single node 0 on
// UMA systems. Returns the number of entries written.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // Provide an answer for UMA systems
    ids[0] = 0;
    return 1;
  } else {
    // check for size bigger than actual groups_num
    size = MIN2(size, numa_get_groups_num());
    for (int i = 0; i < (int)size; i++) {
      ids[i] = numa_node_list_holder.get_node_list_entry(i);
    }
    return size;
  }
}

// Page info queries are not supported on Windows.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected,
                     page_info* page_found) {
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}

#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL

// Resumes a thread created suspended (see thread creation elsewhere).
void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}

// RAII helper: temporarily raises the multimedia timer resolution to 1 ms
// for sleeps whose duration is not a multiple of the default 10 ms tick.
class HighResolutionInterval : public CHeapObj<mtThread> {
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
3453 private: 3454 jlong resolution; 3455 public: 3456 HighResolutionInterval(jlong ms) { 3457 resolution = ms % 10L; 3458 if (resolution != 0) { 3459 MMRESULT result = timeBeginPeriod(1L); 3460 } 3461 } 3462 ~HighResolutionInterval() { 3463 if (resolution != 0) { 3464 MMRESULT result = timeEndPeriod(1L); 3465 } 3466 resolution = 0L; 3467 } 3468 }; 3469 3470 int os::sleep(Thread* thread, jlong ms, bool interruptable) { 3471 jlong limit = (jlong) MAXDWORD; 3472 3473 while (ms > limit) { 3474 int res; 3475 if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) { 3476 return res; 3477 } 3478 ms -= limit; 3479 } 3480 3481 assert(thread == Thread::current(), "thread consistency check"); 3482 OSThread* osthread = thread->osthread(); 3483 OSThreadWaitState osts(osthread, false /* not Object.wait() */); 3484 int result; 3485 if (interruptable) { 3486 assert(thread->is_Java_thread(), "must be java thread"); 3487 JavaThread *jt = (JavaThread *) thread; 3488 ThreadBlockInVM tbivm(jt); 3489 3490 jt->set_suspend_equivalent(); 3491 // cleared by handle_special_suspend_equivalent_condition() or 3492 // java_suspend_self() via check_and_wait_while_suspended() 3493 3494 HANDLE events[1]; 3495 events[0] = osthread->interrupt_event(); 3496 HighResolutionInterval *phri=NULL; 3497 if (!ForceTimeHighResolution) { 3498 phri = new HighResolutionInterval(ms); 3499 } 3500 if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) { 3501 result = OS_TIMEOUT; 3502 } else { 3503 ResetEvent(osthread->interrupt_event()); 3504 osthread->set_interrupted(false); 3505 result = OS_INTRPT; 3506 } 3507 delete phri; //if it is NULL, harmless 3508 3509 // were we externally suspended while we were waiting? 3510 jt->check_and_wait_while_suspended(); 3511 } else { 3512 assert(!thread->is_Java_thread(), "must not be java thread"); 3513 Sleep((long) ms); 3514 result = OS_TIMEOUT; 3515 } 3516 return result; 3517 } 3518 3519 // Short sleep, direct OS call. 
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}

// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    Sleep(100000);  // ... 100 seconds at a time
  }
}

typedef BOOL (WINAPI * STTSignature)(void);

void os::naked_yield() {
  // Consider passing back the return value from SwitchToThread().
  SwitchToThread();
}

// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.

int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,          // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,        // 1  MinPriority
  THREAD_PRIORITY_LOWEST,        // 2
  THREAD_PRIORITY_BELOW_NORMAL,  // 3
  THREAD_PRIORITY_BELOW_NORMAL,  // 4
  THREAD_PRIORITY_NORMAL,        // 5  NormPriority
  THREAD_PRIORITY_NORMAL,        // 6
  THREAD_PRIORITY_ABOVE_NORMAL,  // 7
  THREAD_PRIORITY_ABOVE_NORMAL,  // 8
  THREAD_PRIORITY_HIGHEST,       // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,       // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST        // 11 CriticalPriority
};

// Alternate, more aggressive mapping selected by -XX:ThreadPriorityPolicy=1.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,          // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,        // 1  MinPriority
  THREAD_PRIORITY_LOWEST,        // 2
  THREAD_PRIORITY_BELOW_NORMAL,  // 3
  THREAD_PRIORITY_BELOW_NORMAL,  // 4
  THREAD_PRIORITY_NORMAL,        // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,  // 6
  THREAD_PRIORITY_ABOVE_NORMAL,  // 7
  THREAD_PRIORITY_HIGHEST,       // 8
  THREAD_PRIORITY_HIGHEST,       // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL, // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL  // 11 CriticalPriority
};

static int prio_init() {
  // If ThreadPriorityPolicy is 1, switch tables
  if (ThreadPriorityPolicy == 1) {
    int i;
    for (i = 0; i < CriticalPriority + 1; i++) {
      os::java_to_os_priority[i] = prio_policy1[i];
    }
  }
  if (UseCriticalJavaThreadPriority) {
    // MaxPriority threads get the CriticalPriority OS mapping.
    os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
  }
  return 0;
}

OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) return OS_OK;
  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
  return ret ? OS_OK : OS_ERR;
}

OSReturn os::get_native_priority(const Thread* const thread,
                                 int* priority_ptr) {
  if (!UseThreadPriorities) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}

// Interrupts 'thread': sets the interrupted flag, signals the interrupt
// event used by os::sleep(), and unparks the thread's parkers.
void os::interrupt(Thread* thread) {
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)

  OSThread* osthread = thread->osthread();
  osthread->set_interrupted(true);
  // More than one thread can get here with the same value of osthread,
  // resulting in multiple notifications.  We do, however, want the store
  // to interrupted() to be visible to other threads before we post
  // the interrupt event.
  OrderAccess::release();
  SetEvent(osthread->interrupt_event());
  // For JSR166:  unpark after setting status
  if (thread->is_Java_thread()) {
    ((JavaThread*)thread)->parker()->unpark();
  }

  ParkEvent * ev = thread->_ParkEvent;
  if (ev != NULL) ev->unpark();
}


// Returns whether 'thread' is interrupted, optionally clearing the state.
bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  debug_only(Thread::check_for_dangling_thread_pointer(thread);)

  OSThread* osthread = thread->osthread();
  // There is no synchronization between the setting of the interrupt
  // and it being cleared here. It is critical - see 6535709 - that
  // we only clear the interrupt state, and reset the interrupt event,
  // if we are going to report that we were indeed interrupted - else
  // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
  // depending on the timing. By checking thread interrupt event to see
  // if the thread gets real interrupt thus prevent spurious wakeup.
  bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
  if (interrupted && clear_interrupted) {
    osthread->set_interrupted(false);
    ResetEvent(osthread->interrupt_event());
  } // Otherwise leave the interrupted state alone

  return interrupted;
}

// GetCurrentThreadId() returns DWORD
intx os::current_thread_id()  { return GetCurrentThreadId(); }

static int _initial_pid = 0;

int os::current_process_id() {
  return (_initial_pid ? _initial_pid : _getpid());
}

int    os::win32::_vm_page_size              = 0;
int    os::win32::_vm_allocation_granularity = 0;
int    os::win32::_processor_type            = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int    os::win32::_processor_level           = 0;
julong os::win32::_physical_memory           = 0;
size_t os::win32::_default_stack_size        = 0;

intx          os::win32::_os_thread_limit    = 0;
volatile intx os::win32::_os_thread_count    = 0;

bool   os::win32::_is_windows_server         = false;

// 6573254
// Currently, the bug is observed across all the supported Windows releases,
// including the latest one (as of this writing - Windows Server 2012 R2)
bool   os::win32::_has_exit_bug              = true;

// Queries page size, allocation granularity, processor count, physical
// memory and OS edition, and caches them in the statics above.
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size              = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type            = si.dwProcessorType;
  _processor_level           = si.wProcessorLevel;
  set_processor_count(si.dwNumberOfProcessors);

  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);

  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatusEx(&ms);
  _physical_memory = ms.ullTotalPhys;

  if (FLAG_IS_DEFAULT(MaxRAM)) {
    // Adjust MaxRAM according to the maximum virtual address space available.
    FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
  }

  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch (oi.dwPlatformId) {
  case VER_PLATFORM_WIN32_NT:
    {
      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
          oi.wProductType == VER_NT_SERVER) {
        _is_windows_server = true;
      }
    }
    break;
  default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
         "stack size not a multiple of page size");

  initialize_performance_counter();
}


// Loads DLL 'name' (no path component allowed) from the system directory or
// the Windows directory only; never from the search path. On failure fills
// 'ebuf' with a description and returns NULL.
HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
                                      int ebuflen) {
  char path[MAX_PATH];
  DWORD size;
  DWORD pathLen = (DWORD)sizeof(path);
  HINSTANCE result = NULL;

  // only allow library name without path component
  assert(strchr(name, '\\') == NULL, "path not allowed");
  assert(strchr(name, ':') == NULL, "path not allowed");
  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
    jio_snprintf(ebuf, ebuflen,
                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
    return NULL;
  }

  // search system directory
  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // try Windows directory
  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; //
truncated 3759 } 3760 if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) { 3761 return NULL; // truncated 3762 } 3763 if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) { 3764 return result; 3765 } 3766 } 3767 3768 jio_snprintf(ebuf, ebuflen, 3769 "os::win32::load_windows_dll() cannot load %s from system directories.", name); 3770 return NULL; 3771 } 3772 3773 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS) 3774 #define EXIT_TIMEOUT 300000 /* 5 minutes */ 3775 3776 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) { 3777 InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect); 3778 return TRUE; 3779 } 3780 3781 int os::win32::exit_process_or_thread(Ept what, int exit_code) { 3782 // Basic approach: 3783 // - Each exiting thread registers its intent to exit and then does so. 3784 // - A thread trying to terminate the process must wait for all 3785 // threads currently exiting to complete their exit. 3786 3787 if (os::win32::has_exit_bug()) { 3788 // The array holds handles of the threads that have started exiting by calling 3789 // _endthreadex(). 3790 // Should be large enough to avoid blocking the exiting thread due to lack of 3791 // a free slot. 3792 static HANDLE handles[MAXIMUM_THREADS_TO_KEEP]; 3793 static int handle_count = 0; 3794 3795 static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT; 3796 static CRITICAL_SECTION crit_sect; 3797 static volatile DWORD process_exiting = 0; 3798 int i, j; 3799 DWORD res; 3800 HANDLE hproc, hthr; 3801 3802 // We only attempt to register threads until a process exiting 3803 // thread manages to set the process_exiting flag. Any threads 3804 // that come through here after the process_exiting flag is set 3805 // are unregistered and will be caught in the SuspendThread() 3806 // infinite loop below. 3807 bool registered = false; 3808 3809 // The first thread that reached this point, initializes the critical section. 
3810 if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) { 3811 warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__); 3812 } else if (OrderAccess::load_acquire(&process_exiting) == 0) { 3813 if (what != EPT_THREAD) { 3814 // Atomically set process_exiting before the critical section 3815 // to increase the visibility between racing threads. 3816 Atomic::cmpxchg(GetCurrentThreadId(), &process_exiting, (DWORD)0); 3817 } 3818 EnterCriticalSection(&crit_sect); 3819 3820 if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) { 3821 // Remove from the array those handles of the threads that have completed exiting. 3822 for (i = 0, j = 0; i < handle_count; ++i) { 3823 res = WaitForSingleObject(handles[i], 0 /* don't wait */); 3824 if (res == WAIT_TIMEOUT) { 3825 handles[j++] = handles[i]; 3826 } else { 3827 if (res == WAIT_FAILED) { 3828 warning("WaitForSingleObject failed (%u) in %s: %d\n", 3829 GetLastError(), __FILE__, __LINE__); 3830 } 3831 // Don't keep the handle, if we failed waiting for it. 3832 CloseHandle(handles[i]); 3833 } 3834 } 3835 3836 // If there's no free slot in the array of the kept handles, we'll have to 3837 // wait until at least one thread completes exiting. 3838 if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) { 3839 // Raise the priority of the oldest exiting thread to increase its chances 3840 // to complete sooner. 3841 SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL); 3842 res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT); 3843 if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) { 3844 i = (res - WAIT_OBJECT_0); 3845 handle_count = MAXIMUM_THREADS_TO_KEEP - 1; 3846 for (; i < handle_count; ++i) { 3847 handles[i] = handles[i + 1]; 3848 } 3849 } else { 3850 warning("WaitForMultipleObjects %s (%u) in %s: %d\n", 3851 (res == WAIT_FAILED ? 
"failed" : "timed out"), 3852 GetLastError(), __FILE__, __LINE__); 3853 // Don't keep handles, if we failed waiting for them. 3854 for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) { 3855 CloseHandle(handles[i]); 3856 } 3857 handle_count = 0; 3858 } 3859 } 3860 3861 // Store a duplicate of the current thread handle in the array of handles. 3862 hproc = GetCurrentProcess(); 3863 hthr = GetCurrentThread(); 3864 if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count], 3865 0, FALSE, DUPLICATE_SAME_ACCESS)) { 3866 warning("DuplicateHandle failed (%u) in %s: %d\n", 3867 GetLastError(), __FILE__, __LINE__); 3868 3869 // We can't register this thread (no more handles) so this thread 3870 // may be racing with a thread that is calling exit(). If the thread 3871 // that is calling exit() has managed to set the process_exiting 3872 // flag, then this thread will be caught in the SuspendThread() 3873 // infinite loop below which closes that race. A small timing 3874 // window remains before the process_exiting flag is set, but it 3875 // is only exposed when we are out of handles. 3876 } else { 3877 ++handle_count; 3878 registered = true; 3879 3880 // The current exiting thread has stored its handle in the array, and now 3881 // should leave the critical section before calling _endthreadex(). 3882 } 3883 3884 } else if (what != EPT_THREAD && handle_count > 0) { 3885 jlong start_time, finish_time, timeout_left; 3886 // Before ending the process, make sure all the threads that had called 3887 // _endthreadex() completed. 3888 3889 // Set the priority level of the current thread to the same value as 3890 // the priority level of exiting threads. 3891 // This is to ensure it will be given a fair chance to execute if 3892 // the timeout expires. 
3893 hthr = GetCurrentThread(); 3894 SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL); 3895 start_time = os::javaTimeNanos(); 3896 finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L); 3897 for (i = 0; ; ) { 3898 int portion_count = handle_count - i; 3899 if (portion_count > MAXIMUM_WAIT_OBJECTS) { 3900 portion_count = MAXIMUM_WAIT_OBJECTS; 3901 } 3902 for (j = 0; j < portion_count; ++j) { 3903 SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL); 3904 } 3905 timeout_left = (finish_time - start_time) / 1000000L; 3906 if (timeout_left < 0) { 3907 timeout_left = 0; 3908 } 3909 res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left); 3910 if (res == WAIT_FAILED || res == WAIT_TIMEOUT) { 3911 warning("WaitForMultipleObjects %s (%u) in %s: %d\n", 3912 (res == WAIT_FAILED ? "failed" : "timed out"), 3913 GetLastError(), __FILE__, __LINE__); 3914 // Reset portion_count so we close the remaining 3915 // handles due to this error. 3916 portion_count = handle_count - i; 3917 } 3918 for (j = 0; j < portion_count; ++j) { 3919 CloseHandle(handles[i + j]); 3920 } 3921 if ((i += portion_count) >= handle_count) { 3922 break; 3923 } 3924 start_time = os::javaTimeNanos(); 3925 } 3926 handle_count = 0; 3927 } 3928 3929 LeaveCriticalSection(&crit_sect); 3930 } 3931 3932 if (!registered && 3933 OrderAccess::load_acquire(&process_exiting) != 0 && 3934 process_exiting != GetCurrentThreadId()) { 3935 // Some other thread is about to call exit(), so we don't let 3936 // the current unregistered thread proceed to exit() or _endthreadex() 3937 while (true) { 3938 SuspendThread(GetCurrentThread()); 3939 // Avoid busy-wait loop, if SuspendThread() failed. 
3940 Sleep(EXIT_TIMEOUT); 3941 } 3942 } 3943 } 3944 3945 // We are here if either 3946 // - there's no 'race at exit' bug on this OS release; 3947 // - initialization of the critical section failed (unlikely); 3948 // - the current thread has registered itself and left the critical section; 3949 // - the process-exiting thread has raised the flag and left the critical section. 3950 if (what == EPT_THREAD) { 3951 _endthreadex((unsigned)exit_code); 3952 } else if (what == EPT_PROCESS) { 3953 ::exit(exit_code); 3954 } else { 3955 _exit(exit_code); 3956 } 3957 3958 // Should not reach here 3959 return exit_code; 3960 } 3961 3962 #undef EXIT_TIMEOUT 3963 3964 void os::win32::setmode_streams() { 3965 _setmode(_fileno(stdin), _O_BINARY); 3966 _setmode(_fileno(stdout), _O_BINARY); 3967 _setmode(_fileno(stderr), _O_BINARY); 3968 } 3969 3970 3971 bool os::is_debugger_attached() { 3972 return IsDebuggerPresent() ? true : false; 3973 } 3974 3975 3976 void os::wait_for_keypress_at_exit(void) { 3977 if (PauseAtExit) { 3978 fprintf(stderr, "Press any key to continue...\n"); 3979 fgetc(stdin); 3980 } 3981 } 3982 3983 3984 bool os::message_box(const char* title, const char* message) { 3985 int result = MessageBox(NULL, message, title, 3986 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY); 3987 return result == IDYES; 3988 } 3989 3990 #ifndef PRODUCT 3991 #ifndef _WIN64 3992 // Helpers to check whether NX protection is enabled 3993 int nx_exception_filter(_EXCEPTION_POINTERS *pex) { 3994 if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && 3995 pex->ExceptionRecord->NumberParameters > 0 && 3996 pex->ExceptionRecord->ExceptionInformation[0] == 3997 EXCEPTION_INFO_EXEC_VIOLATION) { 3998 return EXCEPTION_EXECUTE_HANDLER; 3999 } 4000 return EXCEPTION_CONTINUE_SEARCH; 4001 } 4002 4003 void nx_check_protection() { 4004 // If NX is enabled we'll get an exception calling into code on the stack 4005 char code[] = { (char)0xC3 }; // ret 4006 void 
*code_ptr = (void *)code; 4007 __try { 4008 __asm call code_ptr 4009 } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) { 4010 tty->print_raw_cr("NX protection detected."); 4011 } 4012 } 4013 #endif // _WIN64 4014 #endif // PRODUCT 4015 4016 // This is called _before_ the global arguments have been parsed 4017 void os::init(void) { 4018 _initial_pid = _getpid(); 4019 4020 init_random(1234567); 4021 4022 win32::initialize_system_info(); 4023 win32::setmode_streams(); 4024 init_page_sizes((size_t) win32::vm_page_size()); 4025 4026 // This may be overridden later when argument processing is done. 4027 FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false); 4028 4029 // Initialize main_process and main_thread 4030 main_process = GetCurrentProcess(); // Remember main_process is a pseudo handle 4031 if (!DuplicateHandle(main_process, GetCurrentThread(), main_process, 4032 &main_thread, THREAD_ALL_ACCESS, false, 0)) { 4033 fatal("DuplicateHandle failed\n"); 4034 } 4035 main_thread_id = (int) GetCurrentThreadId(); 4036 4037 // initialize fast thread access - only used for 32-bit 4038 win32::initialize_thread_ptr_offset(); 4039 } 4040 4041 // To install functions for atexit processing 4042 extern "C" { 4043 static void perfMemory_exit_helper() { 4044 perfMemory_exit(); 4045 } 4046 } 4047 4048 static jint initSock(); 4049 4050 // this is called _after_ the global arguments have been parsed 4051 jint os::init_2(void) { 4052 // Setup Windows Exceptions 4053 4054 // for debugging float code generation bugs 4055 if (ForceFloatExceptions) { 4056 #ifndef _WIN64 4057 static long fp_control_word = 0; 4058 __asm { fstcw fp_control_word } 4059 // see Intel PPro Manual, Vol. 
2, p 7-16 4060 const long precision = 0x20; 4061 const long underflow = 0x10; 4062 const long overflow = 0x08; 4063 const long zero_div = 0x04; 4064 const long denorm = 0x02; 4065 const long invalid = 0x01; 4066 fp_control_word |= invalid; 4067 __asm { fldcw fp_control_word } 4068 #endif 4069 } 4070 4071 // If stack_commit_size is 0, windows will reserve the default size, 4072 // but only commit a small portion of it. 4073 size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size()); 4074 size_t default_reserve_size = os::win32::default_stack_size(); 4075 size_t actual_reserve_size = stack_commit_size; 4076 if (stack_commit_size < default_reserve_size) { 4077 // If stack_commit_size == 0, we want this too 4078 actual_reserve_size = default_reserve_size; 4079 } 4080 4081 // Check minimum allowable stack size for thread creation and to initialize 4082 // the java system classes, including StackOverflowError - depends on page 4083 // size. Add two 4K pages for compiler2 recursion in main thread. 4084 // Add in 4*BytesPerWord 4K pages to account for VM stack during 4085 // class initialization depending on 32 or 64 bit VM. 4086 size_t min_stack_allowed = 4087 (size_t)(JavaThread::stack_guard_zone_size() + 4088 JavaThread::stack_shadow_zone_size() + 4089 (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K); 4090 4091 min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size()); 4092 4093 if (actual_reserve_size < min_stack_allowed) { 4094 tty->print_cr("\nThe Java thread stack size specified is too small. " 4095 "Specify at least %dk", 4096 min_stack_allowed / K); 4097 return JNI_ERR; 4098 } 4099 4100 JavaThread::set_stack_size_at_create(stack_commit_size); 4101 4102 // Calculate theoretical max. size of Threads to guard gainst artifical 4103 // out-of-memory situations, where all available address-space has been 4104 // reserved by thread stacks. 
4105 assert(actual_reserve_size != 0, "Must have a stack"); 4106 4107 // Calculate the thread limit when we should start doing Virtual Memory 4108 // banging. Currently when the threads will have used all but 200Mb of space. 4109 // 4110 // TODO: consider performing a similar calculation for commit size instead 4111 // as reserve size, since on a 64-bit platform we'll run into that more 4112 // often than running out of virtual memory space. We can use the 4113 // lower value of the two calculations as the os_thread_limit. 4114 size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K); 4115 win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size); 4116 4117 // at exit methods are called in the reverse order of their registration. 4118 // there is no limit to the number of functions registered. atexit does 4119 // not set errno. 4120 4121 if (PerfAllowAtExitRegistration) { 4122 // only register atexit functions if PerfAllowAtExitRegistration is set. 4123 // atexit functions can be delayed until process exit time, which 4124 // can be problematic for embedded VM situations. Embedded VMs should 4125 // call DestroyJavaVM() to assure that VM resources are released. 4126 4127 // note: perfMemory_exit_helper atexit function may be removed in 4128 // the future if the appropriate cleanup code can be added to the 4129 // VM_Exit VMOperation's doit method. 
4130 if (atexit(perfMemory_exit_helper) != 0) { 4131 warning("os::init_2 atexit(perfMemory_exit_helper) failed"); 4132 } 4133 } 4134 4135 #ifndef _WIN64 4136 // Print something if NX is enabled (win32 on AMD64) 4137 NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection()); 4138 #endif 4139 4140 // initialize thread priority policy 4141 prio_init(); 4142 4143 if (UseNUMA && !ForceNUMA) { 4144 UseNUMA = false; // We don't fully support this yet 4145 } 4146 4147 if (UseNUMAInterleaving) { 4148 // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag 4149 bool success = numa_interleaving_init(); 4150 if (!success) UseNUMAInterleaving = false; 4151 } 4152 4153 if (initSock() != JNI_OK) { 4154 return JNI_ERR; 4155 } 4156 4157 SymbolEngine::recalc_search_path(); 4158 4159 // Initialize data for jdk.internal.misc.Signal 4160 if (!ReduceSignalUsage) { 4161 jdk_misc_signal_init(); 4162 } 4163 4164 return JNI_OK; 4165 } 4166 4167 // Mark the polling page as unreadable 4168 void os::make_polling_page_unreadable(void) { 4169 DWORD old_status; 4170 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), 4171 PAGE_NOACCESS, &old_status)) { 4172 fatal("Could not disable polling page"); 4173 } 4174 } 4175 4176 // Mark the polling page as readable 4177 void os::make_polling_page_readable(void) { 4178 DWORD old_status; 4179 if (!VirtualProtect((char *)_polling_page, os::vm_page_size(), 4180 PAGE_READONLY, &old_status)) { 4181 fatal("Could not enable polling page"); 4182 } 4183 } 4184 4185 // combine the high and low DWORD into a ULONGLONG 4186 static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) { 4187 ULONGLONG value = high_word; 4188 value <<= sizeof(high_word) * 8; 4189 value |= low_word; 4190 return value; 4191 } 4192 4193 // Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat 4194 static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) { 4195 
::memset((void*)sbuf, 0, sizeof(struct stat)); 4196 sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow); 4197 sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime, 4198 file_data.ftLastWriteTime.dwLowDateTime); 4199 sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime, 4200 file_data.ftCreationTime.dwLowDateTime); 4201 sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime, 4202 file_data.ftLastAccessTime.dwLowDateTime); 4203 if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) { 4204 sbuf->st_mode |= S_IFDIR; 4205 } else { 4206 sbuf->st_mode |= S_IFREG; 4207 } 4208 } 4209 4210 // The following function is adapted from java.base/windows/native/libjava/canonicalize_md.c 4211 // Creates an UNC path from a single byte path. Return buffer is 4212 // allocated in C heap and needs to be freed by the caller. 4213 // Returns NULL on error. 4214 static wchar_t* create_unc_path(const char* path, errno_t &err) { 4215 wchar_t* wpath = NULL; 4216 size_t converted_chars = 0; 4217 size_t path_len = strlen(path) + 1; // includes the terminating NULL 4218 if (path[0] == '\\' && path[1] == '\\') { 4219 if (path[2] == '?' 
&& path[3] == '\\'){ 4220 // if it already has a \\?\ don't do the prefix 4221 wpath = (wchar_t*)os::malloc(path_len * sizeof(wchar_t), mtInternal); 4222 if (wpath != NULL) { 4223 err = ::mbstowcs_s(&converted_chars, wpath, path_len, path, path_len); 4224 } else { 4225 err = ENOMEM; 4226 } 4227 } else { 4228 // only UNC pathname includes double slashes here 4229 wpath = (wchar_t*)os::malloc((path_len + 7) * sizeof(wchar_t), mtInternal); 4230 if (wpath != NULL) { 4231 ::wcscpy(wpath, L"\\\\?\\UNC\0"); 4232 err = ::mbstowcs_s(&converted_chars, &wpath[7], path_len, path, path_len); 4233 } else { 4234 err = ENOMEM; 4235 } 4236 } 4237 } else { 4238 wpath = (wchar_t*)os::malloc((path_len + 4) * sizeof(wchar_t), mtInternal); 4239 if (wpath != NULL) { 4240 ::wcscpy(wpath, L"\\\\?\\\0"); 4241 err = ::mbstowcs_s(&converted_chars, &wpath[4], path_len, path, path_len); 4242 } else { 4243 err = ENOMEM; 4244 } 4245 } 4246 return wpath; 4247 } 4248 4249 static void destroy_unc_path(wchar_t* wpath) { 4250 os::free(wpath); 4251 } 4252 4253 int os::stat(const char *path, struct stat *sbuf) { 4254 char* pathbuf = (char*)os::strdup(path, mtInternal); 4255 if (pathbuf == NULL) { 4256 errno = ENOMEM; 4257 return -1; 4258 } 4259 os::native_path(pathbuf); 4260 int ret; 4261 WIN32_FILE_ATTRIBUTE_DATA file_data; 4262 // Not using stat() to avoid the problem described in JDK-6539723 4263 if (strlen(path) < MAX_PATH) { 4264 BOOL bret = ::GetFileAttributesExA(pathbuf, GetFileExInfoStandard, &file_data); 4265 if (!bret) { 4266 errno = ::GetLastError(); 4267 ret = -1; 4268 } 4269 else { 4270 file_attribute_data_to_stat(sbuf, file_data); 4271 ret = 0; 4272 } 4273 } else { 4274 errno_t err = ERROR_SUCCESS; 4275 wchar_t* wpath = create_unc_path(pathbuf, err); 4276 if (err != ERROR_SUCCESS) { 4277 if (wpath != NULL) { 4278 destroy_unc_path(wpath); 4279 } 4280 os::free(pathbuf); 4281 errno = err; 4282 return -1; 4283 } 4284 BOOL bret = ::GetFileAttributesExW(wpath, GetFileExInfoStandard, &file_data); 
4285 if (!bret) { 4286 errno = ::GetLastError(); 4287 ret = -1; 4288 } else { 4289 file_attribute_data_to_stat(sbuf, file_data); 4290 ret = 0; 4291 } 4292 destroy_unc_path(wpath); 4293 } 4294 os::free(pathbuf); 4295 return ret; 4296 } 4297 4298 4299 #define FT2INT64(ft) \ 4300 ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime)) 4301 4302 4303 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool) 4304 // are used by JVM M&M and JVMTI to get user+sys or user CPU time 4305 // of a thread. 4306 // 4307 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns 4308 // the fast estimate available on the platform. 4309 4310 // current_thread_cpu_time() is not optimized for Windows yet 4311 jlong os::current_thread_cpu_time() { 4312 // return user + sys since the cost is the same 4313 return os::thread_cpu_time(Thread::current(), true /* user+sys */); 4314 } 4315 4316 jlong os::thread_cpu_time(Thread* thread) { 4317 // consistent with what current_thread_cpu_time() returns. 
4318 return os::thread_cpu_time(thread, true /* user+sys */); 4319 } 4320 4321 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { 4322 return os::thread_cpu_time(Thread::current(), user_sys_cpu_time); 4323 } 4324 4325 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) { 4326 // This code is copy from clasic VM -> hpi::sysThreadCPUTime 4327 // If this function changes, os::is_thread_cpu_time_supported() should too 4328 FILETIME CreationTime; 4329 FILETIME ExitTime; 4330 FILETIME KernelTime; 4331 FILETIME UserTime; 4332 4333 if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime, 4334 &ExitTime, &KernelTime, &UserTime) == 0) { 4335 return -1; 4336 } else if (user_sys_cpu_time) { 4337 return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100; 4338 } else { 4339 return FT2INT64(UserTime) * 100; 4340 } 4341 } 4342 4343 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4344 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4345 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4346 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4347 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4348 } 4349 4350 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) { 4351 info_ptr->max_value = ALL_64_BITS; // the max value -- all 64 bits 4352 info_ptr->may_skip_backward = false; // GetThreadTimes returns absolute time 4353 info_ptr->may_skip_forward = false; // GetThreadTimes returns absolute time 4354 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned 4355 } 4356 4357 bool os::is_thread_cpu_time_supported() { 4358 // see os::thread_cpu_time 4359 FILETIME CreationTime; 4360 FILETIME ExitTime; 4361 FILETIME KernelTime; 4362 FILETIME UserTime; 4363 4364 if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime, 4365 &KernelTime, &UserTime) == 0) { 4366 return false; 4367 } else { 4368 return true; 4369 } 4370 } 
// Windows doesn't provide a loadavg primitive so this is stubbed out for now.
// It does have primitives (PDH API) to get CPU usage and run queue length.
// "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
// If we wanted to implement loadavg on Windows, we have a few options:
//
// a) Query CPU usage and run queue length and "fake" an answer by
//    returning the CPU usage if it's under 100%, and the run queue
//    length otherwise.  It turns out that querying is pretty slow
//    on Windows, on the order of 200 microseconds on a fast machine.
//    Note that on Windows the CPU usage value is the % usage
//    since the last time the API was called (and the first call
//    returns 100%), so we'd have to deal with that as well.
//
// b) Sample the "fake" answer using a sampling thread and store
//    the answer in a global variable.  The call to loadavg would
//    just return the value of the global, avoiding the slow query.
//
// c) Sample a better answer using exponential decay to smooth the
//    value.  This is basically the algorithm used by UNIX kernels.
//
// Note that sampling thread starvation could affect both (b) and (c).
// Load average is not implemented on Windows; always reports failure.
int os::loadavg(double loadavg[], int nelem) {
  return -1;
}


// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
bool os::dont_yield() {
  return DontYieldALot;
}

// This method is a slightly reworked copy of JDK's sysOpen
// from src/windows/hpi/src/sys_api_md.c
//
// Opens 'path' with the given flags/mode, always in binary and non-inherited
// mode. Paths of MAX_PATH or longer are routed through the wide-char CRT via
// a \\?\-prefixed UNC path. Returns the fd, or -1 with errno set.
int os::open(const char *path, int oflag, int mode) {
  char* pathbuf = (char*)os::strdup(path, mtInternal);
  if (pathbuf == NULL) {
    errno = ENOMEM;
    return -1;
  }
  os::native_path(pathbuf);
  int ret;
  if (strlen(path) < MAX_PATH) {
    ret = ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
  } else {
    errno_t err = ERROR_SUCCESS;
    wchar_t* wpath = create_unc_path(pathbuf, err);
    if (err != ERROR_SUCCESS) {
      if (wpath != NULL) {
        destroy_unc_path(wpath);
      }
      os::free(pathbuf);
      errno = err;
      return -1;
    }
    ret = ::_wopen(wpath, oflag | O_BINARY | O_NOINHERIT, mode);
    if (ret == -1) {
      // NOTE(review): _wopen is a CRT function that reports failures via
      // errno, not via the Win32 last-error value — ::GetLastError() here may
      // not describe the actual failure; confirm.
      errno = ::GetLastError();
    }
    destroy_unc_path(wpath);
  }
  os::free(pathbuf);
  return ret;
}

// Wrap an already-open fd in a C stream.
FILE* os::open(int fd, const char* mode) {
  return ::_fdopen(fd, mode);
}

// Is a (classpath) directory empty?
// Returns true iff 'path' is a directory containing no entries other than
// "." and "..". Also returns false on any allocation/conversion failure
// (errno set) or if the search pattern matches nothing.
bool os::dir_is_empty(const char* path) {
  // +3: room for "\\*" plus the terminating NUL appended below.
  char* search_path = (char*)os::malloc(strlen(path) + 3, mtInternal);
  if (search_path == NULL) {
    errno = ENOMEM;
    return false;
  }
  strcpy(search_path, path);
  os::native_path(search_path);
  // Append "*", or possibly "\\*", to path
  if (search_path[1] == ':' &&
      (search_path[2] == '\0' ||
       (search_path[2] == '\\' && search_path[3] == '\0'))) {
    // No '\\' needed for cases like "Z:" or "Z:\"
    strcat(search_path, "*");
  }
  else {
    strcat(search_path, "\\*");
  }
  errno_t err = ERROR_SUCCESS;
  wchar_t* wpath = create_unc_path(search_path, err);
  if (err != ERROR_SUCCESS) {
    if (wpath != NULL) {
      destroy_unc_path(wpath);
    }
    os::free(search_path);
    errno = err;
    return false;
  }
  WIN32_FIND_DATAW fd;
  HANDLE f = ::FindFirstFileW(wpath, &fd);
  destroy_unc_path(wpath);
  bool is_empty = true;
  if (f != INVALID_HANDLE_VALUE) {
    // FindFirstFileW already consumed the first entry (typically ".");
    // iterate the rest looking for anything that is not "." or "..".
    while (is_empty && ::FindNextFileW(f, &fd)) {
      // An empty directory contains only the current directory file
      // and the previous directory file.
      if ((wcscmp(fd.cFileName, L".") != 0) &&
          (wcscmp(fd.cFileName, L"..") != 0)) {
        is_empty = false;
      }
    }
    FindClose(f);
  }
  os::free(search_path);
  return is_empty;
}

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
  if (!rewrite_existing) {
    // Fail (EEXIST) instead of truncating an existing file.
    oflags |= _O_EXCL;
  }
  return ::open(path, oflags, _S_IREAD | _S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}


// 64-bit-safe lseek wrapper.
jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::_lseeki64(fd, offset, whence);
}

// Positioned read via overlapped ReadFile; does not move the fd's file
// pointer state the way lseek+read would. Returns bytes read, or 0 on error.
size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  OVERLAPPED ov;
  DWORD nread;
  BOOL result;

  ZeroMemory(&ov, sizeof(ov));
  ov.Offset = (DWORD)offset;
  ov.OffsetHigh = (DWORD)(offset >> 32);

  HANDLE h = (HANDLE)::_get_osfhandle(fd);

  result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);

  return result ? nread : 0;
}


// This method is a slightly reworked copy of JDK's sysNativePath
// from src/windows/hpi/src/path_md.c

// Convert a pathname to native format.  On win32, this involves forcing all
// separators to be '\\' rather than '/' (both are legal inputs, but Win95
// sometimes rejects '/') and removing redundant separators.  The input path is
// assumed to have been converted into the character encoding used by the local
// system.
// Because this might be a double-byte encoding, care is taken to
// treat double-byte lead characters correctly.
//
// This procedure modifies the given path in place, as the result is never
// longer than the original.  There is no error return; this operation always
// succeeds.
char * os::native_path(char *path) {
  char *src = path, *dst = path, *end = path;
  char *colon = NULL;  // If a drive specifier is found, this will
                       // point to the colon following the drive letter

  // Assumption: '/', '\\', ':', and drive letters are never lead bytes
  assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
          && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");

  // Check for leading separators
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    // Remove leading separators if followed by drive specifier.  This
    // hack is necessary to support file URLs containing drive
    // specifiers (e.g., "file://c:/path").  As a side effect,
    // "/c:/path" can be used as an alternative to "c:/path".
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      // UNC pathname: Retain first separator; leave src pointed at
      // second separator so that further separators will be collapsed
      // into the second separator.  The result will be a pathname
      // beginning with "\\\\" followed (most likely) by a host name.
      src = dst = path + 1;
      path[0] = '\\';  // Force first separator to '\\'
    }
  }

  end = dst;

  // Remove redundant separators from remainder of path, forcing all
  // separators to be '\\' rather than '/'. Also, single byte space
  // characters are removed from the end of the path because those
  // are not legal ending characters on this operating system.
  //
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      while (isfilesep(*src)) src++;
      if (*src == '\0') {
        // Check for trailing separator
        end = dst;
        if (colon == dst - 2) break;  // "z:\\"
        if (dst == path + 1) break;   // "\\"
        if (dst == path + 2 && isfilesep(path[0])) {
          // "\\\\" is not collapsed to "\\" because "\\\\" marks the
          // beginning of a UNC pathname.  Even though it is not, by
          // itself, a valid UNC pathname, we leave it as is in order
          // to be consistent with the path canonicalizer as well
          // as the win32 APIs, which treat this case as an invalid
          // UNC pathname rather than as an alias for the root
          // directory of the current drive.
          break;
        }
        end = --dst;  // Path does not denote a root directory, so
                      // remove trailing separator
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else {  // Copy a single-byte character
        char c = *src++;
        *dst++ = c;
        // Space is not a legal ending character
        if (c != ' ') end = dst;
      }
    }
  }

  *end = '\0';

  // For "z:", add "." to work around a bug in the C runtime library
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}

// This code is a copy of JDK's sysSetLength
// from src/windows/hpi/src/sys_api_md.c
//
// Truncates (or extends) the file behind 'fd' to 'length' bytes.
// Returns 0 on success, -1 on failure.
int os::ftruncate(int fd, jlong length) {
  HANDLE h = (HANDLE)::_get_osfhandle(fd);
  long high = (long)(length >> 32);
  DWORD ret;

  if (h == (HANDLE)(-1)) {
    return -1;
  }

  // SetFilePointer may legitimately return 0xFFFFFFFF as a low dword, so a
  // failure is only a 0xFFFFFFFF result combined with a non-NO_ERROR code.
  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
    return -1;
  }

  if (::SetEndOfFile(h) == FALSE) {
    return -1;
  }

  return 0;
}

int os::get_fileno(FILE* fp) {
  return _fileno(fp);
}

// This code is a copy of JDK's sysSync
// from src/windows/hpi/src/sys_api_md.c
// except for the legacy workaround for a bug in Win 98

int os::fsync(int fd) {
  HANDLE handle = (HANDLE)::_get_osfhandle(fd);

  // ERROR_ACCESS_DENIED is ignored: FlushFileBuffers fails that way on
  // handles without write access (e.g. read-only fds), which is harmless.
  if ((!::FlushFileBuffers(handle)) &&
      (GetLastError() != ERROR_ACCESS_DENIED)) {
    // from winerror.h
    return -1;
  }
  return 0;
}

static int nonSeekAvailable(int, long *);
static int stdinAvailable(int, long *);

#define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
#define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)

// This code is a copy of JDK's sysAvailable
// from src/windows/hpi/src/sys_api_md.c
//
// Stores in *bytes the number of bytes readable from 'fd' without blocking.
// Returns TRUE on success, FALSE on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      // Character devices and pipes are not seekable; ask the console/pipe.
      int ret;
      long lpbytes;
      if (fd == 0) {
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    // Regular file: available = size - current position, computed by seeking
    // to the end and back.
    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
      return FALSE;
    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
      return FALSE;
    }
    *bytes = end - cur;
    return TRUE;
  } else {
    return FALSE;
  }
}

void os::flockfile(FILE* fp) {
  _lock_file(fp);
}

void os::funlockfile(FILE* fp) {
  _unlock_file(fp);
}

// This code is a copy of JDK's nonSeekAvailable
// from src/windows/hpi/src/sys_api_md.c

static int nonSeekAvailable(int fd, long *pbytes) {
  // This is used for available on non-seekable devices
  // (like both named and anonymous pipes, such as pipes
  //  connected to an exec'd process).
  // Standard Input is a special case.
  HANDLE han;

  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
    return FALSE;
  }

  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
    // PeekNamedPipe fails when at EOF.  In that case we
    // simply make *pbytes = 0 which is consistent with the
    // behavior we get on Solaris when an fd is at EOF.
    // The only alternative is to raise an Exception,
    // which isn't really warranted.
    //
    if (::GetLastError() != ERROR_BROKEN_PIPE) {
      return FALSE;
    }
    *pbytes = 0;
  }
  return TRUE;
}

#define MAX_INPUT_EVENTS 2000

// This code is a copy of JDK's stdinAvailable
// from src/windows/hpi/src/sys_api_md.c
// (NOTE: this definition continues past the end of this chunk.)

static int stdinAvailable(int fd, long *pbytes) {
  HANDLE han;
  DWORD numEventsRead = 0;  // Number of events read from buffer
  DWORD numEvents = 0;      // Number of events in buffer
  DWORD i = 0;              // Loop index
  DWORD curLength = 0;      // Position marker
  DWORD actualLength = 0;   // Number of bytes readable
  BOOL error = FALSE;       // Error holder
  INPUT_RECORD *lpBuffer;   // Pointer to records of input events

  if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
    return FALSE;
  }

  // Construct an array of input records in the console buffer
  error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
  if (error == 0) {
    // stdin is redirected (not a console): fall back to the pipe query.
    return nonSeekAvailable(fd, pbytes);
  }

  // lpBuffer must fit into 64K or else PeekConsoleInput fails
  if (numEvents > MAX_INPUT_EVENTS) {
    numEvents = MAX_INPUT_EVENTS;
  }

  lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
  if (lpBuffer == NULL) {
    return FALSE;
  }

  error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
  if (error == 0) {
    os::free(lpBuffer);
    return FALSE;
  }

  // Examine input records for the number of bytes available
  for (i=0; i<numEvents; i++) {
    if (lpBuffer[i].EventType == KEY_EVENT) {

      KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
                                      &(lpBuffer[i].Event);
      if (keyRecord->bKeyDown == TRUE) {
        CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
        curLength++;
        // Only bytes up to and including the last carriage return count as
        // available, since console reads are line-buffered.
        if (*keyPressed == '\r') {
          actualLength = curLength;
        }
      }
    }
  }

  if (lpBuffer != NULL) {
    os::free(lpBuffer);
  }
  *pbytes = (long) actualLength;
  return TRUE;
}

// Map a block of memory.
// Maps 'bytes' bytes of 'file_name' starting at 'file_offset' at the
// requested address 'addr'. Executable mappings cannot be produced with
// CreateFileMapping (see comment below), so for allow_exec the file
// contents are read into anonymous memory instead. Returns the base
// address of the mapping, or NULL on failure.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
  HANDLE hFile;
  char* base;

  hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
                     OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
  if (hFile == NULL) {
    log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
    return NULL;
  }

  if (allow_exec) {
    // CreateFileMapping/MapViewOfFileEx can't map executable memory
    // unless it comes from a PE image (which the shared archive is not.)
    // Even VirtualProtect refuses to give execute access to mapped memory
    // that was not previously executable.
    //
    // Instead, stick the executable region in anonymous memory.  Yuck.
    // Penalty is that ~4 pages will not be shareable - in the future
    // we might consider DLLizing the shared archive with a proper PE
    // header so that mapping executable + sharing is possible.

    base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
                                PAGE_READWRITE);
    if (base == NULL) {
      log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
      CloseHandle(hFile);
      return NULL;
    }

    // Copy the file contents into the anonymous region (synchronous read
    // at the requested offset via the OVERLAPPED structure).
    DWORD bytes_read;
    OVERLAPPED overlapped;
    overlapped.Offset = (DWORD)file_offset;
    overlapped.OffsetHigh = 0;
    overlapped.hEvent = NULL;
    // ReadFile guarantees that if the return value is true, the requested
    // number of bytes were read before returning.
    bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
    if (!res) {
      log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
      release_memory(base, bytes);
      CloseHandle(hFile);
      return NULL;
    }
  } else {
    HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
                                    NULL /* file_name */);
    if (hMap == NULL) {
      log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
      CloseHandle(hFile);
      return NULL;
    }

    DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
    base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
                                  (DWORD)bytes, addr);
    if (base == NULL) {
      log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
      CloseHandle(hMap);
      CloseHandle(hFile);
      return NULL;
    }

    // The view remains valid after closing the mapping handle, so failing
    // to close it only leaks a handle; the mapping itself is returned.
    if (CloseHandle(hMap) == 0) {
      log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
      CloseHandle(hFile);
      return base;
    }
  }

  if (allow_exec) {
    DWORD old_protect;
    DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
    bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;

    if (!res) {
      log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
      // Don't consider this a hard error, on IA32 even if the
      // VirtualProtect fails, we should still be able to execute
      CloseHandle(hFile);
      return base;
    }
  }

  if (CloseHandle(hFile) == 0) {
    log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
    return base;
  }

  return base;
}


// Remap a block of memory.
4917 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset, 4918 char *addr, size_t bytes, bool read_only, 4919 bool allow_exec) { 4920 // This OS does not allow existing memory maps to be remapped so we 4921 // have to unmap the memory before we remap it. 4922 if (!os::unmap_memory(addr, bytes)) { 4923 return NULL; 4924 } 4925 4926 // There is a very small theoretical window between the unmap_memory() 4927 // call above and the map_memory() call below where a thread in native 4928 // code may be able to access an address that is no longer mapped. 4929 4930 return os::map_memory(fd, file_name, file_offset, addr, bytes, 4931 read_only, allow_exec); 4932 } 4933 4934 4935 // Unmap a block of memory. 4936 // Returns true=success, otherwise false. 4937 4938 bool os::pd_unmap_memory(char* addr, size_t bytes) { 4939 MEMORY_BASIC_INFORMATION mem_info; 4940 if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) { 4941 log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError()); 4942 return false; 4943 } 4944 4945 // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx. 4946 // Instead, executable region was allocated using VirtualAlloc(). See 4947 // pd_map_memory() above. 4948 // 4949 // The following flags should match the 'exec_access' flages used for 4950 // VirtualProtect() in pd_map_memory(). 
4951 if (mem_info.Protect == PAGE_EXECUTE_READ || 4952 mem_info.Protect == PAGE_EXECUTE_READWRITE) { 4953 return pd_release_memory(addr, bytes); 4954 } 4955 4956 BOOL result = UnmapViewOfFile(addr); 4957 if (result == 0) { 4958 log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError()); 4959 return false; 4960 } 4961 return true; 4962 } 4963 4964 void os::pause() { 4965 char filename[MAX_PATH]; 4966 if (PauseAtStartupFile && PauseAtStartupFile[0]) { 4967 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile); 4968 } else { 4969 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id()); 4970 } 4971 4972 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666); 4973 if (fd != -1) { 4974 struct stat buf; 4975 ::close(fd); 4976 while (::stat(filename, &buf) == 0) { 4977 Sleep(100); 4978 } 4979 } else { 4980 jio_fprintf(stderr, 4981 "Could not open pause file '%s', continuing immediately.\n", filename); 4982 } 4983 } 4984 4985 Thread* os::ThreadCrashProtection::_protected_thread = NULL; 4986 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL; 4987 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0; 4988 4989 os::ThreadCrashProtection::ThreadCrashProtection() { 4990 } 4991 4992 // See the caveats for this class in os_windows.hpp 4993 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back 4994 // into this method and returns false. If no OS EXCEPTION was raised, returns 4995 // true. 4996 // The callback is supposed to provide the method that should be protected. 
//
bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {

  // Serialize crash-protected sections: only one thread may be protected
  // at a time (the static _protected_thread/_crash_protection fields).
  Thread::muxAcquire(&_crash_mux, "CrashProtection");

  _protected_thread = Thread::current_or_null();
  assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");

  bool success = true;
  // Structured Exception Handling: any OS exception raised inside cb.call()
  // lands in the __except handler instead of crashing the process.
  __try {
    _crash_protection = this;
    cb.call();
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // only for protection, nothing to do
    success = false;
  }
  _crash_protection = NULL;
  _protected_thread = NULL;
  Thread::muxRelease(&_crash_mux);
  return success;
}

// An Event wraps a win32 "CreateEvent" kernel handle.
//
// We have a number of choices regarding "CreateEvent" win32 handle leakage:
//
// 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
//     field, and call CloseHandle() on the win32 event handle.  Unpark() would
//     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
//     In addition, an unpark() operation might fetch the handle field, but the
//     event could recycle between the fetch and the SetEvent() operation.
//     SetEvent() would either fail because the handle was invalid, or inadvertently work,
//     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
//     on a stale but recycled handle would be harmless, but in practice this might
//     confuse other non-Sun code, so it's not a viable approach.
//
// 2:  Once a win32 event handle is associated with an Event, it remains associated
//     with the Event.  The event handle is never closed.  This could be construed
//     as handle leakage, but only up to the maximum # of threads that have been extant
//     at any one time.  This shouldn't be an issue, as windows platforms typically
//     permit a process to have hundreds of thousands of open handles.
//
// 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
//     and release unused handles.
//
// 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
//     It's not clear, however, that we wouldn't be trading one type of leak for another.
//
// 5.  Use an RCU-like mechanism (Read-Copy Update).
//     Or perhaps something similar to Maged Michael's "Hazard pointers".
//
// We use (2).
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
// 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
//     to recover from (or at least detect) the dreaded Windows 841176 bug.
// 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
//     into a single win32 CreateEvent() handle.
//
// Assumption:
//    Only one parker can exist on an event, which is why we allocate
//    them per-thread.  Multiple unparkers can coexist.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block; then set _Event to 0 before returning
//
// _Event transitions in unpark()
//    0 => 1 : just return
//    1 => 1 : just return
//   -1 => either 0 or 1; must signal target thread
//         That is, we can safely transition _Event from -1 to either
//         0 or 1.
//
// _Event serves as a restricted-range semaphore.
//   -1 : thread is blocked, i.e. there is a waiter
//    0 : neutral: thread is running or ready,
//        could have been signaled after a wait started
//    1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
//

int os::PlatformEvent::park(jlong Millis) {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL , "Invariant");
  guarantee(Millis > 0          , "Invariant");

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.
  // Consider: use atomic decrement instead of CAS-loop

  // Atomically decrement _Event; loop until the CAS succeeds.
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return OS_OK;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;     // set prd = MAX (Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
  }
  v = _Event;
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultaneous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}

int os::PlatformEvent::park(jlong Millis) is the timed variant above; this
is the untimed variant: block until unpark() signals the event.
void os::PlatformEvent::park() {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Consider: use atomic decrement instead of CAS-loop
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}

void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  if (Atomic::xchg(1, &_Event) >= 0) return;

  // Previous value was -1: a waiter is blocked; wake it.
  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

// The Windows implementation of Park is very straightforward: Basic
// operations on Win32 Events turn out to have the right semantics to
// use them directly.  We opportunistically reuse the event inherited
// from Monitor.
5207 5208 void Parker::park(bool isAbsolute, jlong time) { 5209 guarantee(_ParkEvent != NULL, "invariant"); 5210 // First, demultiplex/decode time arguments 5211 if (time < 0) { // don't wait 5212 return; 5213 } else if (time == 0 && !isAbsolute) { 5214 time = INFINITE; 5215 } else if (isAbsolute) { 5216 time -= os::javaTimeMillis(); // convert to relative time 5217 if (time <= 0) { // already elapsed 5218 return; 5219 } 5220 } else { // relative 5221 time /= 1000000; // Must coarsen from nanos to millis 5222 if (time == 0) { // Wait for the minimal time unit if zero 5223 time = 1; 5224 } 5225 } 5226 5227 JavaThread* thread = JavaThread::current(); 5228 5229 // Don't wait if interrupted or already triggered 5230 if (Thread::is_interrupted(thread, false) || 5231 WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) { 5232 ResetEvent(_ParkEvent); 5233 return; 5234 } else { 5235 ThreadBlockInVM tbivm(thread); 5236 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */); 5237 thread->set_suspend_equivalent(); 5238 5239 WaitForSingleObject(_ParkEvent, time); 5240 ResetEvent(_ParkEvent); 5241 5242 // If externally suspended while waiting, re-suspend 5243 if (thread->handle_special_suspend_equivalent_condition()) { 5244 thread->java_suspend_self(); 5245 } 5246 } 5247 } 5248 5249 void Parker::unpark() { 5250 guarantee(_ParkEvent != NULL, "invariant"); 5251 SetEvent(_ParkEvent); 5252 } 5253 5254 // Run the specified command in a separate process. Return its exit value, 5255 // or -1 on failure (e.g. can't create a new process). 
5256 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) { 5257 STARTUPINFO si; 5258 PROCESS_INFORMATION pi; 5259 DWORD exit_code; 5260 5261 char * cmd_string; 5262 char * cmd_prefix = "cmd /C "; 5263 size_t len = strlen(cmd) + strlen(cmd_prefix) + 1; 5264 cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal); 5265 if (cmd_string == NULL) { 5266 return -1; 5267 } 5268 cmd_string[0] = '\0'; 5269 strcat(cmd_string, cmd_prefix); 5270 strcat(cmd_string, cmd); 5271 5272 // now replace all '\n' with '&' 5273 char * substring = cmd_string; 5274 while ((substring = strchr(substring, '\n')) != NULL) { 5275 substring[0] = '&'; 5276 substring++; 5277 } 5278 memset(&si, 0, sizeof(si)); 5279 si.cb = sizeof(si); 5280 memset(&pi, 0, sizeof(pi)); 5281 BOOL rslt = CreateProcess(NULL, // executable name - use command line 5282 cmd_string, // command line 5283 NULL, // process security attribute 5284 NULL, // thread security attribute 5285 TRUE, // inherits system handles 5286 0, // no creation flags 5287 NULL, // use parent's environment block 5288 NULL, // use parent's starting directory 5289 &si, // (in) startup information 5290 &pi); // (out) process information 5291 5292 if (rslt) { 5293 // Wait until child process exits. 5294 WaitForSingleObject(pi.hProcess, INFINITE); 5295 5296 GetExitCodeProcess(pi.hProcess, &exit_code); 5297 5298 // Close process and thread handles. 
5299 CloseHandle(pi.hProcess); 5300 CloseHandle(pi.hThread); 5301 } else { 5302 exit_code = -1; 5303 } 5304 5305 FREE_C_HEAP_ARRAY(char, cmd_string); 5306 return (int)exit_code; 5307 } 5308 5309 bool os::find(address addr, outputStream* st) { 5310 int offset = -1; 5311 bool result = false; 5312 char buf[256]; 5313 if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) { 5314 st->print(PTR_FORMAT " ", addr); 5315 if (strlen(buf) < sizeof(buf) - 1) { 5316 char* p = strrchr(buf, '\\'); 5317 if (p) { 5318 st->print("%s", p + 1); 5319 } else { 5320 st->print("%s", buf); 5321 } 5322 } else { 5323 // The library name is probably truncated. Let's omit the library name. 5324 // See also JDK-8147512. 5325 } 5326 if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) { 5327 st->print("::%s + 0x%x", buf, offset); 5328 } 5329 st->cr(); 5330 result = true; 5331 } 5332 return result; 5333 } 5334 5335 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) { 5336 DWORD exception_code = e->ExceptionRecord->ExceptionCode; 5337 5338 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 5339 JavaThread* thread = JavaThread::current(); 5340 PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord; 5341 address addr = (address) exceptionRecord->ExceptionInformation[1]; 5342 5343 if (os::is_memory_serialize_page(thread, addr)) { 5344 return EXCEPTION_CONTINUE_EXECUTION; 5345 } 5346 } 5347 5348 return EXCEPTION_CONTINUE_SEARCH; 5349 } 5350 5351 static jint initSock() { 5352 WSADATA wsadata; 5353 5354 if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) { 5355 jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n", 5356 ::GetLastError()); 5357 return JNI_ERR; 5358 } 5359 return JNI_OK; 5360 } 5361 5362 struct hostent* os::get_host_by_name(char* name) { 5363 return (struct hostent*)gethostbyname(name); 5364 } 5365 5366 int os::socket_close(int fd) { 5367 return ::closesocket(fd); 5368 } 5369 5370 int os::socket(int domain, int type, int 
protocol) { 5371 return ::socket(domain, type, protocol); 5372 } 5373 5374 int os::connect(int fd, struct sockaddr* him, socklen_t len) { 5375 return ::connect(fd, him, len); 5376 } 5377 5378 int os::recv(int fd, char* buf, size_t nBytes, uint flags) { 5379 return ::recv(fd, buf, (int)nBytes, flags); 5380 } 5381 5382 int os::send(int fd, char* buf, size_t nBytes, uint flags) { 5383 return ::send(fd, buf, (int)nBytes, flags); 5384 } 5385 5386 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) { 5387 return ::send(fd, buf, (int)nBytes, flags); 5388 } 5389 5390 // WINDOWS CONTEXT Flags for THREAD_SAMPLING 5391 #if defined(IA32) 5392 #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS) 5393 #elif defined (AMD64) 5394 #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT) 5395 #endif 5396 5397 // returns true if thread could be suspended, 5398 // false otherwise 5399 static bool do_suspend(HANDLE* h) { 5400 if (h != NULL) { 5401 if (SuspendThread(*h) != ~0) { 5402 return true; 5403 } 5404 } 5405 return false; 5406 } 5407 5408 // resume the thread 5409 // calling resume on an active thread is a no-op 5410 static void do_resume(HANDLE* h) { 5411 if (h != NULL) { 5412 ResumeThread(*h); 5413 } 5414 } 5415 5416 // retrieve a suspend/resume context capable handle 5417 // from the tid. Caller validates handle return value. 
void get_thread_handle_for_extended_context(HANDLE* h,
                                            OSThread::thread_id_t tid) {
  if (h != NULL) {
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}

// Thread sampling implementation
//
// Suspend the target thread, capture its CPU context, hand the context to
// the sampling implementation via do_task(), then resume the thread.
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}

// Append a "Do you want to debug?" prompt to 'buf' and show it in a
// message box. If the user selects 'Yes', trigger a breakpoint so a
// debugger can attach.
bool os::start_debugging(char *buf, int buflen) {
  int len = (int)strlen(buf);
  char *p = &buf[len];

  jio_snprintf(p, buflen-len,
               "\n\n"
               "Do you want to debug the problem?\n\n"
               "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
               "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
               "Otherwise, select 'No' to abort...",
               os::current_process_id(), os::current_thread_id());

  bool yes = os::message_box("Unexpected Error", buf);

  if (yes) {
    // os::breakpoint() calls DebugBreak(), which causes a breakpoint
    // exception. If VM is running inside a debugger, the debugger will
    // catch the exception. Otherwise, the breakpoint exception will reach
    // the default windows exception handler, which can spawn a debugger and
    // automatically attach to the dying VM.
    os::breakpoint();
    yes = false;
  }
  return yes;
}

// Handle of the executable module, used to look up statically linked symbols.
void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}

// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//                            == false if only the base name of the library is passed in
//                                     such as "L"
// Returns a C-heap-allocated string (caller frees), or NULL on failure.
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // Room for "<sym>_<lib>" plus terminating NUL.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX: the __stdcall '@XX' suffix must stay
      // at the end, so splice the library name in before it.
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}

#ifndef PRODUCT

// test the code path in reserve_memory_special() that tries to allocate memory in a single
// contiguous memory block at a particular address.
// The test first tries to find a good approximate address to allocate at by using the same
// method to allocate some memory at any address. The test then tries to allocate memory in
// the vicinity (not directly after it to avoid possible by-chance use of that location)
// This is of course only some dodgy assumption, there is no guarantee that the vicinity of
// the previously allocated memory is available for allocation. The only actual failure
// that is reported is when the test tries to allocate at a particular location but gets a
// different valid one. A NULL return value at this point is not considered an error but may
// be legitimate.
// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
5563 void TestReserveMemorySpecial_test() { 5564 if (!UseLargePages) { 5565 if (VerboseInternalVMTests) { 5566 tty->print("Skipping test because large pages are disabled"); 5567 } 5568 return; 5569 } 5570 // save current value of globals 5571 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; 5572 bool old_use_numa_interleaving = UseNUMAInterleaving; 5573 5574 // set globals to make sure we hit the correct code path 5575 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; 5576 5577 // do an allocation at an address selected by the OS to get a good one. 5578 const size_t large_allocation_size = os::large_page_size() * 4; 5579 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); 5580 if (result == NULL) { 5581 if (VerboseInternalVMTests) { 5582 tty->print("Failed to allocate control block with size " SIZE_FORMAT ". Skipping remainder of test.", 5583 large_allocation_size); 5584 } 5585 } else { 5586 os::release_memory_special(result, large_allocation_size); 5587 5588 // allocate another page within the recently allocated memory area which seems to be a good location. At least 5589 // we managed to get it once. 5590 const size_t expected_allocation_size = os::large_page_size(); 5591 char* expected_location = result + os::large_page_size(); 5592 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); 5593 if (actual_location == NULL) { 5594 if (VerboseInternalVMTests) { 5595 tty->print("Failed to allocate any memory at " PTR_FORMAT " size " SIZE_FORMAT ". Skipping remainder of test.", 5596 expected_location, large_allocation_size); 5597 } 5598 } else { 5599 // release memory 5600 os::release_memory_special(actual_location, expected_allocation_size); 5601 // only now check, after releasing any memory to avoid any leaks. 
5602 assert(actual_location == expected_location, 5603 "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead", 5604 expected_location, expected_allocation_size, actual_location); 5605 } 5606 } 5607 5608 // restore globals 5609 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; 5610 UseNUMAInterleaving = old_use_numa_interleaving; 5611 } 5612 #endif // PRODUCT 5613 5614 /* 5615 All the defined signal names for Windows. 5616 5617 NOTE that not all of these names are accepted by FindSignal! 5618 5619 For various reasons some of these may be rejected at runtime. 5620 5621 Here are the names currently accepted by a user of sun.misc.Signal with 5622 1.4.1 (ignoring potential interaction with use of chaining, etc): 5623 5624 (LIST TBD) 5625 5626 */ 5627 int os::get_signal_number(const char* name) { 5628 static const struct { 5629 char* name; 5630 int number; 5631 } siglabels [] = 5632 // derived from version 6.0 VC98/include/signal.h 5633 {"ABRT", SIGABRT, // abnormal termination triggered by abort cl 5634 "FPE", SIGFPE, // floating point exception 5635 "SEGV", SIGSEGV, // segment violation 5636 "INT", SIGINT, // interrupt 5637 "TERM", SIGTERM, // software term signal from kill 5638 "BREAK", SIGBREAK, // Ctrl-Break sequence 5639 "ILL", SIGILL}; // illegal instruction 5640 for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) { 5641 if (strcmp(name, siglabels[i].name) == 0) { 5642 return siglabels[i].number; 5643 } 5644 } 5645 return -1; 5646 } 5647 5648 // Fast current thread access 5649 5650 int os::win32::_thread_ptr_offset = 0; 5651 5652 static void call_wrapper_dummy() {} 5653 5654 // We need to call the os_exception_wrapper once so that it sets 5655 // up the offset from FS of the thread pointer. 5656 void os::win32::initialize_thread_ptr_offset() { 5657 os::os_exception_wrapper((java_call_t)call_wrapper_dummy, 5658 NULL, NULL, NULL, NULL); 5659 }