1 /* 2 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce 26 #define _WIN32_WINNT 0x0600 27 28 // no precompiled headers 29 #include "jvm.h" 30 #include "classfile/classLoader.hpp" 31 #include "classfile/systemDictionary.hpp" 32 #include "classfile/vmSymbols.hpp" 33 #include "code/icBuffer.hpp" 34 #include "code/vtableStubs.hpp" 35 #include "compiler/compileBroker.hpp" 36 #include "compiler/disassembler.hpp" 37 #include "interpreter/interpreter.hpp" 38 #include "logging/log.hpp" 39 #include "memory/allocation.inline.hpp" 40 #include "memory/filemap.hpp" 41 #include "oops/oop.inline.hpp" 42 #include "os_share_windows.hpp" 43 #include "os_windows.inline.hpp" 44 #include "prims/jniFastGetField.hpp" 45 #include "prims/jvm_misc.hpp" 46 #include "runtime/arguments.hpp" 47 #include "runtime/atomic.hpp" 48 #include "runtime/extendedPC.hpp" 49 #include "runtime/globals.hpp" 50 #include "runtime/interfaceSupport.inline.hpp" 51 #include "runtime/java.hpp" 52 #include "runtime/javaCalls.hpp" 53 #include "runtime/mutexLocker.hpp" 54 #include "runtime/objectMonitor.hpp" 55 #include "runtime/orderAccess.hpp" 56 #include "runtime/osThread.hpp" 57 #include "runtime/perfMemory.hpp" 58 #include "runtime/sharedRuntime.hpp" 59 #include "runtime/statSampler.hpp" 60 #include "runtime/stubRoutines.hpp" 61 #include "runtime/thread.inline.hpp" 62 #include "runtime/threadCritical.hpp" 63 #include "runtime/timer.hpp" 64 #include "runtime/vm_version.hpp" 65 #include "services/attachListener.hpp" 66 #include "services/memTracker.hpp" 67 #include "services/runtimeService.hpp" 68 #include "utilities/align.hpp" 69 #include "utilities/decoder.hpp" 70 #include "utilities/defaultStream.hpp" 71 #include "utilities/events.hpp" 72 #include "utilities/growableArray.hpp" 73 #include "utilities/macros.hpp" 74 #include "utilities/vmError.hpp" 75 #include "symbolengine.hpp" 76 #include "windbghelp.hpp" 77 78 79 #ifdef _DEBUG 80 #include <crtdbg.h> 81 #endif 82 83 84 
#include <windows.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/timeb.h>
#include <objidl.h>
#include <shlobj.h>

#include <malloc.h>
#include <signal.h>
#include <direct.h>
#include <errno.h>
#include <fcntl.h>
#include <io.h>
#include <process.h>              // For _beginthreadex(), _endthreadex()
#include <imagehlp.h>             // For os::dll_address_to_function_name
// for enumerating dll libraries
#include <vdmdbg.h>
#include <psapi.h>
#include <mmsystem.h>
#include <winsock2.h>

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(-1)

// For DLL loading/load error detection
// Values of PE COFF
#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
#define IMAGE_FILE_SIGNATURE_LENGTH 4

// Handles/ids of the primordial process and thread, captured at VM init.
static HANDLE main_process;
static HANDLE main_thread;
static int    main_thread_id;

// Process CPU-time snapshots (filled in elsewhere via GetProcessTimes).
static FILETIME process_creation_time;
static FILETIME process_exit_time;
static FILETIME process_user_time;
static FILETIME process_kernel_time;

#ifdef _M_AMD64
  #define __CPU__ amd64
#else
  #define __CPU__ i486
#endif

// save DLL module handle, used by GetModuleFileName
HINSTANCE vm_lib_handle;

// DLL entry point for jvm.dll: records the module handle, optionally raises
// the OS timer resolution, and pre-initializes the dbghelp/symbol engines.
BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
  switch (reason) {
  case DLL_PROCESS_ATTACH:
    vm_lib_handle = hinst;
    if (ForceTimeHighResolution) {
      timeBeginPeriod(1L);
    }
    WindowsDbgHelp::pre_initialize();
    SymbolEngine::pre_initialize();
    break;
  case DLL_PROCESS_DETACH:
    if (ForceTimeHighResolution) {
      timeEndPeriod(1L);
    }
    break;
  default:
    break;
  }
  return true;
}

// Convert a FILETIME (100ns units since 1601) to a double, in seconds.
static inline double fileTimeAsDouble(FILETIME* time) {
  const double high  = (double) ((unsigned int) ~0);
  const double split = 10000000.0;
  double result = (time->dwLowDateTime / split) +
                   time->dwHighDateTime * (high/split);
  return result;
}
// Implementation of os 162 163 bool os::unsetenv(const char* name) { 164 assert(name != NULL, "Null pointer"); 165 return (SetEnvironmentVariable(name, NULL) == TRUE); 166 } 167 168 // No setuid programs under Windows. 169 bool os::have_special_privileges() { 170 return false; 171 } 172 173 174 // This method is a periodic task to check for misbehaving JNI applications 175 // under CheckJNI, we can add any periodic checks here. 176 // For Windows at the moment does nothing 177 void os::run_periodic_checks() { 178 return; 179 } 180 181 // previous UnhandledExceptionFilter, if there is one 182 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL; 183 184 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo); 185 186 void os::init_system_properties_values() { 187 // sysclasspath, java_home, dll_dir 188 { 189 char *home_path; 190 char *dll_path; 191 char *pslash; 192 char *bin = "\\bin"; 193 char home_dir[MAX_PATH + 1]; 194 char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR"); 195 196 if (alt_home_dir != NULL) { 197 strncpy(home_dir, alt_home_dir, MAX_PATH + 1); 198 home_dir[MAX_PATH] = '\0'; 199 } else { 200 os::jvm_path(home_dir, sizeof(home_dir)); 201 // Found the full path to jvm.dll. 202 // Now cut the path to <java_home>/jre if we can. 
203 *(strrchr(home_dir, '\\')) = '\0'; // get rid of \jvm.dll 204 pslash = strrchr(home_dir, '\\'); 205 if (pslash != NULL) { 206 *pslash = '\0'; // get rid of \{client|server} 207 pslash = strrchr(home_dir, '\\'); 208 if (pslash != NULL) { 209 *pslash = '\0'; // get rid of \bin 210 } 211 } 212 } 213 214 home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal); 215 if (home_path == NULL) { 216 return; 217 } 218 strcpy(home_path, home_dir); 219 Arguments::set_java_home(home_path); 220 FREE_C_HEAP_ARRAY(char, home_path); 221 222 dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1, 223 mtInternal); 224 if (dll_path == NULL) { 225 return; 226 } 227 strcpy(dll_path, home_dir); 228 strcat(dll_path, bin); 229 Arguments::set_dll_dir(dll_path); 230 FREE_C_HEAP_ARRAY(char, dll_path); 231 232 if (!set_boot_path('\\', ';')) { 233 vm_exit_during_initialization("Failed setting boot class path.", NULL); 234 } 235 } 236 237 // library_path 238 #define EXT_DIR "\\lib\\ext" 239 #define BIN_DIR "\\bin" 240 #define PACKAGE_DIR "\\Sun\\Java" 241 { 242 // Win32 library search order (See the documentation for LoadLibrary): 243 // 244 // 1. The directory from which application is loaded. 245 // 2. The system wide Java Extensions directory (Java only) 246 // 3. System directory (GetSystemDirectory) 247 // 4. Windows directory (GetWindowsDirectory) 248 // 5. The PATH environment variable 249 // 6. The current directory 250 251 char *library_path; 252 char tmp[MAX_PATH]; 253 char *path_str = ::getenv("PATH"); 254 255 library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) + 256 sizeof(BIN_DIR) + (path_str ? 
strlen(path_str) : 0) + 10, mtInternal); 257 258 library_path[0] = '\0'; 259 260 GetModuleFileName(NULL, tmp, sizeof(tmp)); 261 *(strrchr(tmp, '\\')) = '\0'; 262 strcat(library_path, tmp); 263 264 GetWindowsDirectory(tmp, sizeof(tmp)); 265 strcat(library_path, ";"); 266 strcat(library_path, tmp); 267 strcat(library_path, PACKAGE_DIR BIN_DIR); 268 269 GetSystemDirectory(tmp, sizeof(tmp)); 270 strcat(library_path, ";"); 271 strcat(library_path, tmp); 272 273 GetWindowsDirectory(tmp, sizeof(tmp)); 274 strcat(library_path, ";"); 275 strcat(library_path, tmp); 276 277 if (path_str) { 278 strcat(library_path, ";"); 279 strcat(library_path, path_str); 280 } 281 282 strcat(library_path, ";."); 283 284 Arguments::set_library_path(library_path); 285 FREE_C_HEAP_ARRAY(char, library_path); 286 } 287 288 // Default extensions directory 289 { 290 char path[MAX_PATH]; 291 char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1]; 292 GetWindowsDirectory(path, MAX_PATH); 293 sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR, 294 path, PACKAGE_DIR, EXT_DIR); 295 Arguments::set_ext_dirs(buf); 296 } 297 #undef EXT_DIR 298 #undef BIN_DIR 299 #undef PACKAGE_DIR 300 301 #ifndef _WIN64 302 // set our UnhandledExceptionFilter and save any previous one 303 prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception); 304 #endif 305 306 // Done 307 return; 308 } 309 310 void os::breakpoint() { 311 DebugBreak(); 312 } 313 314 // Invoked from the BREAKPOINT Macro 315 extern "C" void breakpoint() { 316 os::breakpoint(); 317 } 318 319 // RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP. 320 // So far, this method is only used by Native Memory Tracking, which is 321 // only supported on Windows XP or later. 
322 // 323 int os::get_native_stack(address* stack, int frames, int toSkip) { 324 int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL); 325 for (int index = captured; index < frames; index ++) { 326 stack[index] = NULL; 327 } 328 return captured; 329 } 330 331 332 // os::current_stack_base() 333 // 334 // Returns the base of the stack, which is the stack's 335 // starting address. This function must be called 336 // while running on the stack of the thread being queried. 337 338 address os::current_stack_base() { 339 MEMORY_BASIC_INFORMATION minfo; 340 address stack_bottom; 341 size_t stack_size; 342 343 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 344 stack_bottom = (address)minfo.AllocationBase; 345 stack_size = minfo.RegionSize; 346 347 // Add up the sizes of all the regions with the same 348 // AllocationBase. 349 while (1) { 350 VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo)); 351 if (stack_bottom == (address)minfo.AllocationBase) { 352 stack_size += minfo.RegionSize; 353 } else { 354 break; 355 } 356 } 357 return stack_bottom + stack_size; 358 } 359 360 size_t os::current_stack_size() { 361 size_t sz; 362 MEMORY_BASIC_INFORMATION minfo; 363 VirtualQuery(&minfo, &minfo, sizeof(minfo)); 364 sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase; 365 return sz; 366 } 367 368 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) { 369 MEMORY_BASIC_INFORMATION minfo; 370 committed_start = NULL; 371 committed_size = 0; 372 address top = start + size; 373 const address start_addr = start; 374 while (start < top) { 375 VirtualQuery(start, &minfo, sizeof(minfo)); 376 if ((minfo.State & MEM_COMMIT) == 0) { // not committed 377 if (committed_start != NULL) { 378 break; 379 } 380 } else { // committed 381 if (committed_start == NULL) { 382 committed_start = start; 383 } 384 size_t offset = start - (address)minfo.BaseAddress; 385 committed_size += minfo.RegionSize - 
offset; 386 } 387 start = (address)minfo.BaseAddress + minfo.RegionSize; 388 } 389 390 if (committed_start == NULL) { 391 assert(committed_size == 0, "Sanity"); 392 return false; 393 } else { 394 assert(committed_start >= start_addr && committed_start < top, "Out of range"); 395 // current region may go beyond the limit, trim to the limit 396 committed_size = MIN2(committed_size, size_t(top - committed_start)); 397 return true; 398 } 399 } 400 401 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) { 402 const struct tm* time_struct_ptr = localtime(clock); 403 if (time_struct_ptr != NULL) { 404 *res = *time_struct_ptr; 405 return res; 406 } 407 return NULL; 408 } 409 410 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) { 411 const struct tm* time_struct_ptr = gmtime(clock); 412 if (time_struct_ptr != NULL) { 413 *res = *time_struct_ptr; 414 return res; 415 } 416 return NULL; 417 } 418 419 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo); 420 421 // Thread start routine for all newly created threads 422 static unsigned __stdcall thread_native_entry(Thread* thread) { 423 424 thread->record_stack_base_and_size(); 425 426 // Try to randomize the cache line index of hot stack frames. 427 // This helps when threads of the same stack traces evict each other's 428 // cache lines. The threads can be either from the same JVM instance, or 429 // from different JVM instances. The benefit is especially true for 430 // processors with hyperthreading technology. 
431 static int counter = 0; 432 int pid = os::current_process_id(); 433 _alloca(((pid ^ counter++) & 7) * 128); 434 435 thread->initialize_thread_current(); 436 437 OSThread* osthr = thread->osthread(); 438 assert(osthr->get_state() == RUNNABLE, "invalid os thread state"); 439 440 if (UseNUMA) { 441 int lgrp_id = os::numa_get_group_id(); 442 if (lgrp_id != -1) { 443 thread->set_lgrp_id(lgrp_id); 444 } 445 } 446 447 // Diagnostic code to investigate JDK-6573254 448 int res = 30115; // non-java thread 449 if (thread->is_Java_thread()) { 450 res = 20115; // java thread 451 } 452 453 log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id()); 454 455 // Install a win32 structured exception handler around every thread created 456 // by VM, so VM can generate error dump when an exception occurred in non- 457 // Java thread (e.g. VM thread). 458 __try { 459 thread->call_run(); 460 } __except(topLevelExceptionFilter( 461 (_EXCEPTION_POINTERS*)_exception_info())) { 462 // Nothing to do. 463 } 464 465 // Note: at this point the thread object may already have deleted itself. 466 // Do not dereference it from here on out. 
467 468 log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id()); 469 470 // One less thread is executing 471 // When the VMThread gets here, the main thread may have already exited 472 // which frees the CodeHeap containing the Atomic::add code 473 if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) { 474 Atomic::dec(&os::win32::_os_thread_count); 475 } 476 477 // Thread must not return from exit_process_or_thread(), but if it does, 478 // let it proceed to exit normally 479 return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res); 480 } 481 482 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle, 483 int thread_id) { 484 // Allocate the OSThread object 485 OSThread* osthread = new OSThread(NULL, NULL); 486 if (osthread == NULL) return NULL; 487 488 // Initialize support for Java interrupts 489 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 490 if (interrupt_event == NULL) { 491 delete osthread; 492 return NULL; 493 } 494 osthread->set_interrupt_event(interrupt_event); 495 496 // Store info on the Win32 thread into the OSThread 497 osthread->set_thread_handle(thread_handle); 498 osthread->set_thread_id(thread_id); 499 500 if (UseNUMA) { 501 int lgrp_id = os::numa_get_group_id(); 502 if (lgrp_id != -1) { 503 thread->set_lgrp_id(lgrp_id); 504 } 505 } 506 507 // Initial thread state is INITIALIZED, not SUSPENDED 508 osthread->set_state(INITIALIZED); 509 510 return osthread; 511 } 512 513 514 bool os::create_attached_thread(JavaThread* thread) { 515 #ifdef ASSERT 516 thread->verify_not_published(); 517 #endif 518 HANDLE thread_h; 519 if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(), 520 &thread_h, THREAD_ALL_ACCESS, false, 0)) { 521 fatal("DuplicateHandle failed\n"); 522 } 523 OSThread* osthread = create_os_thread(thread, thread_h, 524 (int)current_thread_id()); 525 if (osthread == NULL) { 526 return false; 527 } 528 529 // Initial 
thread state is RUNNABLE 530 osthread->set_state(RUNNABLE); 531 532 thread->set_osthread(osthread); 533 534 log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").", 535 os::current_thread_id()); 536 537 return true; 538 } 539 540 bool os::create_main_thread(JavaThread* thread) { 541 #ifdef ASSERT 542 thread->verify_not_published(); 543 #endif 544 if (_starting_thread == NULL) { 545 _starting_thread = create_os_thread(thread, main_thread, main_thread_id); 546 if (_starting_thread == NULL) { 547 return false; 548 } 549 } 550 551 // The primordial thread is runnable from the start) 552 _starting_thread->set_state(RUNNABLE); 553 554 thread->set_osthread(_starting_thread); 555 return true; 556 } 557 558 // Helper function to trace _beginthreadex attributes, 559 // similar to os::Posix::describe_pthread_attr() 560 static char* describe_beginthreadex_attributes(char* buf, size_t buflen, 561 size_t stacksize, unsigned initflag) { 562 stringStream ss(buf, buflen); 563 if (stacksize == 0) { 564 ss.print("stacksize: default, "); 565 } else { 566 ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024); 567 } 568 ss.print("flags: "); 569 #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " "); 570 #define ALL(X) \ 571 X(CREATE_SUSPENDED) \ 572 X(STACK_SIZE_PARAM_IS_A_RESERVATION) 573 ALL(PRINT_FLAG) 574 #undef ALL 575 #undef PRINT_FLAG 576 return buf; 577 } 578 579 // Allocate and initialize a new OSThread 580 bool os::create_thread(Thread* thread, ThreadType thr_type, 581 size_t stack_size) { 582 unsigned thread_id; 583 584 // Allocate the OSThread object 585 OSThread* osthread = new OSThread(NULL, NULL); 586 if (osthread == NULL) { 587 return false; 588 } 589 590 // Initialize support for Java interrupts 591 HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL); 592 if (interrupt_event == NULL) { 593 delete osthread; 594 return NULL; 595 } 596 osthread->set_interrupt_event(interrupt_event); 597 osthread->set_interrupted(false); 598 599 
thread->set_osthread(osthread); 600 601 if (stack_size == 0) { 602 switch (thr_type) { 603 case os::java_thread: 604 // Java threads use ThreadStackSize which default value can be changed with the flag -Xss 605 if (JavaThread::stack_size_at_create() > 0) { 606 stack_size = JavaThread::stack_size_at_create(); 607 } 608 break; 609 case os::compiler_thread: 610 if (CompilerThreadStackSize > 0) { 611 stack_size = (size_t)(CompilerThreadStackSize * K); 612 break; 613 } // else fall through: 614 // use VMThreadStackSize if CompilerThreadStackSize is not defined 615 case os::vm_thread: 616 case os::pgc_thread: 617 case os::cgc_thread: 618 case os::watcher_thread: 619 if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K); 620 break; 621 } 622 } 623 624 // Create the Win32 thread 625 // 626 // Contrary to what MSDN document says, "stack_size" in _beginthreadex() 627 // does not specify stack size. Instead, it specifies the size of 628 // initially committed space. The stack size is determined by 629 // PE header in the executable. If the committed "stack_size" is larger 630 // than default value in the PE header, the stack is rounded up to the 631 // nearest multiple of 1MB. For example if the launcher has default 632 // stack size of 320k, specifying any size less than 320k does not 633 // affect the actual stack size at all, it only affects the initial 634 // commitment. On the other hand, specifying 'stack_size' larger than 635 // default value may cause significant increase in memory usage, because 636 // not only the stack space will be rounded up to MB, but also the 637 // entire space is committed upfront. 638 // 639 // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION' 640 // for CreateThread() that can treat 'stack_size' as stack size. However we 641 // are not supposed to call CreateThread() directly according to MSDN 642 // document because JVM uses C runtime library. 
The good news is that the 643 // flag appears to work with _beginthredex() as well. 644 645 const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION; 646 HANDLE thread_handle = 647 (HANDLE)_beginthreadex(NULL, 648 (unsigned)stack_size, 649 (unsigned (__stdcall *)(void*)) thread_native_entry, 650 thread, 651 initflag, 652 &thread_id); 653 654 char buf[64]; 655 if (thread_handle != NULL) { 656 log_info(os, thread)("Thread started (tid: %u, attributes: %s)", 657 thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag)); 658 } else { 659 log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.", 660 os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag)); 661 } 662 663 if (thread_handle == NULL) { 664 // Need to clean up stuff we've allocated so far 665 CloseHandle(osthread->interrupt_event()); 666 thread->set_osthread(NULL); 667 delete osthread; 668 return NULL; 669 } 670 671 Atomic::inc(&os::win32::_os_thread_count); 672 673 // Store info on the Win32 thread into the OSThread 674 osthread->set_thread_handle(thread_handle); 675 osthread->set_thread_id(thread_id); 676 677 // Initial thread state is INITIALIZED, not SUSPENDED 678 osthread->set_state(INITIALIZED); 679 680 // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain 681 return true; 682 } 683 684 685 // Free Win32 resources related to the OSThread 686 void os::free_thread(OSThread* osthread) { 687 assert(osthread != NULL, "osthread not set"); 688 689 // We are told to free resources of the argument thread, 690 // but we can only really operate on the current thread. 
691 assert(Thread::current()->osthread() == osthread, 692 "os::free_thread but not current thread"); 693 694 CloseHandle(osthread->thread_handle()); 695 CloseHandle(osthread->interrupt_event()); 696 delete osthread; 697 } 698 699 static jlong first_filetime; 700 static jlong initial_performance_count; 701 static jlong performance_frequency; 702 703 704 jlong as_long(LARGE_INTEGER x) { 705 jlong result = 0; // initialization to avoid warning 706 set_high(&result, x.HighPart); 707 set_low(&result, x.LowPart); 708 return result; 709 } 710 711 712 jlong os::elapsed_counter() { 713 LARGE_INTEGER count; 714 QueryPerformanceCounter(&count); 715 return as_long(count) - initial_performance_count; 716 } 717 718 719 jlong os::elapsed_frequency() { 720 return performance_frequency; 721 } 722 723 724 julong os::available_memory() { 725 return win32::available_memory(); 726 } 727 728 julong os::win32::available_memory() { 729 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 730 // value if total memory is larger than 4GB 731 MEMORYSTATUSEX ms; 732 ms.dwLength = sizeof(ms); 733 GlobalMemoryStatusEx(&ms); 734 735 return (julong)ms.ullAvailPhys; 736 } 737 738 julong os::physical_memory() { 739 return win32::physical_memory(); 740 } 741 742 bool os::has_allocatable_memory_limit(julong* limit) { 743 MEMORYSTATUSEX ms; 744 ms.dwLength = sizeof(ms); 745 GlobalMemoryStatusEx(&ms); 746 #ifdef _LP64 747 *limit = (julong)ms.ullAvailVirtual; 748 return true; 749 #else 750 // Limit to 1400m because of the 2gb address space wall 751 *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual); 752 return true; 753 #endif 754 } 755 756 int os::active_processor_count() { 757 // User has overridden the number of active processors 758 if (ActiveProcessorCount > 0) { 759 log_trace(os)("active_processor_count: " 760 "active processor count set by user : %d", 761 ActiveProcessorCount); 762 return ActiveProcessorCount; 763 } 764 765 DWORD_PTR lpProcessAffinityMask = 0; 766 
DWORD_PTR lpSystemAffinityMask = 0; 767 int proc_count = processor_count(); 768 if (proc_count <= sizeof(UINT_PTR) * BitsPerByte && 769 GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) { 770 // Nof active processors is number of bits in process affinity mask 771 int bitcount = 0; 772 while (lpProcessAffinityMask != 0) { 773 lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1); 774 bitcount++; 775 } 776 return bitcount; 777 } else { 778 return proc_count; 779 } 780 } 781 782 void os::set_native_thread_name(const char *name) { 783 784 // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx 785 // 786 // Note that unfortunately this only works if the process 787 // is already attached to a debugger; debugger must observe 788 // the exception below to show the correct name. 789 790 // If there is no debugger attached skip raising the exception 791 if (!IsDebuggerPresent()) { 792 return; 793 } 794 795 const DWORD MS_VC_EXCEPTION = 0x406D1388; 796 struct { 797 DWORD dwType; // must be 0x1000 798 LPCSTR szName; // pointer to name (in user addr space) 799 DWORD dwThreadID; // thread ID (-1=caller thread) 800 DWORD dwFlags; // reserved for future use, must be zero 801 } info; 802 803 info.dwType = 0x1000; 804 info.szName = name; 805 info.dwThreadID = -1; 806 info.dwFlags = 0; 807 808 __try { 809 RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info ); 810 } __except(EXCEPTION_EXECUTE_HANDLER) {} 811 } 812 813 bool os::distribute_processes(uint length, uint* distribution) { 814 // Not yet implemented. 815 return false; 816 } 817 818 bool os::bind_to_processor(uint processor_id) { 819 // Not yet implemented. 
820 return false; 821 } 822 823 void os::win32::initialize_performance_counter() { 824 LARGE_INTEGER count; 825 QueryPerformanceFrequency(&count); 826 performance_frequency = as_long(count); 827 QueryPerformanceCounter(&count); 828 initial_performance_count = as_long(count); 829 } 830 831 832 double os::elapsedTime() { 833 return (double) elapsed_counter() / (double) elapsed_frequency(); 834 } 835 836 837 // Windows format: 838 // The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601. 839 // Java format: 840 // Java standards require the number of milliseconds since 1/1/1970 841 842 // Constant offset - calculated using offset() 843 static jlong _offset = 116444736000000000; 844 // Fake time counter for reproducible results when debugging 845 static jlong fake_time = 0; 846 847 #ifdef ASSERT 848 // Just to be safe, recalculate the offset in debug mode 849 static jlong _calculated_offset = 0; 850 static int _has_calculated_offset = 0; 851 852 jlong offset() { 853 if (_has_calculated_offset) return _calculated_offset; 854 SYSTEMTIME java_origin; 855 java_origin.wYear = 1970; 856 java_origin.wMonth = 1; 857 java_origin.wDayOfWeek = 0; // ignored 858 java_origin.wDay = 1; 859 java_origin.wHour = 0; 860 java_origin.wMinute = 0; 861 java_origin.wSecond = 0; 862 java_origin.wMilliseconds = 0; 863 FILETIME jot; 864 if (!SystemTimeToFileTime(&java_origin, &jot)) { 865 fatal("Error = %d\nWindows error", GetLastError()); 866 } 867 _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime); 868 _has_calculated_offset = 1; 869 assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal"); 870 return _calculated_offset; 871 } 872 #else 873 jlong offset() { 874 return _offset; 875 } 876 #endif 877 878 jlong windows_to_java_time(FILETIME wt) { 879 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 880 return (a - offset()) / 10000; 881 } 882 883 // Returns time ticks in 
(10th of micro seconds) 884 jlong windows_to_time_ticks(FILETIME wt) { 885 jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime); 886 return (a - offset()); 887 } 888 889 FILETIME java_to_windows_time(jlong l) { 890 jlong a = (l * 10000) + offset(); 891 FILETIME result; 892 result.dwHighDateTime = high(a); 893 result.dwLowDateTime = low(a); 894 return result; 895 } 896 897 bool os::supports_vtime() { return true; } 898 bool os::enable_vtime() { return false; } 899 bool os::vtime_enabled() { return false; } 900 901 double os::elapsedVTime() { 902 FILETIME created; 903 FILETIME exited; 904 FILETIME kernel; 905 FILETIME user; 906 if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) { 907 // the resolution of windows_to_java_time() should be sufficient (ms) 908 return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS; 909 } else { 910 return elapsedTime(); 911 } 912 } 913 914 jlong os::javaTimeMillis() { 915 if (UseFakeTimers) { 916 return fake_time++; 917 } else { 918 FILETIME wt; 919 GetSystemTimeAsFileTime(&wt); 920 return windows_to_java_time(wt); 921 } 922 } 923 924 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) { 925 FILETIME wt; 926 GetSystemTimeAsFileTime(&wt); 927 jlong ticks = windows_to_time_ticks(wt); // 10th of micros 928 jlong secs = jlong(ticks / 10000000); // 10000 * 1000 929 seconds = secs; 930 nanos = jlong(ticks - (secs*10000000)) * 100; 931 } 932 933 jlong os::javaTimeNanos() { 934 LARGE_INTEGER current_count; 935 QueryPerformanceCounter(¤t_count); 936 double current = as_long(current_count); 937 double freq = performance_frequency; 938 jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC); 939 return time; 940 } 941 942 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) { 943 jlong freq = performance_frequency; 944 if (freq < NANOSECS_PER_SEC) { 945 // the performance counter is 64 bits and we will 946 // be multiplying it -- so no wrap in 64 bits 947 info_ptr->max_value 
= ALL_64_BITS; 948 } else if (freq > NANOSECS_PER_SEC) { 949 // use the max value the counter can reach to 950 // determine the max value which could be returned 951 julong max_counter = (julong)ALL_64_BITS; 952 info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC)); 953 } else { 954 // the performance counter is 64 bits and we will 955 // be using it directly -- so no wrap in 64 bits 956 info_ptr->max_value = ALL_64_BITS; 957 } 958 959 // using a counter, so no skipping 960 info_ptr->may_skip_backward = false; 961 info_ptr->may_skip_forward = false; 962 963 info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time 964 } 965 966 char* os::local_time_string(char *buf, size_t buflen) { 967 SYSTEMTIME st; 968 GetLocalTime(&st); 969 jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d", 970 st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond); 971 return buf; 972 } 973 974 bool os::getTimesSecs(double* process_real_time, 975 double* process_user_time, 976 double* process_system_time) { 977 HANDLE h_process = GetCurrentProcess(); 978 FILETIME create_time, exit_time, kernel_time, user_time; 979 BOOL result = GetProcessTimes(h_process, 980 &create_time, 981 &exit_time, 982 &kernel_time, 983 &user_time); 984 if (result != 0) { 985 FILETIME wt; 986 GetSystemTimeAsFileTime(&wt); 987 jlong rtc_millis = windows_to_java_time(wt); 988 *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS); 989 *process_user_time = 990 (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS); 991 *process_system_time = 992 (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS); 993 return true; 994 } else { 995 return false; 996 } 997 } 998 999 void os::shutdown() { 1000 // allow PerfMemory to attempt cleanup of any persistent resources 1001 perfMemory_exit(); 1002 1003 // flush buffered output, finish log files 1004 ostream_abort(); 1005 1006 // Check for abort hook 1007 
  // Tail of os::shutdown(): run the user-installed abort hook, if any,
  // before the VM goes down.
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}


// Handle of the minidump file created by check_dump_limit(); consumed by
// os::abort() when a core dump is requested.
static HANDLE dumpFile = NULL;

// Check if dump file can be created.
void os::check_dump_limit(char* buffer, size_t buffsz) {
  bool status = true;
  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
    jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
    status = false;
  }

#ifndef ASSERT
  // In product builds, minidumps are off by default on client editions.
  if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
    jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
    status = false;
  }
#endif

  if (status) {
    // Dump goes to <cwd>\hs_err_pid<pid>.mdmp (or .\ if cwd is unknown).
    const char* cwd = get_current_directory(NULL, 0);
    int pid = current_process_id();
    if (cwd != NULL) {
      jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
    } else {
      jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
    }

    // Create the file eagerly so a failure is reported up front rather
    // than at crash time.
    if (dumpFile == NULL &&
        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
                 == INVALID_HANDLE_VALUE) {
      jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
      status = false;
    }
  }
  VMError::record_coredump_status(buffer, status);
}

// Terminate the process, optionally writing a minidump of the crashing
// context first. Never returns.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  MINIDUMP_TYPE dumpType;

  shutdown();
  if (!dump_core || dumpFile == NULL) {
    if (dumpFile != NULL) {
      CloseHandle(dumpFile);
    }
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
                             MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);

  // If the faulting exception record/context are known, pass them along so
  // the dump records the crashing thread's state.
  if (siginfo != NULL && context != NULL) {
    ep.ContextRecord = (PCONTEXT) context;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }

  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
      !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
    jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
  }
  CloseHandle(dumpFile);
  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
}

// Directory routines copied from src/win32/native/java/io/dirent_md.c
//  * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

// Caller must have already run dirname through JVM_NativePath, which removes
// duplicate slashes and converts all instances of '/' into '\\'.

DIR * os::opendir(const char *dirname) {
  assert(dirname != NULL, "just checking");   // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
  DWORD fattr;                                // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == 0) {
    errno = ENOMEM;
    return 0;
  }

  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
  // as a directory in FindFirstFile().  We detect this case here and
  // prepend the current drive name.
  //
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    alt_dirname[0] = _getdrive() + 'A' - 1;
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  // +5 leaves room for appending "\\*.*" below.
  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dirp->path == 0) {
    free(dirp);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dirp->path, dirname);

  fattr = GetFileAttributes(dirp->path);
  if (fattr == 0xffffffff) {
    free(dirp->path);
    free(dirp);
    errno = ENOENT;
    return 0;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path);
    free(dirp);
    errno = ENOTDIR;
    return 0;
  }

  // Append "*.*", or possibly "\\*.*", to path
  if (dirp->path[1] == ':' &&
      (dirp->path[2] == '\0' ||
      (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    // No '\\' needed for cases like "Z:" or "Z:\"
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    // ERROR_FILE_NOT_FOUND just means the directory is empty; any other
    // failure is reported as EACCES.
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path);
      free(dirp);
      errno = EACCES;
      return 0;
    }
  }
  return dirp;
}

// Return the next directory entry, or NULL when the listing is exhausted.
struct dirent * os::readdir(DIR *dirp) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    return NULL;
  }

  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  // Pre-fetch the next entry; once exhausted the handle is closed so the
  // following call returns NULL.
  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return NULL;
    }
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}

int os::closedir(DIR *dirp) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path);
  free(dirp);
  return 0;
}

// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf) > 0) {
    return path_buf;
  } else {
    path_buf[0] = '\0';
    return path_buf;
  }
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
const char* os::get_current_directory(char *buf, size_t buflen) {
  int n = static_cast<int>(buflen);
  if (buflen > INT_MAX) n = INT_MAX;
  return _getcwd(buf, n);
}

//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if address in
// within the NTDLL address space.
//
static bool _addr_in_ntdll(address addr) {
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if (hmod == NULL) return false;
  if (!GetModuleInformation(GetCurrentProcess(), hmod,
                            &minfo, sizeof(MODULEINFO))) {
    return false;
  }

  if ((addr >= minfo.lpBaseOfDll) &&
      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
    return true;
  } else {
    return false;
  }
}
#endif

// Parameter block for _locate_module_by_addr().
struct _modinfo {
  address addr;
  char* full_path;   // point to a char buffer
  int buflen;        // size of the buffer
  address base_addr;
};

// get_loaded_modules_info() callback: stop at the module whose address range
// contains pmod->addr, recording its path and base address.
static int _locate_module_by_addr(const char * mod_fname, address base_addr,
                                  address top_address, void * param) {
  struct _modinfo *pmod = (struct _modinfo *)param;
  if (!pmod) return -1;

  if (base_addr <= pmod->addr &&
      top_address > pmod->addr) {
    // if a buffer is provided, copy path name to the buffer
    if (pmod->full_path) {
      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
    }
    pmod->base_addr = base_addr;
    return 1;
  }
  return 0;
}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

// NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
//       return the full path to the DLL file, sometimes it returns path
//       to the corresponding PDB file (debug info); sometimes it only
//       returns partial path, which makes life painful.
  struct _modinfo mi;
  mi.addr = addr;
  mi.full_path = buf;
  mi.buflen = buflen;
  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
    return true;
  }
  if (offset != NULL) *offset = -1;
  buf[0] = '\0';
  return false;
}

// save the start and end address of jvm.dll into param[0] and param[1]
static int _locate_jvm_dll(const char* mod_fname, address base_addr,
                           address top_address, void * param) {
  if (!param) return -1;

  // The address of this function itself lies inside jvm.dll, so the module
  // whose range contains it is jvm.dll.
  if (base_addr <= (address)_locate_jvm_dll &&
      top_address > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = top_address;
    return 1;
  }
  return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
bool os::address_is_in_vm(address addr) {
  // Lazily resolve and cache the jvm.dll range on first use.
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}

// print module info; param is outputStream*
static int _print_module(const char* fname, address base_address,
                         address top_address, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
  return 0;
}

// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
  void * result = LoadLibrary(name);
  if (result != NULL) {
    // Recalculate pdb search path if a DLL was loaded successfully.
    SymbolEngine::recalc_search_path();
    return result;
  }

  DWORD errcode = GetLastError();
  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
    ebuf[ebuflen - 1] = '\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message

  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen - 1] = '\0';
  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
  if (fd < 0) {
    return NULL;
  }

  // Walk the PE image header by hand to find the COFF machine field.
  uint32_t signature_offset;
  uint16_t lib_arch = 0;
  bool failed_to_get_lib_arch =
    ( // Go to position 3c in the dll
     (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
     ||
     // Read location of signature
     (sizeof(signature_offset) !=
     (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
     ||
     // Go to COFF File Header in dll
     // that is located after "signature" (4 bytes long)
     (os::seek_to_file_offset(fd,
     signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
     ||
     // Read field that contains code of architecture
     // that dll was built for
     (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
    );

  ::close(fd);
  if (failed_to_get_lib_arch) {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  typedef struct {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[] = {
    {IMAGE_FILE_MACHINE_I386, (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"}
  };
#if (defined _M_AMD64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
         is defined :_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str = NULL, *lib_arch_str = NULL;
  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (lib_arch == arch_array[i].arch_code) {
      lib_arch_str = arch_array[i].arch_name;
    }
    if (running_arch == arch_array[i].arch_code) {
      running_arch_str = arch_array[i].arch_name;
    }
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch) {
    return NULL;
  }

  if (lib_arch_str != NULL) {
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str, running_arch_str);
  } else {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch, running_arch_str);
  }

  return NULL;
}

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  get_loaded_modules_info(_print_module, (void *)st);
}

// Enumerate the modules loaded in this process and hand each one to
// 'callback'; stops early when the callback returns non-zero.
// NOTE: 'filename' is a static buffer, so this is not reentrant.
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  HANDLE hProcess;

# define MAX_NUM_MODULES 128
  HMODULE modules[MAX_NUM_MODULES];
  static char filename[MAX_PATH];
  int result = 0;

  int pid = os::current_process_id();
  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = callback(filename, (address)modinfo.lpBaseOfDll,
                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
1513 } 1514 1515 bool os::get_host_name(char* buf, size_t buflen) { 1516 DWORD size = (DWORD)buflen; 1517 return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE); 1518 } 1519 1520 void os::get_summary_os_info(char* buf, size_t buflen) { 1521 stringStream sst(buf, buflen); 1522 os::win32::print_windows_version(&sst); 1523 // chop off newline character 1524 char* nl = strchr(buf, '\n'); 1525 if (nl != NULL) *nl = '\0'; 1526 } 1527 1528 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) { 1529 #if _MSC_VER >= 1900 1530 // Starting with Visual Studio 2015, vsnprint is C99 compliant. 1531 int result = ::vsnprintf(buf, len, fmt, args); 1532 // If an encoding error occurred (result < 0) then it's not clear 1533 // whether the buffer is NUL terminated, so ensure it is. 1534 if ((result < 0) && (len > 0)) { 1535 buf[len - 1] = '\0'; 1536 } 1537 return result; 1538 #else 1539 // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use 1540 // _vsnprintf, whose behavior seems to be *mostly* consistent across 1541 // versions. However, when len == 0, avoid _vsnprintf too, and just 1542 // go straight to _vscprintf. The output is going to be truncated in 1543 // that case, except in the unusual case of empty output. More 1544 // importantly, the documentation for various versions of Visual Studio 1545 // are inconsistent about the behavior of _vsnprintf when len == 0, 1546 // including it possibly being an error. 1547 int result = -1; 1548 if (len > 0) { 1549 result = _vsnprintf(buf, len, fmt, args); 1550 // If output (including NUL terminator) is truncated, the buffer 1551 // won't be NUL terminated. Add the trailing NUL specified by C99. 
1552 if ((result < 0) || ((size_t)result >= len)) { 1553 buf[len - 1] = '\0'; 1554 } 1555 } 1556 if (result < 0) { 1557 result = _vscprintf(fmt, args); 1558 } 1559 return result; 1560 #endif // _MSC_VER dispatch 1561 } 1562 1563 static inline time_t get_mtime(const char* filename) { 1564 struct stat st; 1565 int ret = os::stat(filename, &st); 1566 assert(ret == 0, "failed to stat() file '%s': %s", filename, strerror(errno)); 1567 return st.st_mtime; 1568 } 1569 1570 int os::compare_file_modified_times(const char* file1, const char* file2) { 1571 time_t t1 = get_mtime(file1); 1572 time_t t2 = get_mtime(file2); 1573 return t1 - t2; 1574 } 1575 1576 void os::print_os_info_brief(outputStream* st) { 1577 os::print_os_info(st); 1578 } 1579 1580 void os::print_os_info(outputStream* st) { 1581 #ifdef ASSERT 1582 char buffer[1024]; 1583 st->print("HostName: "); 1584 if (get_host_name(buffer, sizeof(buffer))) { 1585 st->print("%s ", buffer); 1586 } else { 1587 st->print("N/A "); 1588 } 1589 #endif 1590 st->print("OS:"); 1591 os::win32::print_windows_version(st); 1592 } 1593 1594 void os::win32::print_windows_version(outputStream* st) { 1595 OSVERSIONINFOEX osvi; 1596 VS_FIXEDFILEINFO *file_info; 1597 TCHAR kernel32_path[MAX_PATH]; 1598 UINT len, ret; 1599 1600 // Use the GetVersionEx information to see if we're on a server or 1601 // workstation edition of Windows. Starting with Windows 8.1 we can't 1602 // trust the OS version information returned by this API. 1603 ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX)); 1604 osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX); 1605 if (!GetVersionEx((OSVERSIONINFO *)&osvi)) { 1606 st->print_cr("Call to GetVersionEx failed"); 1607 return; 1608 } 1609 bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION); 1610 1611 // Get the full path to \Windows\System32\kernel32.dll and use that for 1612 // determining what version of Windows we're running on. 
1613 len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1; 1614 ret = GetSystemDirectory(kernel32_path, len); 1615 if (ret == 0 || ret > len) { 1616 st->print_cr("Call to GetSystemDirectory failed"); 1617 return; 1618 } 1619 strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret); 1620 1621 DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL); 1622 if (version_size == 0) { 1623 st->print_cr("Call to GetFileVersionInfoSize failed"); 1624 return; 1625 } 1626 1627 LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal); 1628 if (version_info == NULL) { 1629 st->print_cr("Failed to allocate version_info"); 1630 return; 1631 } 1632 1633 if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) { 1634 os::free(version_info); 1635 st->print_cr("Call to GetFileVersionInfo failed"); 1636 return; 1637 } 1638 1639 if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) { 1640 os::free(version_info); 1641 st->print_cr("Call to VerQueryValue failed"); 1642 return; 1643 } 1644 1645 int major_version = HIWORD(file_info->dwProductVersionMS); 1646 int minor_version = LOWORD(file_info->dwProductVersionMS); 1647 int build_number = HIWORD(file_info->dwProductVersionLS); 1648 int build_minor = LOWORD(file_info->dwProductVersionLS); 1649 int os_vers = major_version * 1000 + minor_version; 1650 os::free(version_info); 1651 1652 st->print(" Windows "); 1653 switch (os_vers) { 1654 1655 case 6000: 1656 if (is_workstation) { 1657 st->print("Vista"); 1658 } else { 1659 st->print("Server 2008"); 1660 } 1661 break; 1662 1663 case 6001: 1664 if (is_workstation) { 1665 st->print("7"); 1666 } else { 1667 st->print("Server 2008 R2"); 1668 } 1669 break; 1670 1671 case 6002: 1672 if (is_workstation) { 1673 st->print("8"); 1674 } else { 1675 st->print("Server 2012"); 1676 } 1677 break; 1678 1679 case 6003: 1680 if (is_workstation) { 1681 st->print("8.1"); 1682 } else { 1683 st->print("Server 2012 R2"); 1684 } 1685 break; 1686 1687 case 10000: 
1688 if (is_workstation) { 1689 st->print("10"); 1690 } else { 1691 st->print("Server 2016"); 1692 } 1693 break; 1694 1695 default: 1696 // Unrecognized windows, print out its major and minor versions 1697 st->print("%d.%d", major_version, minor_version); 1698 break; 1699 } 1700 1701 // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could 1702 // find out whether we are running on 64 bit processor or not 1703 SYSTEM_INFO si; 1704 ZeroMemory(&si, sizeof(SYSTEM_INFO)); 1705 GetNativeSystemInfo(&si); 1706 if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) { 1707 st->print(" , 64 bit"); 1708 } 1709 1710 st->print(" Build %d", build_number); 1711 st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor); 1712 st->cr(); 1713 } 1714 1715 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) { 1716 // Nothing to do for now. 1717 } 1718 1719 void os::get_summary_cpu_info(char* buf, size_t buflen) { 1720 HKEY key; 1721 DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE, 1722 "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key); 1723 if (status == ERROR_SUCCESS) { 1724 DWORD size = (DWORD)buflen; 1725 status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size); 1726 if (status != ERROR_SUCCESS) { 1727 strncpy(buf, "## __CPU__", buflen); 1728 } 1729 RegCloseKey(key); 1730 } else { 1731 // Put generic cpu info to return 1732 strncpy(buf, "## __CPU__", buflen); 1733 } 1734 } 1735 1736 void os::print_memory_info(outputStream* st) { 1737 st->print("Memory:"); 1738 st->print(" %dk page", os::vm_page_size()>>10); 1739 1740 // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect 1741 // value if total memory is larger than 4GB 1742 MEMORYSTATUSEX ms; 1743 ms.dwLength = sizeof(ms); 1744 int r1 = GlobalMemoryStatusEx(&ms); 1745 1746 if (r1 != 0) { 1747 st->print(", system-wide physical " INT64_FORMAT "M ", 1748 (int64_t) ms.ullTotalPhys >> 20); 1749 st->print("(" 
INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20); 1750 1751 st->print("TotalPageFile size " INT64_FORMAT "M ", 1752 (int64_t) ms.ullTotalPageFile >> 20); 1753 st->print("(AvailPageFile size " INT64_FORMAT "M)", 1754 (int64_t) ms.ullAvailPageFile >> 20); 1755 1756 // on 32bit Total/AvailVirtual are interesting (show us how close we get to 2-4 GB per process borders) 1757 #if defined(_M_IX86) 1758 st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ", 1759 (int64_t) ms.ullTotalVirtual >> 20); 1760 st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20); 1761 #endif 1762 } else { 1763 st->print(", GlobalMemoryStatusEx did not succeed so we miss some memory values."); 1764 } 1765 1766 // extended memory statistics for a process 1767 PROCESS_MEMORY_COUNTERS_EX pmex; 1768 ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX)); 1769 pmex.cb = sizeof(pmex); 1770 int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex)); 1771 1772 if (r2 != 0) { 1773 st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ", 1774 (int64_t) pmex.WorkingSetSize >> 20); 1775 st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20); 1776 1777 st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ", 1778 (int64_t) pmex.PrivateUsage >> 20); 1779 st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20); 1780 } else { 1781 st->print("\nGetProcessMemoryInfo did not succeed so we miss some memory values."); 1782 } 1783 1784 st->cr(); 1785 } 1786 1787 void os::print_siginfo(outputStream *st, const void* siginfo) { 1788 const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo; 1789 st->print("siginfo:"); 1790 1791 char tmp[64]; 1792 if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) { 1793 strcpy(tmp, "EXCEPTION_??"); 1794 } 1795 st->print(" %s (0x%x)", tmp, er->ExceptionCode); 

  // For access violations / in-page errors the first two ExceptionInformation
  // slots encode the access kind and the faulting address.
  if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
       er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
       er->NumberParameters >= 2) {
    switch (er->ExceptionInformation[0]) {
    case 0: st->print(", reading address"); break;
    case 1: st->print(", writing address"); break;
    case 8: st->print(", data execution prevention violation at address"); break;
    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                       er->ExceptionInformation[0]);
    }
    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else {
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}

// Cached result of os::jvm_path(); computed once, reused on later calls.
static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like jvm.dll is installed there (append a fake suffix
    // hotspot/jvm.dll).
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {
      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      if (0 != _access(buf, 0)) {
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  if (buf[0] == '\0') {
    // Fall back to the actual location of the loaded jvm library.
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  strncpy(saved_jvm_path, buf, MAX_PATH);
  saved_jvm_path[MAX_PATH - 1] = '\0';
}


void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}


void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("@%d", args_size * sizeof(int));
#endif
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c

// Format the last Win32 (or, failing that, CRT) error into 'buf'.
// Returns the message length, or 0 if there was no pending error.
size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
                   FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
                   NULL,
                   errval,
                   0,
                   buf,
                   (DWORD)len,
                   NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = os::strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}

int os::get_last_error() {
  DWORD error = GetLastError();
  if (error == 0) {
    error = errno;
  }
  return (int)error;
}

// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;

static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}

void* os::user_handler() {
  return (void*) UserHandler;
}

// Install 'handler' for 'signal_number', returning the previous handler.
// SIGBREAK is routed through the console control handler (see note above).
void* os::signal(int signal_number, void* handler) {
  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
    void (*oldHandler)(int) = sigbreakHandler;
    sigbreakHandler = (void (*)(int)) handler;
    return (void*) oldHandler;
  } else {
    return (void*)::signal(signal_number, (void (*)(int))handler);
  }
}

void os::signal_raise(int signal_number) {
  raise(signal_number);
}

// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events. We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
  switch (event) {
  case CTRL_C_EVENT:
    if (VMError::is_error_reported()) {
      // Ctrl-C is pressed during error reporting, likely because the error
      // handler fails to abort. Let VM die immediately.
      os::die();
    }

    os::signal_raise(SIGINT);
    return TRUE;
    break;
  case CTRL_BREAK_EVENT:
    if (sigbreakHandler != NULL) {
      (*sigbreakHandler)(SIGBREAK);
    }
    return TRUE;
    break;
  case CTRL_LOGOFF_EVENT: {
    // Don't terminate JVM if it is running in a non-interactive session,
    // such as a service process.
    USEROBJECTFLAGS flags;
    HANDLE handle = GetProcessWindowStation();
    if (handle != NULL &&
        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
        sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let next handler to deal
      // with it.
      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
        return FALSE;
      }
    }
  }
  // Interactive logoff is treated like close/shutdown: fall through.
  case CTRL_CLOSE_EVENT:
  case CTRL_SHUTDOWN_EVENT:
    os::signal_raise(SIGTERM);
    return TRUE;
    break;
  default:
    break;
  }
  return FALSE;
}

// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd() {
  return NSIG;
}

// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
static Semaphore* sig_sem = NULL;

static void jdk_misc_signal_init() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  sig_sem = new Semaphore();

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.  For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified.  This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case.  See bugs 4323062, 4345157, and related bugs.

  // Add a CTRL-C handler
  SetConsoleCtrlHandler(consoleHandler, TRUE);
}

void os::signal_notify(int sig) {
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[sig]);
    sig_sem->signal();
  } else {
    // Signal thread is not created with ReduceSignalUsage and jdk_misc_signal_init
    // initialization isn't called.
    assert(ReduceSignalUsage, "signal semaphore should be created");
  }
}

// Block until some counter in pending_signals is nonzero and atomically
// claim one occurrence of it; returns the claimed signal number.
static int check_pending_signals() {
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // CAS-decrement so concurrent claimants never double-consume a signal.
      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
        return i;
      }
    }
    JavaThread *thread = JavaThread::current();

    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      sig_sem->wait();

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us.  We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        sig_sem->signal();

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}

int os::signal_wait() {
  return check_pending_signals();
}

// Implicit OS exception handling

// Redirect the faulting thread to 'handler' by rewriting its saved PC in
// the exception context, remembering the original PC in the thread.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
                      address handler) {
  JavaThread* thread = (JavaThread*) Thread::current_or_null();
  // Save pc in thread
#ifdef _M_AMD64
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
#else
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
#endif

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}


// Used for PostMortemDump
extern "C" void safepoints();
extern "C" void find(int x);
extern "C" void events();

// According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real world experience shows that occasionnaly
// the execution of an illegal instruction can generate the exception code 0xC000001E. This
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).

#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E

// From "Execution Protection in the Windows Operating System" draft 0.35
// Once a system header becomes available, the "real" define should be
// included or copied here.
2140 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08 2141 2142 // Windows Vista/2008 heap corruption check 2143 #define EXCEPTION_HEAP_CORRUPTION 0xC0000374 2144 2145 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual 2146 // C++ compiler contain this error code. Because this is a compiler-generated 2147 // error, the code is not listed in the Win32 API header files. 2148 // The code is actually a cryptic mnemonic device, with the initial "E" 2149 // standing for "exception" and the final 3 bytes (0x6D7363) representing the 2150 // ASCII values of "msc". 2151 2152 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363 2153 2154 #define def_excpt(val) { #val, (val) } 2155 2156 static const struct { char* name; uint number; } exceptlabels[] = { 2157 def_excpt(EXCEPTION_ACCESS_VIOLATION), 2158 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), 2159 def_excpt(EXCEPTION_BREAKPOINT), 2160 def_excpt(EXCEPTION_SINGLE_STEP), 2161 def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED), 2162 def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND), 2163 def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO), 2164 def_excpt(EXCEPTION_FLT_INEXACT_RESULT), 2165 def_excpt(EXCEPTION_FLT_INVALID_OPERATION), 2166 def_excpt(EXCEPTION_FLT_OVERFLOW), 2167 def_excpt(EXCEPTION_FLT_STACK_CHECK), 2168 def_excpt(EXCEPTION_FLT_UNDERFLOW), 2169 def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO), 2170 def_excpt(EXCEPTION_INT_OVERFLOW), 2171 def_excpt(EXCEPTION_PRIV_INSTRUCTION), 2172 def_excpt(EXCEPTION_IN_PAGE_ERROR), 2173 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION), 2174 def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2), 2175 def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION), 2176 def_excpt(EXCEPTION_STACK_OVERFLOW), 2177 def_excpt(EXCEPTION_INVALID_DISPOSITION), 2178 def_excpt(EXCEPTION_GUARD_PAGE), 2179 def_excpt(EXCEPTION_INVALID_HANDLE), 2180 def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION), 2181 def_excpt(EXCEPTION_HEAP_CORRUPTION) 2182 }; 2183 2184 #undef def_excpt 2185 2186 const char* os::exception_name(int exception_code, char *buf, size_t 
size) { 2187 uint code = static_cast<uint>(exception_code); 2188 for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) { 2189 if (exceptlabels[i].number == code) { 2190 jio_snprintf(buf, size, "%s", exceptlabels[i].name); 2191 return buf; 2192 } 2193 } 2194 2195 return NULL; 2196 } 2197 2198 //----------------------------------------------------------------------------- 2199 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2200 // handle exception caused by idiv; should only happen for -MinInt/-1 2201 // (division by zero is handled explicitly) 2202 #ifdef _M_AMD64 2203 PCONTEXT ctx = exceptionInfo->ContextRecord; 2204 address pc = (address)ctx->Rip; 2205 assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode"); 2206 assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2207 if (pc[0] == 0xF7) { 2208 // set correct result values and continue after idiv instruction 2209 ctx->Rip = (DWORD64)pc + 2; // idiv reg, reg is 2 bytes 2210 } else { 2211 ctx->Rip = (DWORD64)pc + 3; // REX idiv reg, reg is 3 bytes 2212 } 2213 // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation) 2214 // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the 2215 // idiv opcode (0xF7). 
2216 ctx->Rdx = (DWORD)0; // remainder 2217 // Continue the execution 2218 #else 2219 PCONTEXT ctx = exceptionInfo->ContextRecord; 2220 address pc = (address)ctx->Eip; 2221 assert(pc[0] == 0xF7, "not an idiv opcode"); 2222 assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands"); 2223 assert(ctx->Eax == min_jint, "unexpected idiv exception"); 2224 // set correct result values and continue after idiv instruction 2225 ctx->Eip = (DWORD)pc + 2; // idiv reg, reg is 2 bytes 2226 ctx->Eax = (DWORD)min_jint; // result 2227 ctx->Edx = (DWORD)0; // remainder 2228 // Continue the execution 2229 #endif 2230 return EXCEPTION_CONTINUE_EXECUTION; 2231 } 2232 2233 //----------------------------------------------------------------------------- 2234 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) { 2235 PCONTEXT ctx = exceptionInfo->ContextRecord; 2236 #ifndef _WIN64 2237 // handle exception caused by native method modifying control word 2238 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2239 2240 switch (exception_code) { 2241 case EXCEPTION_FLT_DENORMAL_OPERAND: 2242 case EXCEPTION_FLT_DIVIDE_BY_ZERO: 2243 case EXCEPTION_FLT_INEXACT_RESULT: 2244 case EXCEPTION_FLT_INVALID_OPERATION: 2245 case EXCEPTION_FLT_OVERFLOW: 2246 case EXCEPTION_FLT_STACK_CHECK: 2247 case EXCEPTION_FLT_UNDERFLOW: 2248 jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std()); 2249 if (fp_control_word != ctx->FloatSave.ControlWord) { 2250 // Restore FPCW and mask out FLT exceptions 2251 ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0; 2252 // Mask out pending FLT exceptions 2253 ctx->FloatSave.StatusWord &= 0xffffff00; 2254 return EXCEPTION_CONTINUE_EXECUTION; 2255 } 2256 } 2257 2258 if (prev_uef_handler != NULL) { 2259 // We didn't handle this exception so pass it to the previous 2260 // UnhandledExceptionFilter. 
2261 return (prev_uef_handler)(exceptionInfo); 2262 } 2263 #else // !_WIN64 2264 // On Windows, the mxcsr control bits are non-volatile across calls 2265 // See also CR 6192333 2266 // 2267 jint MxCsr = INITIAL_MXCSR; 2268 // we can't use StubRoutines::addr_mxcsr_std() 2269 // because in Win64 mxcsr is not saved there 2270 if (MxCsr != ctx->MxCsr) { 2271 ctx->MxCsr = MxCsr; 2272 return EXCEPTION_CONTINUE_EXECUTION; 2273 } 2274 #endif // !_WIN64 2275 2276 return EXCEPTION_CONTINUE_SEARCH; 2277 } 2278 2279 static inline void report_error(Thread* t, DWORD exception_code, 2280 address addr, void* siginfo, void* context) { 2281 VMError::report_and_die(t, exception_code, addr, siginfo, context); 2282 2283 // If UseOsErrorReporting, this will return here and save the error file 2284 // somewhere where we can find it in the minidump. 2285 } 2286 2287 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread, 2288 struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) { 2289 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2290 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2291 if (Interpreter::contains(pc)) { 2292 *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord); 2293 if (!fr->is_first_java_frame()) { 2294 // get_frame_at_stack_banging_point() is only called when we 2295 // have well defined stacks so java_sender() calls do not need 2296 // to assert safe_for_sender() first. 
2297 *fr = fr->java_sender(); 2298 } 2299 } else { 2300 // more complex code with compiled code 2301 assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above"); 2302 CodeBlob* cb = CodeCache::find_blob(pc); 2303 if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) { 2304 // Not sure where the pc points to, fallback to default 2305 // stack overflow handling 2306 return false; 2307 } else { 2308 *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord); 2309 // in compiled code, the stack banging is performed just after the return pc 2310 // has been pushed on the stack 2311 *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp())); 2312 if (!fr->is_java_frame()) { 2313 // See java_sender() comment above. 2314 *fr = fr->java_sender(); 2315 } 2316 } 2317 } 2318 assert(fr->is_java_frame(), "Safety check"); 2319 return true; 2320 } 2321 2322 //----------------------------------------------------------------------------- 2323 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) { 2324 if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH; 2325 DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode; 2326 #ifdef _M_AMD64 2327 address pc = (address) exceptionInfo->ContextRecord->Rip; 2328 #else 2329 address pc = (address) exceptionInfo->ContextRecord->Eip; 2330 #endif 2331 Thread* t = Thread::current_or_null_safe(); 2332 2333 // Handle SafeFetch32 and SafeFetchN exceptions. 2334 if (StubRoutines::is_safefetch_fault(pc)) { 2335 return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc)); 2336 } 2337 2338 #ifndef _WIN64 2339 // Execution protection violation - win32 running on AMD64 only 2340 // Handled first to avoid misdiagnosis as a "normal" access violation; 2341 // This is safe to do because we have a new/unique ExceptionInformation 2342 // code for this condition. 
2343 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2344 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2345 int exception_subcode = (int) exceptionRecord->ExceptionInformation[0]; 2346 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2347 2348 if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) { 2349 int page_size = os::vm_page_size(); 2350 2351 // Make sure the pc and the faulting address are sane. 2352 // 2353 // If an instruction spans a page boundary, and the page containing 2354 // the beginning of the instruction is executable but the following 2355 // page is not, the pc and the faulting address might be slightly 2356 // different - we still want to unguard the 2nd page in this case. 2357 // 2358 // 15 bytes seems to be a (very) safe value for max instruction size. 2359 bool pc_is_near_addr = 2360 (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15); 2361 bool instr_spans_page_boundary = 2362 (align_down((intptr_t) pc ^ (intptr_t) addr, 2363 (intptr_t) page_size) > 0); 2364 2365 if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) { 2366 static volatile address last_addr = 2367 (address) os::non_memory_address_word(); 2368 2369 // In conservative mode, don't unguard unless the address is in the VM 2370 if (UnguardOnExecutionViolation > 0 && addr != last_addr && 2371 (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) { 2372 2373 // Set memory to RWX and retry 2374 address page_start = align_down(addr, page_size); 2375 bool res = os::protect_memory((char*) page_start, page_size, 2376 os::MEM_PROT_RWX); 2377 2378 log_debug(os)("Execution protection violation " 2379 "at " INTPTR_FORMAT 2380 ", unguarding " INTPTR_FORMAT ": %s", p2i(addr), 2381 p2i(page_start), (res ? "success" : os::strerror(errno))); 2382 2383 // Set last_addr so if we fault again at the same address, we don't 2384 // end up in an endless loop. 2385 // 2386 // There are two potential complications here. 
Two threads trapping 2387 // at the same address at the same time could cause one of the 2388 // threads to think it already unguarded, and abort the VM. Likely 2389 // very rare. 2390 // 2391 // The other race involves two threads alternately trapping at 2392 // different addresses and failing to unguard the page, resulting in 2393 // an endless loop. This condition is probably even more unlikely 2394 // than the first. 2395 // 2396 // Although both cases could be avoided by using locks or thread 2397 // local last_addr, these solutions are unnecessary complication: 2398 // this handler is a best-effort safety net, not a complete solution. 2399 // It is disabled by default and should only be used as a workaround 2400 // in case we missed any no-execute-unsafe VM code. 2401 2402 last_addr = addr; 2403 2404 return EXCEPTION_CONTINUE_EXECUTION; 2405 } 2406 } 2407 2408 // Last unguard failed or not unguarding 2409 tty->print_raw_cr("Execution protection violation"); 2410 report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord, 2411 exceptionInfo->ContextRecord); 2412 return EXCEPTION_CONTINUE_SEARCH; 2413 } 2414 } 2415 #endif // _WIN64 2416 2417 // Check to see if we caught the safepoint code in the 2418 // process of write protecting the memory serialization page. 2419 // It write enables the page immediately after protecting it 2420 // so just return. 2421 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2422 if (t != NULL && t->is_Java_thread()) { 2423 JavaThread* thread = (JavaThread*) t; 2424 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2425 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2426 if (os::is_memory_serialize_page(thread, addr)) { 2427 // Block current thread until the memory serialize page permission restored. 
2428 os::block_on_serialize_page_trap(); 2429 return EXCEPTION_CONTINUE_EXECUTION; 2430 } 2431 } 2432 } 2433 2434 if ((exception_code == EXCEPTION_ACCESS_VIOLATION) && 2435 VM_Version::is_cpuinfo_segv_addr(pc)) { 2436 // Verify that OS save/restore AVX registers. 2437 return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr()); 2438 } 2439 2440 if (t != NULL && t->is_Java_thread()) { 2441 JavaThread* thread = (JavaThread*) t; 2442 bool in_java = thread->thread_state() == _thread_in_Java; 2443 2444 // Handle potential stack overflows up front. 2445 if (exception_code == EXCEPTION_STACK_OVERFLOW) { 2446 if (thread->stack_guards_enabled()) { 2447 if (in_java) { 2448 frame fr; 2449 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2450 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2451 if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) { 2452 assert(fr.is_java_frame(), "Must be a Java frame"); 2453 SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr); 2454 } 2455 } 2456 // Yellow zone violation. The o/s has unprotected the first yellow 2457 // zone page for us. Note: must call disable_stack_yellow_zone to 2458 // update the enabled status, even if the zone contains only one page. 2459 assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages"); 2460 thread->disable_stack_yellow_reserved_zone(); 2461 // If not in java code, return and hope for the best. 2462 return in_java 2463 ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)) 2464 : EXCEPTION_CONTINUE_EXECUTION; 2465 } else { 2466 // Fatal red zone violation. 
2467 thread->disable_stack_red_zone(); 2468 tty->print_raw_cr("An unrecoverable stack overflow has occurred."); 2469 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2470 exceptionInfo->ContextRecord); 2471 return EXCEPTION_CONTINUE_SEARCH; 2472 } 2473 } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2474 // Either stack overflow or null pointer exception. 2475 if (in_java) { 2476 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2477 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2478 address stack_end = thread->stack_end(); 2479 if (addr < stack_end && addr >= stack_end - os::vm_page_size()) { 2480 // Stack overflow. 2481 assert(!os::uses_stack_guard_pages(), 2482 "should be caught by red zone code above."); 2483 return Handle_Exception(exceptionInfo, 2484 SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW)); 2485 } 2486 // Check for safepoint polling and implicit null 2487 // We only expect null pointers in the stubs (vtable) 2488 // the rest are checked explicitly now. 2489 CodeBlob* cb = CodeCache::find_blob(pc); 2490 if (cb != NULL) { 2491 if (os::is_poll_address(addr)) { 2492 address stub = SharedRuntime::get_poll_stub(pc); 2493 return Handle_Exception(exceptionInfo, stub); 2494 } 2495 } 2496 { 2497 #ifdef _WIN64 2498 // If it's a legal stack address map the entire region in 2499 // 2500 PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord; 2501 address addr = (address) exceptionRecord->ExceptionInformation[1]; 2502 if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) { 2503 addr = (address)((uintptr_t)addr & 2504 (~((uintptr_t)os::vm_page_size() - (uintptr_t)1))); 2505 os::commit_memory((char *)addr, thread->stack_base() - addr, 2506 !ExecMem); 2507 return EXCEPTION_CONTINUE_EXECUTION; 2508 } else 2509 #endif 2510 { 2511 // Null pointer exception. 
2512 if (MacroAssembler::uses_implicit_null_check((void*)addr)) { 2513 address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL); 2514 if (stub != NULL) return Handle_Exception(exceptionInfo, stub); 2515 } 2516 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2517 exceptionInfo->ContextRecord); 2518 return EXCEPTION_CONTINUE_SEARCH; 2519 } 2520 } 2521 } 2522 2523 #ifdef _WIN64 2524 // Special care for fast JNI field accessors. 2525 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks 2526 // in and the heap gets shrunk before the field access. 2527 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 2528 address addr = JNI_FastGetField::find_slowcase_pc(pc); 2529 if (addr != (address)-1) { 2530 return Handle_Exception(exceptionInfo, addr); 2531 } 2532 } 2533 #endif 2534 2535 // Stack overflow or null pointer exception in native code. 2536 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2537 exceptionInfo->ContextRecord); 2538 return EXCEPTION_CONTINUE_SEARCH; 2539 } // /EXCEPTION_ACCESS_VIOLATION 2540 // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 2541 2542 if (exception_code == EXCEPTION_IN_PAGE_ERROR) { 2543 CompiledMethod* nm = NULL; 2544 JavaThread* thread = (JavaThread*)t; 2545 if (in_java) { 2546 CodeBlob* cb = CodeCache::find_blob_unsafe(pc); 2547 nm = (cb != NULL) ? 
cb->as_compiled_method_or_null() : NULL; 2548 } 2549 if ((thread->thread_state() == _thread_in_vm && 2550 thread->doing_unsafe_access()) || 2551 (nm != NULL && nm->has_unsafe_access())) { 2552 return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, (address)Assembler::locate_next_instruction(pc))); 2553 } 2554 } 2555 2556 if (in_java) { 2557 switch (exception_code) { 2558 case EXCEPTION_INT_DIVIDE_BY_ZERO: 2559 return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO)); 2560 2561 case EXCEPTION_INT_OVERFLOW: 2562 return Handle_IDiv_Exception(exceptionInfo); 2563 2564 } // switch 2565 } 2566 if (((thread->thread_state() == _thread_in_Java) || 2567 (thread->thread_state() == _thread_in_native)) && 2568 exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) { 2569 LONG result=Handle_FLT_Exception(exceptionInfo); 2570 if (result==EXCEPTION_CONTINUE_EXECUTION) return result; 2571 } 2572 } 2573 2574 if (exception_code != EXCEPTION_BREAKPOINT) { 2575 report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 2576 exceptionInfo->ContextRecord); 2577 } 2578 return EXCEPTION_CONTINUE_SEARCH; 2579 } 2580 2581 #ifndef _WIN64 2582 // Special care for fast JNI accessors. 2583 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and 2584 // the heap gets shrunk before the field access. 2585 // Need to install our own structured exception handler since native code may 2586 // install its own. 
// SEH filter for the fast-JNI accessor wrappers below (32-bit only):
// if the fault pc belongs to a fast accessor with a registered slow-case
// continuation, resume there; otherwise keep searching.
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    address pc = (address) exceptionInfo->ContextRecord->Eip;
    address addr = JNI_FastGetField::find_slowcase_pc(pc);
    if (addr != (address)-1) {
      return Handle_Exception(exceptionInfo, addr);
    }
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Generates a wrapper that invokes the generated fast accessor inside an
// MSVC __try/__except guarded by the filter above.  The 0 returned after a
// caught fault is never observed: the filter redirects execution to the
// slow-case stub instead of running the __except body to completion.
#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
  Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
                                                     jobject obj,           \
                                                     jfieldID fieldID) {    \
    __try {                                                                 \
      return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
                                                                 obj,       \
                                                                 fieldID);  \
    } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
                                              _exception_info())) {         \
    }                                                                       \
    return 0;                                                               \
  }

DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
DEFINE_FAST_GETFIELD(jchar,    char,   Char)
DEFINE_FAST_GETFIELD(jshort,   short,  Short)
DEFINE_FAST_GETFIELD(jint,     int,    Int)
DEFINE_FAST_GETFIELD(jlong,    long,   Long)
DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
DEFINE_FAST_GETFIELD(jdouble,  double, Double)

// Return the guarded wrapper matching the given primitive field type.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
  case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
  case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
  case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
  case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
  case T_INT:     return (address)jni_fast_GetIntField_wrapper;
  case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
  case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
  case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
  default:        ShouldNotReachHere();
  }
  return (address)-1;
}
#endif

// Virtual Memory

int os::vm_page_size() { return os::win32::vm_page_size(); }
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}

// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
//   + select Control Panel -> Administrative Tools -> Local Security Policy
//   + select Local Policies -> User Rights Assignment
//   + double click "Lock pages in memory", add users and/or groups
//   + reboot
// Note the above steps are needed for administrator as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experiment it only uses large page if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This probably will change
// in the future, if so the code below needs to be revisited.
#ifndef MEM_LARGE_PAGES
#define MEM_LARGE_PAGES 0x20000000
#endif

// Process/token handles used only during large page initialization; see
// request_lock_memory_privilege() and cleanup_after_large_page_init().
static HANDLE _hProcess;
static HANDLE _hToken;

// Container for NUMA node list info
class NUMANodeListHolder {
 private:
  int *_numa_used_node_list;  // allocated below
  int _numa_used_node_count;

  void free_node_list() {
    if (_numa_used_node_list != NULL) {
      FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
    }
  }

 public:
  NUMANodeListHolder() {
    _numa_used_node_count = 0;
    _numa_used_node_list = NULL;
    // do rest of initialization in build routine (after function pointers are set up)
  }

  ~NUMANodeListHolder() {
    free_node_list();
  }

  // Collect the NUMA nodes whose processor mask intersects this process's
  // affinity mask.  Returns true only when more than one node is usable
  // (interleaving needs at least two nodes).
  bool build() {
    DWORD_PTR proc_aff_mask;
    DWORD_PTR sys_aff_mask;
    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
    ULONG highest_node_number;
    if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
    free_node_list();
    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
    for (unsigned int i = 0; i <= highest_node_number; i++) {
      ULONGLONG proc_mask_numa_node;
      if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
      if ((proc_aff_mask & proc_mask_numa_node)!=0) {
        _numa_used_node_list[_numa_used_node_count++] = i;
      }
    }
    return (_numa_used_node_count > 1);
  }

  int get_count() { return _numa_used_node_count; }
  int get_node_list_entry(int n) {
    // for indexes out of range, returns -1
    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
  }

} numa_node_list_holder;



// Large page size in bytes; 0 until large_page_init() succeeds.
static size_t _large_page_size = 0;

// Try to enable the SeLockMemoryPrivilege for this process (required for
// large pages).  On success returns true; _hProcess/_hToken stay open until
// cleanup_after_large_page_init() closes them.
static bool request_lock_memory_privilege() {
  _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
                          os::current_process_id());

  LUID luid;
  if (_hProcess != NULL &&
      OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
      LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {

    TOKEN_PRIVILEGES tp;
    tp.PrivilegeCount = 1;
    tp.Privileges[0].Luid = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See MSDN document.
    if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
        (GetLastError() == ERROR_SUCCESS)) {
      return true;
    }
  }

  return false;
}

// Close and clear the handles opened by request_lock_memory_privilege().
static void cleanup_after_large_page_init() {
  if (_hProcess) CloseHandle(_hProcess);
  _hProcess = NULL;
  if (_hToken) CloseHandle(_hToken);
  _hToken = NULL;
}

// Prepare NUMA interleaving: align NUMAInterleaveGranularity and build the
// usable-node list.  Returns false (optionally warning when the flag was
// explicitly set) if fewer than two NUMA nodes are available.
static bool numa_interleaving_init() {
  bool success = false;
  bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

  // print a warning if UseNUMAInterleaving flag is specified on command line
  bool warn_on_failure = use_numa_interleaving_specified;
#define WARN(msg) if (warn_on_failure) { warning(msg); }

  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);

  if (numa_node_list_holder.build()) {
    if (log_is_enabled(Debug, os, cpu)) {
      Log(os, cpu) log;
      log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
      for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
        log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
      }
    }
    success = true;
  } else {
    WARN("Process does not cover multiple NUMA nodes.");
  }
  if (!success) {
    if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
  }
  return success;
#undef WARN
}

// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
//  * UseNUMAInterleaving requires a separate node for each piece
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
                                         DWORD prot,
                                         bool should_inject_error = false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) VirtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  // Drop the placeholder reservation; the per-chunk allocations below will
  // re-claim the same (now known-free) address range piecewise.  Note this
  // is racy by nature: another thread could allocate into the gap.
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = align_up(p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t bytes_remaining = bytes;
  // An overflow of align_up() would have been caught above
  // in the calculation of size_of_reserve.
  char * next_alloc_addr = p_buf;
  HANDLE hProc = GetCurrentProcess();

#ifdef ASSERT
  // Variable for the failure injection
  int ran_num = os::random();
  size_t fail_after = ran_num % bytes;
#endif

  int count=0;
  while (bytes_remaining) {
    // select bytes_to_rq to get to the next chunk_size boundary

    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
    // Note allocate and commit
    char * p_new;

#ifdef ASSERT
    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
#else
    const bool inject_error_now = false;
#endif

    if (inject_error_now) {
      p_new = NULL;
    } else {
      if (!UseNUMAInterleaving) {
        p_new = (char *) VirtualAlloc(next_alloc_addr,
                                      bytes_to_rq,
                                      flags,
                                      prot);
      } else {
        // get the next node to use from the used_node_list
        // (round-robin over the nodes collected by numa_node_list_holder)
        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
        p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
      }
    }

    if (p_new == NULL) {
      // Free any allocated pages
      if (next_alloc_addr > p_buf) {
        // Some memory was committed so release it.
        size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // need to create a dummy 'reserve' record to match
        // the release.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        log_develop_debug(pagesize)("Reserving pages individually failed.");
      }
#endif
      return NULL;
    }

    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}



void os::large_page_init() {
  if (!UseLargePages) return;

  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);
  bool success = false;

#define WARN(msg) if (warn_on_failure) { warning(msg); }
  if (request_lock_memory_privilege()) {
    size_t s = GetLargePageMinimum();
    if (s) {
#if defined(IA32) || defined(AMD64)
      if (s > 4*M || LargePageSizeInBytes > 4*M) {
        WARN("JVM cannot use large pages bigger than 4mb.");
      } else {
#endif
        // Honor LargePageSizeInBytes when it is a multiple of the OS minimum;
        // otherwise fall back to the OS minimum large page size.
        if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
          _large_page_size = LargePageSizeInBytes;
        } else {
          _large_page_size = s;
        }
        success = true;
#if defined(IA32) || defined(AMD64)
      }
#endif
    } else {
      WARN("Large page is not supported by the processor.");
    }
  } else {
    WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
  }
#undef WARN

  const size_t default_page_size =
(size_t) vm_page_size(); 2941 if (success && _large_page_size > default_page_size) { 2942 _page_sizes[0] = _large_page_size; 2943 _page_sizes[1] = default_page_size; 2944 _page_sizes[2] = 0; 2945 } 2946 2947 cleanup_after_large_page_init(); 2948 UseLargePages = success; 2949 } 2950 2951 int os::create_file_for_heap(const char* dir) { 2952 2953 const char name_template[] = "/jvmheap.XXXXXX"; 2954 char *fullname = (char*)os::malloc((strlen(dir) + strlen(name_template) + 1), mtInternal); 2955 if (fullname == NULL) { 2956 vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno))); 2957 return -1; 2958 } 2959 2960 (void)strncpy(fullname, dir, strlen(dir)+1); 2961 (void)strncat(fullname, name_template, strlen(name_template)); 2962 2963 os::native_path(fullname); 2964 2965 char *path = _mktemp(fullname); 2966 if (path == NULL) { 2967 warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno)); 2968 os::free(fullname); 2969 return -1; 2970 } 2971 2972 int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD); 2973 2974 os::free(fullname); 2975 if (fd < 0) { 2976 warning("Problem opening file for heap (%s)", os::strerror(errno)); 2977 return -1; 2978 } 2979 return fd; 2980 } 2981 2982 // If 'base' is not NULL, function will return NULL if it cannot get 'base' 2983 char* os::map_memory_to_file(char* base, size_t size, int fd) { 2984 assert(fd != -1, "File descriptor is not valid"); 2985 2986 HANDLE fh = (HANDLE)_get_osfhandle(fd); 2987 #ifdef _LP64 2988 HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE, 2989 (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL); 2990 #else 2991 HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE, 2992 0, (DWORD)size, NULL); 2993 #endif 2994 if (fileMapping == NULL) { 2995 if (GetLastError() == ERROR_DISK_FULL) { 2996 vm_exit_during_initialization(err_msg("Could not allocate sufficient disk 
space for Java heap")); 2997 } 2998 else { 2999 vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory")); 3000 } 3001 3002 return NULL; 3003 } 3004 3005 LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base); 3006 3007 CloseHandle(fileMapping); 3008 3009 return (char*)addr; 3010 } 3011 3012 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) { 3013 assert(fd != -1, "File descriptor is not valid"); 3014 assert(base != NULL, "Base address cannot be NULL"); 3015 3016 release_memory(base, size); 3017 return map_memory_to_file(base, size, fd); 3018 } 3019 3020 // On win32, one cannot release just a part of reserved memory, it's an 3021 // all or nothing deal. When we split a reservation, we must break the 3022 // reservation into two reservations. 3023 void os::pd_split_reserved_memory(char *base, size_t size, size_t split, 3024 bool realloc) { 3025 if (size > 0) { 3026 release_memory(base, size); 3027 if (realloc) { 3028 reserve_memory(split, base); 3029 } 3030 if (size != split) { 3031 reserve_memory(size - split, base + split); 3032 } 3033 } 3034 } 3035 3036 // Multiple threads can race in this code but it's not possible to unmap small sections of 3037 // virtual space to get requested alignment, like posix-like os's. 3038 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe. 
3039 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) { 3040 assert((alignment & (os::vm_allocation_granularity() - 1)) == 0, 3041 "Alignment must be a multiple of allocation granularity (page size)"); 3042 assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned"); 3043 3044 size_t extra_size = size + alignment; 3045 assert(extra_size >= size, "overflow, size is too large to allow alignment"); 3046 3047 char* aligned_base = NULL; 3048 3049 do { 3050 char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc); 3051 if (extra_base == NULL) { 3052 return NULL; 3053 } 3054 // Do manual alignment 3055 aligned_base = align_up(extra_base, alignment); 3056 3057 if (file_desc != -1) { 3058 os::unmap_memory(extra_base, extra_size); 3059 } else { 3060 os::release_memory(extra_base, extra_size); 3061 } 3062 3063 aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc); 3064 3065 } while (aligned_base == NULL); 3066 3067 return aligned_base; 3068 } 3069 3070 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) { 3071 assert((size_t)addr % os::vm_allocation_granularity() == 0, 3072 "reserve alignment"); 3073 assert(bytes % os::vm_page_size() == 0, "reserve page size"); 3074 char* res; 3075 // note that if UseLargePages is on, all the areas that require interleaving 3076 // will go thru reserve_memory_special rather than thru here. 
3077 bool use_individual = (UseNUMAInterleaving && !UseLargePages); 3078 if (!use_individual) { 3079 res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE); 3080 } else { 3081 elapsedTimer reserveTimer; 3082 if (Verbose && PrintMiscellaneous) reserveTimer.start(); 3083 // in numa interleaving, we have to allocate pages individually 3084 // (well really chunks of NUMAInterleaveGranularity size) 3085 res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE); 3086 if (res == NULL) { 3087 warning("NUMA page allocation failed"); 3088 } 3089 if (Verbose && PrintMiscellaneous) { 3090 reserveTimer.stop(); 3091 tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes, 3092 reserveTimer.milliseconds(), reserveTimer.ticks()); 3093 } 3094 } 3095 assert(res == NULL || addr == NULL || addr == res, 3096 "Unexpected address from reserve."); 3097 3098 return res; 3099 } 3100 3101 // Reserve memory at an arbitrary address, only if that area is 3102 // available (and not reserved for something else). 3103 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) { 3104 // Windows os::reserve_memory() fails of the requested address range is 3105 // not avilable. 3106 return reserve_memory(bytes, requested_addr); 3107 } 3108 3109 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) { 3110 assert(file_desc >= 0, "file_desc is not valid"); 3111 return map_memory_to_file(requested_addr, bytes, file_desc); 3112 } 3113 3114 size_t os::large_page_size() { 3115 return _large_page_size; 3116 } 3117 3118 bool os::can_commit_large_page_memory() { 3119 // Windows only uses large page memory when the entire region is reserved 3120 // and committed in a single VirtualAlloc() call. This may change in the 3121 // future, but with Windows 2003 it's not possible to commit on demand. 
3122 return false; 3123 } 3124 3125 bool os::can_execute_large_page_memory() { 3126 return true; 3127 } 3128 3129 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr, 3130 bool exec) { 3131 assert(UseLargePages, "only for large pages"); 3132 3133 if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) { 3134 return NULL; // Fallback to small pages. 3135 } 3136 3137 const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE; 3138 const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 3139 3140 // with large pages, there are two cases where we need to use Individual Allocation 3141 // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003) 3142 // 2) NUMA Interleaving is enabled, in which case we use a different node for each page 3143 if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) { 3144 log_debug(pagesize)("Reserving large pages individually."); 3145 3146 char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError); 3147 if (p_buf == NULL) { 3148 // give an appropriate warning message 3149 if (UseNUMAInterleaving) { 3150 warning("NUMA large page allocation failed, UseLargePages flag ignored"); 3151 } 3152 if (UseLargePagesIndividualAllocation) { 3153 warning("Individually allocated large pages failed, " 3154 "use -XX:-UseLargePagesIndividualAllocation to turn off"); 3155 } 3156 return NULL; 3157 } 3158 3159 return p_buf; 3160 3161 } else { 3162 log_debug(pagesize)("Reserving large pages in a single large chunk."); 3163 3164 // normal policy just allocate it all at once 3165 DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES; 3166 char * res = (char *)VirtualAlloc(addr, bytes, flag, prot); 3167 if (res != NULL) { 3168 MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC); 3169 } 3170 3171 return res; 3172 } 3173 } 3174 3175 bool os::release_memory_special(char* base, size_t 
bytes) { 3176 assert(base != NULL, "Sanity check"); 3177 return release_memory(base, bytes); 3178 } 3179 3180 void os::print_statistics() { 3181 } 3182 3183 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) { 3184 int err = os::get_last_error(); 3185 char buf[256]; 3186 size_t buf_len = os::lasterror(buf, sizeof(buf)); 3187 warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT 3188 ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes, 3189 exec, buf_len != 0 ? buf : "<no_error_string>", err); 3190 } 3191 3192 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) { 3193 if (bytes == 0) { 3194 // Don't bother the OS with noops. 3195 return true; 3196 } 3197 assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries"); 3198 assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks"); 3199 // Don't attempt to print anything if the OS call fails. We're 3200 // probably low on resources, so the print itself may cause crashes. 3201 3202 // unless we have NUMAInterleaving enabled, the range of a commit 3203 // is always within a reserve covered by a single VirtualAlloc 3204 // in that case we can just do a single commit for the requested size 3205 if (!UseNUMAInterleaving) { 3206 if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) { 3207 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 3208 return false; 3209 } 3210 if (exec) { 3211 DWORD oldprot; 3212 // Windows doc says to use VirtualProtect to get execute permissions 3213 if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) { 3214 NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 3215 return false; 3216 } 3217 } 3218 return true; 3219 } else { 3220 3221 // when NUMAInterleaving is enabled, the commit might cover a range that 3222 // came from multiple VirtualAlloc reserves (using allocate_pages_individually). 3223 // VirtualQuery can help us determine that. 
The RegionSize that VirtualQuery 3224 // returns represents the number of bytes that can be committed in one step. 3225 size_t bytes_remaining = bytes; 3226 char * next_alloc_addr = addr; 3227 while (bytes_remaining > 0) { 3228 MEMORY_BASIC_INFORMATION alloc_info; 3229 VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info)); 3230 size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3231 if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, 3232 PAGE_READWRITE) == NULL) { 3233 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 3234 exec);) 3235 return false; 3236 } 3237 if (exec) { 3238 DWORD oldprot; 3239 if (!VirtualProtect(next_alloc_addr, bytes_to_rq, 3240 PAGE_EXECUTE_READWRITE, &oldprot)) { 3241 NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 3242 exec);) 3243 return false; 3244 } 3245 } 3246 bytes_remaining -= bytes_to_rq; 3247 next_alloc_addr += bytes_to_rq; 3248 } 3249 } 3250 // if we made it this far, return true 3251 return true; 3252 } 3253 3254 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, 3255 bool exec) { 3256 // alignment_hint is ignored on this OS 3257 return pd_commit_memory(addr, size, exec); 3258 } 3259 3260 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec, 3261 const char* mesg) { 3262 assert(mesg != NULL, "mesg must be specified"); 3263 if (!pd_commit_memory(addr, size, exec)) { 3264 warn_fail_commit_memory(addr, size, exec); 3265 vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg); 3266 } 3267 } 3268 3269 void os::pd_commit_memory_or_exit(char* addr, size_t size, 3270 size_t alignment_hint, bool exec, 3271 const char* mesg) { 3272 // alignment_hint is ignored on this OS 3273 pd_commit_memory_or_exit(addr, size, exec, mesg); 3274 } 3275 3276 bool os::pd_uncommit_memory(char* addr, size_t bytes) { 3277 if (bytes == 0) { 3278 // Don't bother the OS with noops. 
3279 return true; 3280 } 3281 assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries"); 3282 assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks"); 3283 return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0); 3284 } 3285 3286 bool os::pd_release_memory(char* addr, size_t bytes) { 3287 return VirtualFree(addr, 0, MEM_RELEASE) != 0; 3288 } 3289 3290 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 3291 return os::commit_memory(addr, size, !ExecMem); 3292 } 3293 3294 bool os::remove_stack_guard_pages(char* addr, size_t size) { 3295 return os::uncommit_memory(addr, size); 3296 } 3297 3298 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) { 3299 uint count = 0; 3300 bool ret = false; 3301 size_t bytes_remaining = bytes; 3302 char * next_protect_addr = addr; 3303 3304 // Use VirtualQuery() to get the chunk size. 3305 while (bytes_remaining) { 3306 MEMORY_BASIC_INFORMATION alloc_info; 3307 if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) { 3308 return false; 3309 } 3310 3311 size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 3312 // We used different API at allocate_pages_individually() based on UseNUMAInterleaving, 3313 // but we don't distinguish here as both cases are protected by same API. 
3314 ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0; 3315 warning("Failed protecting pages individually for chunk #%u", count); 3316 if (!ret) { 3317 return false; 3318 } 3319 3320 bytes_remaining -= bytes_to_protect; 3321 next_protect_addr += bytes_to_protect; 3322 count++; 3323 } 3324 return ret; 3325 } 3326 3327 // Set protections specified 3328 bool os::protect_memory(char* addr, size_t bytes, ProtType prot, 3329 bool is_committed) { 3330 unsigned int p = 0; 3331 switch (prot) { 3332 case MEM_PROT_NONE: p = PAGE_NOACCESS; break; 3333 case MEM_PROT_READ: p = PAGE_READONLY; break; 3334 case MEM_PROT_RW: p = PAGE_READWRITE; break; 3335 case MEM_PROT_RWX: p = PAGE_EXECUTE_READWRITE; break; 3336 default: 3337 ShouldNotReachHere(); 3338 } 3339 3340 DWORD old_status; 3341 3342 // Strange enough, but on Win32 one can change protection only for committed 3343 // memory, not a big deal anyway, as bytes less or equal than 64K 3344 if (!is_committed) { 3345 commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX, 3346 "cannot commit protection page"); 3347 } 3348 // One cannot use os::guard_memory() here, as on Win32 guard page 3349 // have different (one-shot) semantics, from MSDN on PAGE_GUARD: 3350 // 3351 // Pages in the region become guard pages. Any attempt to access a guard page 3352 // causes the system to raise a STATUS_GUARD_PAGE exception and turn off 3353 // the guard page status. Guard pages thus act as a one-time access alarm. 3354 bool ret; 3355 if (UseNUMAInterleaving) { 3356 // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time, 3357 // so we must protect the chunks individually. 
3358 ret = protect_pages_individually(addr, bytes, p, &old_status); 3359 } else { 3360 ret = VirtualProtect(addr, bytes, p, &old_status) != 0; 3361 } 3362 #ifdef ASSERT 3363 if (!ret) { 3364 int err = os::get_last_error(); 3365 char buf[256]; 3366 size_t buf_len = os::lasterror(buf, sizeof(buf)); 3367 warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT 3368 ") failed; error='%s' (DOS error/errno=%d)", addr, bytes, 3369 buf_len != 0 ? buf : "<no_error_string>", err); 3370 } 3371 #endif 3372 return ret; 3373 } 3374 3375 bool os::guard_memory(char* addr, size_t bytes) { 3376 DWORD old_status; 3377 return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0; 3378 } 3379 3380 bool os::unguard_memory(char* addr, size_t bytes) { 3381 DWORD old_status; 3382 return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0; 3383 } 3384 3385 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { } 3386 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { } 3387 void os::numa_make_global(char *addr, size_t bytes) { } 3388 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { } 3389 bool os::numa_topology_changed() { return false; } 3390 size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); } 3391 int os::numa_get_group_id() { return 0; } 3392 size_t os::numa_get_leaf_groups(int *ids, size_t size) { 3393 if (numa_node_list_holder.get_count() == 0 && size > 0) { 3394 // Provide an answer for UMA systems 3395 ids[0] = 0; 3396 return 1; 3397 } else { 3398 // check for size bigger than actual groups_num 3399 size = MIN2(size, numa_get_groups_num()); 3400 for (int i = 0; i < (int)size; i++) { 3401 ids[i] = numa_node_list_holder.get_node_list_entry(i); 3402 } 3403 return size; 3404 } 3405 } 3406 3407 bool os::get_page_info(char *start, page_info* info) { 3408 return false; 3409 } 3410 3411 char *os::scan_pages(char *start, char* end, page_info* 
page_expected, 3412 page_info* page_found) { 3413 return end; 3414 } 3415 3416 char* os::non_memory_address_word() { 3417 // Must never look like an address returned by reserve_memory, 3418 // even in its subfields (as defined by the CPU immediate fields, 3419 // if the CPU splits constants across multiple instructions). 3420 return (char*)-1; 3421 } 3422 3423 #define MAX_ERROR_COUNT 100 3424 #define SYS_THREAD_ERROR 0xffffffffUL 3425 3426 void os::pd_start_thread(Thread* thread) { 3427 DWORD ret = ResumeThread(thread->osthread()->thread_handle()); 3428 // Returns previous suspend state: 3429 // 0: Thread was not suspended 3430 // 1: Thread is running now 3431 // >1: Thread is still suspended. 3432 assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back 3433 } 3434 3435 class HighResolutionInterval : public CHeapObj<mtThread> { 3436 // The default timer resolution seems to be 10 milliseconds. 3437 // (Where is this written down?) 3438 // If someone wants to sleep for only a fraction of the default, 3439 // then we set the timer resolution down to 1 millisecond for 3440 // the duration of their interval. 3441 // We carefully set the resolution back, since otherwise we 3442 // seem to incur an overhead (3%?) that we don't need. 3443 // CONSIDER: if ms is small, say 3, then we should run with a high resolution time. 3444 // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod(). 3445 // Alternatively, we could compute the relative error (503/500 = .6%) and only use 3446 // timeBeginPeriod() if the relative error exceeded some threshold. 3447 // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and 3448 // to decreased efficiency related to increased timer "tick" rates. We want to minimize 3449 // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high 3450 // resolution timers running. 
3451 private: 3452 jlong resolution; 3453 public: 3454 HighResolutionInterval(jlong ms) { 3455 resolution = ms % 10L; 3456 if (resolution != 0) { 3457 MMRESULT result = timeBeginPeriod(1L); 3458 } 3459 } 3460 ~HighResolutionInterval() { 3461 if (resolution != 0) { 3462 MMRESULT result = timeEndPeriod(1L); 3463 } 3464 resolution = 0L; 3465 } 3466 }; 3467 3468 int os::sleep(Thread* thread, jlong ms, bool interruptable) { 3469 jlong limit = (jlong) MAXDWORD; 3470 3471 while (ms > limit) { 3472 int res; 3473 if ((res = sleep(thread, limit, interruptable)) != OS_TIMEOUT) { 3474 return res; 3475 } 3476 ms -= limit; 3477 } 3478 3479 assert(thread == Thread::current(), "thread consistency check"); 3480 OSThread* osthread = thread->osthread(); 3481 OSThreadWaitState osts(osthread, false /* not Object.wait() */); 3482 int result; 3483 if (interruptable) { 3484 assert(thread->is_Java_thread(), "must be java thread"); 3485 JavaThread *jt = (JavaThread *) thread; 3486 ThreadBlockInVM tbivm(jt); 3487 3488 jt->set_suspend_equivalent(); 3489 // cleared by handle_special_suspend_equivalent_condition() or 3490 // java_suspend_self() via check_and_wait_while_suspended() 3491 3492 HANDLE events[1]; 3493 events[0] = osthread->interrupt_event(); 3494 HighResolutionInterval *phri=NULL; 3495 if (!ForceTimeHighResolution) { 3496 phri = new HighResolutionInterval(ms); 3497 } 3498 if (WaitForMultipleObjects(1, events, FALSE, (DWORD)ms) == WAIT_TIMEOUT) { 3499 result = OS_TIMEOUT; 3500 } else { 3501 ResetEvent(osthread->interrupt_event()); 3502 osthread->set_interrupted(false); 3503 result = OS_INTRPT; 3504 } 3505 delete phri; //if it is NULL, harmless 3506 3507 // were we externally suspended while we were waiting? 3508 jt->check_and_wait_while_suspended(); 3509 } else { 3510 assert(!thread->is_Java_thread(), "must not be java thread"); 3511 Sleep((long) ms); 3512 result = OS_TIMEOUT; 3513 } 3514 return result; 3515 } 3516 3517 // Short sleep, direct OS call. 
3518 // 3519 // ms = 0, means allow others (if any) to run. 3520 // 3521 void os::naked_short_sleep(jlong ms) { 3522 assert(ms < 1000, "Un-interruptable sleep, short time use only"); 3523 Sleep(ms); 3524 } 3525 3526 // Sleep forever; naked call to OS-specific sleep; use with CAUTION 3527 void os::infinite_sleep() { 3528 while (true) { // sleep forever ... 3529 Sleep(100000); // ... 100 seconds at a time 3530 } 3531 } 3532 3533 typedef BOOL (WINAPI * STTSignature)(void); 3534 3535 void os::naked_yield() { 3536 // Consider passing back the return value from SwitchToThread(). 3537 SwitchToThread(); 3538 } 3539 3540 // Win32 only gives you access to seven real priorities at a time, 3541 // so we compress Java's ten down to seven. It would be better 3542 // if we dynamically adjusted relative priorities. 3543 3544 int os::java_to_os_priority[CriticalPriority + 1] = { 3545 THREAD_PRIORITY_IDLE, // 0 Entry should never be used 3546 THREAD_PRIORITY_LOWEST, // 1 MinPriority 3547 THREAD_PRIORITY_LOWEST, // 2 3548 THREAD_PRIORITY_BELOW_NORMAL, // 3 3549 THREAD_PRIORITY_BELOW_NORMAL, // 4 3550 THREAD_PRIORITY_NORMAL, // 5 NormPriority 3551 THREAD_PRIORITY_NORMAL, // 6 3552 THREAD_PRIORITY_ABOVE_NORMAL, // 7 3553 THREAD_PRIORITY_ABOVE_NORMAL, // 8 3554 THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority 3555 THREAD_PRIORITY_HIGHEST, // 10 MaxPriority 3556 THREAD_PRIORITY_HIGHEST // 11 CriticalPriority 3557 }; 3558 3559 int prio_policy1[CriticalPriority + 1] = { 3560 THREAD_PRIORITY_IDLE, // 0 Entry should never be used 3561 THREAD_PRIORITY_LOWEST, // 1 MinPriority 3562 THREAD_PRIORITY_LOWEST, // 2 3563 THREAD_PRIORITY_BELOW_NORMAL, // 3 3564 THREAD_PRIORITY_BELOW_NORMAL, // 4 3565 THREAD_PRIORITY_NORMAL, // 5 NormPriority 3566 THREAD_PRIORITY_ABOVE_NORMAL, // 6 3567 THREAD_PRIORITY_ABOVE_NORMAL, // 7 3568 THREAD_PRIORITY_HIGHEST, // 8 3569 THREAD_PRIORITY_HIGHEST, // 9 NearMaxPriority 3570 THREAD_PRIORITY_TIME_CRITICAL, // 10 MaxPriority 3571 THREAD_PRIORITY_TIME_CRITICAL // 11 
CriticalPriority 3572 }; 3573 3574 static int prio_init() { 3575 // If ThreadPriorityPolicy is 1, switch tables 3576 if (ThreadPriorityPolicy == 1) { 3577 int i; 3578 for (i = 0; i < CriticalPriority + 1; i++) { 3579 os::java_to_os_priority[i] = prio_policy1[i]; 3580 } 3581 } 3582 if (UseCriticalJavaThreadPriority) { 3583 os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority]; 3584 } 3585 return 0; 3586 } 3587 3588 OSReturn os::set_native_priority(Thread* thread, int priority) { 3589 if (!UseThreadPriorities) return OS_OK; 3590 bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0; 3591 return ret ? OS_OK : OS_ERR; 3592 } 3593 3594 OSReturn os::get_native_priority(const Thread* const thread, 3595 int* priority_ptr) { 3596 if (!UseThreadPriorities) { 3597 *priority_ptr = java_to_os_priority[NormPriority]; 3598 return OS_OK; 3599 } 3600 int os_prio = GetThreadPriority(thread->osthread()->thread_handle()); 3601 if (os_prio == THREAD_PRIORITY_ERROR_RETURN) { 3602 assert(false, "GetThreadPriority failed"); 3603 return OS_ERR; 3604 } 3605 *priority_ptr = os_prio; 3606 return OS_OK; 3607 } 3608 3609 void os::interrupt(Thread* thread) { 3610 debug_only(Thread::check_for_dangling_thread_pointer(thread);) 3611 3612 OSThread* osthread = thread->osthread(); 3613 osthread->set_interrupted(true); 3614 // More than one thread can get here with the same value of osthread, 3615 // resulting in multiple notifications. We do, however, want the store 3616 // to interrupted() to be visible to other threads before we post 3617 // the interrupt event. 
3618 OrderAccess::release(); 3619 SetEvent(osthread->interrupt_event()); 3620 // For JSR166: unpark after setting status 3621 if (thread->is_Java_thread()) { 3622 ((JavaThread*)thread)->parker()->unpark(); 3623 } 3624 3625 ParkEvent * ev = thread->_ParkEvent; 3626 if (ev != NULL) ev->unpark(); 3627 } 3628 3629 3630 bool os::is_interrupted(Thread* thread, bool clear_interrupted) { 3631 debug_only(Thread::check_for_dangling_thread_pointer(thread);) 3632 3633 OSThread* osthread = thread->osthread(); 3634 // There is no synchronization between the setting of the interrupt 3635 // and it being cleared here. It is critical - see 6535709 - that 3636 // we only clear the interrupt state, and reset the interrupt event, 3637 // if we are going to report that we were indeed interrupted - else 3638 // an interrupt can be "lost", leading to spurious wakeups or lost wakeups 3639 // depending on the timing. By checking thread interrupt event to see 3640 // if the thread gets real interrupt thus prevent spurious wakeup. 3641 bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0); 3642 if (interrupted && clear_interrupted) { 3643 osthread->set_interrupted(false); 3644 ResetEvent(osthread->interrupt_event()); 3645 } // Otherwise leave the interrupted state alone 3646 3647 return interrupted; 3648 } 3649 3650 // GetCurrentThreadId() returns DWORD 3651 intx os::current_thread_id() { return GetCurrentThreadId(); } 3652 3653 static int _initial_pid = 0; 3654 3655 int os::current_process_id() { 3656 return (_initial_pid ? 
_initial_pid : _getpid()); 3657 } 3658 3659 int os::win32::_vm_page_size = 0; 3660 int os::win32::_vm_allocation_granularity = 0; 3661 int os::win32::_processor_type = 0; 3662 // Processor level is not available on non-NT systems, use vm_version instead 3663 int os::win32::_processor_level = 0; 3664 julong os::win32::_physical_memory = 0; 3665 size_t os::win32::_default_stack_size = 0; 3666 3667 intx os::win32::_os_thread_limit = 0; 3668 volatile intx os::win32::_os_thread_count = 0; 3669 3670 bool os::win32::_is_windows_server = false; 3671 3672 // 6573254 3673 // Currently, the bug is observed across all the supported Windows releases, 3674 // including the latest one (as of this writing - Windows Server 2012 R2) 3675 bool os::win32::_has_exit_bug = true; 3676 3677 void os::win32::initialize_system_info() { 3678 SYSTEM_INFO si; 3679 GetSystemInfo(&si); 3680 _vm_page_size = si.dwPageSize; 3681 _vm_allocation_granularity = si.dwAllocationGranularity; 3682 _processor_type = si.dwProcessorType; 3683 _processor_level = si.wProcessorLevel; 3684 set_processor_count(si.dwNumberOfProcessors); 3685 3686 MEMORYSTATUSEX ms; 3687 ms.dwLength = sizeof(ms); 3688 3689 // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual, 3690 // dwMemoryLoad (% of memory in use) 3691 GlobalMemoryStatusEx(&ms); 3692 _physical_memory = ms.ullTotalPhys; 3693 3694 if (FLAG_IS_DEFAULT(MaxRAM)) { 3695 // Adjust MaxRAM according to the maximum virtual address space available. 
    // Cap MaxRAM by the total virtual address space available to this process.
    FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
  }

  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch (oi.dwPlatformId) {
  case VER_PLATFORM_WIN32_NT:
    {
      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
      // Server and domain-controller SKUs are treated as "server" for
      // ergonomics decisions elsewhere in the VM.
      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
          oi.wProductType == VER_NT_SERVER) {
        _is_windows_server = true;
      }
    }
    break;
  default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
         "stack size not a multiple of page size");

  initialize_performance_counter();
}


// Load a DLL by bare name (no path components allowed) from the Windows
// system directory, falling back to the Windows directory. On failure an
// error message is written to ebuf and NULL is returned.
HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
                                      int ebuflen) {
  char path[MAX_PATH];
  DWORD size;
  DWORD pathLen = (DWORD)sizeof(path);
  HINSTANCE result = NULL;

  // only allow library name without path component
  assert(strchr(name, '\\') == NULL, "path not allowed");
  assert(strchr(name, ':') == NULL, "path not allowed");
  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
    jio_snprintf(ebuf, ebuflen,
                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
    return NULL;
  }

  // search system directory
  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // try Windows directory
  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  jio_snprintf(ebuf, ebuflen,
               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
  return NULL;
}

#define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
#define EXIT_TIMEOUT 300000 /* 5 minutes */

// One-time initializer for the critical section below; invoked via
// InitOnceExecuteOnce so exactly one thread performs the initialization.
static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
  InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
  return TRUE;
}

// Coordinates thread exit (_endthreadex) and process exit (exit/_exit) to
// work around a Windows CRT race between exiting threads and process exit
// (see has_exit_bug()). Returns exit_code, though normally it never returns.
int os::win32::exit_process_or_thread(Ept what, int exit_code) {
  // Basic approach:
  //  - Each exiting thread registers its intent to exit and then does so.
  //  - A thread trying to terminate the process must wait for all
  //    threads currently exiting to complete their exit.

  if (os::win32::has_exit_bug()) {
    // The array holds handles of the threads that have started exiting by calling
    // _endthreadex().
    // Should be large enough to avoid blocking the exiting thread due to lack of
    // a free slot.
    static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
    static int handle_count = 0;

    static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
    static CRITICAL_SECTION crit_sect;
    static volatile DWORD process_exiting = 0;
    int i, j;
    DWORD res;
    HANDLE hproc, hthr;

    // We only attempt to register threads until a process exiting
    // thread manages to set the process_exiting flag. Any threads
    // that come through here after the process_exiting flag is set
    // are unregistered and will be caught in the SuspendThread()
    // infinite loop below.
    bool registered = false;

    // The first thread that reached this point, initializes the critical section.
    if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
      warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
    } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
      if (what != EPT_THREAD) {
        // Atomically set process_exiting before the critical section
        // to increase the visibility between racing threads.
        Atomic::cmpxchg(GetCurrentThreadId(), &process_exiting, (DWORD)0);
      }
      EnterCriticalSection(&crit_sect);

      if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
        // Remove from the array those handles of the threads that have completed exiting.
        for (i = 0, j = 0; i < handle_count; ++i) {
          res = WaitForSingleObject(handles[i], 0 /* don't wait */);
          if (res == WAIT_TIMEOUT) {
            // Thread still running; keep its handle.
            handles[j++] = handles[i];
          } else {
            if (res == WAIT_FAILED) {
              warning("WaitForSingleObject failed (%u) in %s: %d\n",
                      GetLastError(), __FILE__, __LINE__);
            }
            // Don't keep the handle, if we failed waiting for it.
            CloseHandle(handles[i]);
          }
        }

        // If there's no free slot in the array of the kept handles, we'll have to
        // wait until at least one thread completes exiting.
        if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
          // Raise the priority of the oldest exiting thread to increase its chances
          // to complete sooner.
          SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
          res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
          if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
            // One thread finished; close the gap in the array by shifting left.
            i = (res - WAIT_OBJECT_0);
            handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
            for (; i < handle_count; ++i) {
              handles[i] = handles[i + 1];
            }
          } else {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Don't keep handles, if we failed waiting for them.
            for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
              CloseHandle(handles[i]);
            }
            handle_count = 0;
          }
        }

        // Store a duplicate of the current thread handle in the array of handles.
        hproc = GetCurrentProcess();
        hthr = GetCurrentThread();
        if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
                             0, FALSE, DUPLICATE_SAME_ACCESS)) {
          warning("DuplicateHandle failed (%u) in %s: %d\n",
                  GetLastError(), __FILE__, __LINE__);

          // We can't register this thread (no more handles) so this thread
          // may be racing with a thread that is calling exit(). If the thread
          // that is calling exit() has managed to set the process_exiting
          // flag, then this thread will be caught in the SuspendThread()
          // infinite loop below which closes that race. A small timing
          // window remains before the process_exiting flag is set, but it
          // is only exposed when we are out of handles.
        } else {
          ++handle_count;
          registered = true;

          // The current exiting thread has stored its handle in the array, and now
          // should leave the critical section before calling _endthreadex().
        }

      } else if (what != EPT_THREAD && handle_count > 0) {
        jlong start_time, finish_time, timeout_left;
        // Before ending the process, make sure all the threads that had called
        // _endthreadex() completed.

        // Set the priority level of the current thread to the same value as
        // the priority level of exiting threads.
        // This is to ensure it will be given a fair chance to execute if
        // the timeout expires.
        hthr = GetCurrentThread();
        SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
        start_time = os::javaTimeNanos();
        finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
        // WaitForMultipleObjects can wait on at most MAXIMUM_WAIT_OBJECTS
        // handles at a time, so process the array in portions.
        for (i = 0; ; ) {
          int portion_count = handle_count - i;
          if (portion_count > MAXIMUM_WAIT_OBJECTS) {
            portion_count = MAXIMUM_WAIT_OBJECTS;
          }
          for (j = 0; j < portion_count; ++j) {
            SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
          }
          timeout_left = (finish_time - start_time) / 1000000L;
          if (timeout_left < 0) {
            timeout_left = 0;
          }
          res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
          if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Reset portion_count so we close the remaining
            // handles due to this error.
            portion_count = handle_count - i;
          }
          for (j = 0; j < portion_count; ++j) {
            CloseHandle(handles[i + j]);
          }
          if ((i += portion_count) >= handle_count) {
            break;
          }
          start_time = os::javaTimeNanos();
        }
        handle_count = 0;
      }

      LeaveCriticalSection(&crit_sect);
    }

    if (!registered &&
        OrderAccess::load_acquire(&process_exiting) != 0 &&
        process_exiting != GetCurrentThreadId()) {
      // Some other thread is about to call exit(), so we don't let
      // the current unregistered thread proceed to exit() or _endthreadex()
      while (true) {
        SuspendThread(GetCurrentThread());
        // Avoid busy-wait loop, if SuspendThread() failed.
        Sleep(EXIT_TIMEOUT);
      }
    }
  }

  // We are here if either
  // - there's no 'race at exit' bug on this OS release;
  // - initialization of the critical section failed (unlikely);
  // - the current thread has registered itself and left the critical section;
  // - the process-exiting thread has raised the flag and left the critical section.
  if (what == EPT_THREAD) {
    _endthreadex((unsigned)exit_code);
  } else if (what == EPT_PROCESS) {
    ::exit(exit_code);
  } else {
    _exit(exit_code);
  }

  // Should not reach here
  return exit_code;
}

#undef EXIT_TIMEOUT

// Put the standard C streams into binary mode so the VM controls all
// newline translation itself.
void os::win32::setmode_streams() {
  _setmode(_fileno(stdin), _O_BINARY);
  _setmode(_fileno(stdout), _O_BINARY);
  _setmode(_fileno(stderr), _O_BINARY);
}


bool os::is_debugger_attached() {
  return IsDebuggerPresent() ? true : false;
}


// If -XX:+PauseAtExit is set, block on stdin before the process goes away
// (useful for inspecting a console window that would otherwise close).
void os::wait_for_keypress_at_exit(void) {
  if (PauseAtExit) {
    fprintf(stderr, "Press any key to continue...\n");
    fgetc(stdin);
  }
}


// Show a modal Yes/No message box; returns true iff the user chose Yes.
bool os::message_box(const char* title, const char* message) {
  int result = MessageBox(NULL, message, title,
                          MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
  return result == IDYES;
}

#ifndef PRODUCT
#ifndef _WIN64
// Helpers to check whether NX protection is enabled
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      pex->ExceptionRecord->NumberParameters > 0 &&
      pex->ExceptionRecord->ExceptionInformation[0] ==
      EXCEPTION_INFO_EXEC_VIOLATION) {
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 }; // ret
  void *code_ptr = (void *)code;
  __try {
    __asm call code_ptr
  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
    tty->print_raw_cr("NX protection detected.");
  }
}
#endif // _WIN64
#endif // PRODUCT

// This is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = _getpid();

  init_random(1234567);

  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(bool, UseLargePagesIndividualAllocation, false);

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  // Duplicate the pseudo thread handle into a real handle usable from
  // any thread later on.
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();

  // initialize fast thread access - only used for 32-bit
  win32::initialize_thread_ptr_offset();
}

// To install functions for atexit processing
extern "C" {
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}

static jint initSock();

// this is called _after_ the global arguments have been parsed
jint os::init_2(void) {
  // Setup Windows Exceptions

  // for debugging float code generation bugs
  if (ForceFloatExceptions) {
#ifndef _WIN64
    static long fp_control_word = 0;
    __asm { fstcw fp_control_word }
    // see Intel PPro Manual, Vol. 2, p 7-16
    const long precision = 0x20;
    const long underflow = 0x10;
    const long overflow = 0x08;
    const long zero_div = 0x04;
    const long denorm = 0x02;
    const long invalid = 0x01;
    fp_control_word |= invalid;
    __asm { fldcw fp_control_word }
#endif
  }

  // If stack_commit_size is 0, windows will reserve the default size,
  // but only commit a small portion of it.
  size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
  size_t default_reserve_size = os::win32::default_stack_size();
  size_t actual_reserve_size = stack_commit_size;
  if (stack_commit_size < default_reserve_size) {
    // If stack_commit_size == 0, we want this too
    actual_reserve_size = default_reserve_size;
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size. Add two 4K pages for compiler2 recursion in main thread.
  // Add in 4*BytesPerWord 4K pages to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  size_t min_stack_allowed =
            (size_t)(JavaThread::stack_guard_zone_size() +
                     JavaThread::stack_shadow_zone_size() +
                     (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);

  min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());

  if (actual_reserve_size < min_stack_allowed) {
    // NOTE(review): "%dk" formats a size_t with %d — consider SIZE_FORMAT
    // to avoid truncation/mismatch on 64-bit; verify against upstream.
    tty->print_cr("\nThe Java thread stack size specified is too small. "
                  "Specify at least %dk",
                  min_stack_allowed / K);
    return JNI_ERR;
  }

  JavaThread::set_stack_size_at_create(stack_commit_size);

  // Calculate theoretical max. size of Threads to guard against artificial
  // out-of-memory situations, where all available address-space has been
  // reserved by thread stacks.
  assert(actual_reserve_size != 0, "Must have a stack");

  // Calculate the thread limit when we should start doing Virtual Memory
  // banging. Currently when the threads will have used all but 200Mb of space.
  //
  // TODO: consider performing a similar calculation for commit size instead
  // as reserve size, since on a 64-bit platform we'll run into that more
  // often than running out of virtual memory space.  We can use the
  // lower value of the two calculations as the os_thread_limit.
  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);

  // at exit methods are called in the reverse order of their registration.
  // there is no limit to the number of functions registered. atexit does
  // not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

#ifndef _WIN64
  // Print something if NX is enabled (win32 on AMD64)
  NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
#endif

  // initialize thread priority policy
  prio_init();

  if (UseNUMA && !ForceNUMA) {
    UseNUMA = false; // We don't fully support this yet
  }

  if (UseNUMAInterleaving) {
    // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
    bool success = numa_interleaving_init();
    if (!success) UseNUMAInterleaving = false;
  }

  if (initSock() != JNI_OK) {
    return JNI_ERR;
  }

  SymbolEngine::recalc_search_path();

  // Initialize data for jdk.internal.misc.Signal
  if (!ReduceSignalUsage) {
    jdk_misc_signal_init();
  }

  return JNI_OK;
}

// Mark the polling page as unreadable
void os::make_polling_page_unreadable(void) {
  DWORD old_status;
  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
                      PAGE_NOACCESS, &old_status)) {
    fatal("Could not disable polling page");
  }
}

// Mark the polling page as readable
void os::make_polling_page_readable(void) {
  DWORD old_status;
  if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
                      PAGE_READONLY, &old_status)) {
    fatal("Could not enable polling page");
  }
}

// combine the high and low DWORD into a ULONGLONG
static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
  ULONGLONG value = high_word;
  value <<= sizeof(high_word) * 8;
  value |= low_word;
  return value;
}

// Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
  ::memset((void*)sbuf, 0, sizeof(struct stat));
  sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
  sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
                                    file_data.ftLastWriteTime.dwLowDateTime);
  sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
                                    file_data.ftCreationTime.dwLowDateTime);
  sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
                                    file_data.ftLastAccessTime.dwLowDateTime);
  if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
    sbuf->st_mode |= S_IFDIR;
  } else {
    sbuf->st_mode |= S_IFREG;
  }
}

// The following function is adapted from java.base/windows/native/libjava/canonicalize_md.c
// Creates an UNC path from a single byte path. Return buffer is
// allocated in C heap and needs to be freed by the caller.
// Returns NULL on error.
static wchar_t* create_unc_path(const char* path, errno_t &err) {
  wchar_t* wpath = NULL;
  size_t converted_chars = 0;
  size_t path_len = strlen(path) + 1; // includes the terminating NULL
  if (path[0] == '\\' && path[1] == '\\') {
    if (path[2] == '?' && path[3] == '\\'){
      // if it already has a \\?\ don't do the prefix
      wpath = (wchar_t*)os::malloc(path_len * sizeof(wchar_t), mtInternal);
      if (wpath != NULL) {
        err = ::mbstowcs_s(&converted_chars, wpath, path_len, path, path_len);
      } else {
        err = ENOMEM;
      }
    } else {
      // only UNC pathname includes double slashes here
      wpath = (wchar_t*)os::malloc((path_len + 7) * sizeof(wchar_t), mtInternal);
      if (wpath != NULL) {
        ::wcscpy(wpath, L"\\\\?\\UNC\0");
        err = ::mbstowcs_s(&converted_chars, &wpath[7], path_len, path, path_len);
      } else {
        err = ENOMEM;
      }
    }
  } else {
    wpath = (wchar_t*)os::malloc((path_len + 4) * sizeof(wchar_t), mtInternal);
    if (wpath != NULL) {
      ::wcscpy(wpath, L"\\\\?\\\0");
      err = ::mbstowcs_s(&converted_chars, &wpath[4], path_len, path, path_len);
    } else {
      err = ENOMEM;
    }
  }
  return wpath;
}

// Releases a buffer returned by create_unc_path().
static void destroy_unc_path(wchar_t* wpath) {
  os::free(wpath);
}

// stat() replacement; uses GetFileAttributesEx (ANSI for short paths,
// wide \\?\-prefixed for long paths). Returns 0 on success, -1 and sets
// errno on failure.
int os::stat(const char *path, struct stat *sbuf) {
  char* pathbuf = (char*)os::strdup(path, mtInternal);
  if (pathbuf == NULL) {
    errno = ENOMEM;
    return -1;
  }
  os::native_path(pathbuf);
  int ret;
  WIN32_FILE_ATTRIBUTE_DATA file_data;
  // Not using stat() to avoid the problem described in JDK-6539723
  if (strlen(path) < MAX_PATH) {
    BOOL bret = ::GetFileAttributesExA(pathbuf, GetFileExInfoStandard, &file_data);
    if (!bret) {
      errno = ::GetLastError();
      ret = -1;
    }
    else {
      file_attribute_data_to_stat(sbuf, file_data);
      ret = 0;
    }
  } else {
    errno_t err = ERROR_SUCCESS;
    wchar_t* wpath = create_unc_path(pathbuf, err);
    if (err != ERROR_SUCCESS) {
      if (wpath != NULL) {
        destroy_unc_path(wpath);
      }
      os::free(pathbuf);
      errno = err;
      return -1;
    }
    BOOL bret = ::GetFileAttributesExW(wpath, GetFileExInfoStandard, &file_data);
    if (!bret) {
      errno = ::GetLastError();
      ret = -1;
    } else {
      file_attribute_data_to_stat(sbuf, file_data);
      ret = 0;
    }
    destroy_unc_path(wpath);
  }
  os::free(pathbuf);
  return ret;
}


// FILETIME is two DWORDs counting 100-ns intervals; fold them into a jlong.
#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))


// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
//
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
// the fast estimate available on the platform.

// current_thread_cpu_time() is not optimized for Windows yet
jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
}

jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns.
  return os::thread_cpu_time(thread, true /* user+sys */);
}

jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
}

// Returns CPU time in nanoseconds, or -1 if GetThreadTimes fails.
jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
  // This code is copy from classic VM -> hpi::sysThreadCPUTime
  // If this function changes, os::is_thread_cpu_time_supported() should too
  FILETIME CreationTime;
  FILETIME ExitTime;
  FILETIME KernelTime;
  FILETIME UserTime;

  if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
                     &ExitTime, &KernelTime, &UserTime) == 0) {
    return -1;
  } else if (user_sys_cpu_time) {
    // 100-ns units * 100 == nanoseconds
    return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
  } else {
    return FT2INT64(UserTime) * 100;
  }
}

void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}

void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}

// Probe once whether GetThreadTimes works on this platform.
bool os::is_thread_cpu_time_supported() {
  // see os::thread_cpu_time
  FILETIME CreationTime;
  FILETIME ExitTime;
  FILETIME KernelTime;
  FILETIME UserTime;

  if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
                     &KernelTime, &UserTime) == 0) {
    return false;
  } else {
    return true;
  }
}
4369 4370 // Windows does't provide a loadavg primitive so this is stubbed out for now. 4371 // It does have primitives (PDH API) to get CPU usage and run queue length. 4372 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length" 4373 // If we wanted to implement loadavg on Windows, we have a few options: 4374 // 4375 // a) Query CPU usage and run queue length and "fake" an answer by 4376 // returning the CPU usage if it's under 100%, and the run queue 4377 // length otherwise. It turns out that querying is pretty slow 4378 // on Windows, on the order of 200 microseconds on a fast machine. 4379 // Note that on the Windows the CPU usage value is the % usage 4380 // since the last time the API was called (and the first call 4381 // returns 100%), so we'd have to deal with that as well. 4382 // 4383 // b) Sample the "fake" answer using a sampling thread and store 4384 // the answer in a global variable. The call to loadavg would 4385 // just return the value of the global, avoiding the slow query. 4386 // 4387 // c) Sample a better answer using exponential decay to smooth the 4388 // value. This is basically the algorithm used by UNIX kernels. 4389 // 4390 // Note that sampling thread starvation could affect both (b) and (c). 
4391 int os::loadavg(double loadavg[], int nelem) { 4392 return -1; 4393 } 4394 4395 4396 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield() 4397 bool os::dont_yield() { 4398 return DontYieldALot; 4399 } 4400 4401 // This method is a slightly reworked copy of JDK's sysOpen 4402 // from src/windows/hpi/src/sys_api_md.c 4403 4404 int os::open(const char *path, int oflag, int mode) { 4405 char* pathbuf = (char*)os::strdup(path, mtInternal); 4406 if (pathbuf == NULL) { 4407 errno = ENOMEM; 4408 return -1; 4409 } 4410 os::native_path(pathbuf); 4411 int ret; 4412 if (strlen(path) < MAX_PATH) { 4413 ret = ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode); 4414 } else { 4415 errno_t err = ERROR_SUCCESS; 4416 wchar_t* wpath = create_unc_path(pathbuf, err); 4417 if (err != ERROR_SUCCESS) { 4418 if (wpath != NULL) { 4419 destroy_unc_path(wpath); 4420 } 4421 os::free(pathbuf); 4422 errno = err; 4423 return -1; 4424 } 4425 ret = ::_wopen(wpath, oflag | O_BINARY | O_NOINHERIT, mode); 4426 if (ret == -1) { 4427 errno = ::GetLastError(); 4428 } 4429 destroy_unc_path(wpath); 4430 } 4431 os::free(pathbuf); 4432 return ret; 4433 } 4434 4435 FILE* os::open(int fd, const char* mode) { 4436 return ::_fdopen(fd, mode); 4437 } 4438 4439 // Is a (classpath) directory empty? 
// Returns true iff the directory contains no entries other than "." and "..".
// Returns false (with errno set) on allocation or path-conversion failure.
bool os::dir_is_empty(const char* path) {
  char* search_path = (char*)os::malloc(strlen(path) + 3, mtInternal);
  if (search_path == NULL) {
    errno = ENOMEM;
    return false;
  }
  strcpy(search_path, path);
  os::native_path(search_path);
  // Append "*", or possibly "\\*", to path
  if (search_path[1] == ':' &&
     (search_path[2] == '\0' ||
     (search_path[2] == '\\' && search_path[3] == '\0'))) {
    // No '\\' needed for cases like "Z:" or "Z:\"
    strcat(search_path, "*");
  }
  else {
    strcat(search_path, "\\*");
  }
  errno_t err = ERROR_SUCCESS;
  wchar_t* wpath = create_unc_path(search_path, err);
  if (err != ERROR_SUCCESS) {
    if (wpath != NULL) {
      destroy_unc_path(wpath);
    }
    os::free(search_path);
    errno = err;
    return false;
  }
  WIN32_FIND_DATAW fd;
  HANDLE f = ::FindFirstFileW(wpath, &fd);
  destroy_unc_path(wpath);
  bool is_empty = true;
  if (f != INVALID_HANDLE_VALUE) {
    // FindFirstFileW already consumed the first entry; keep scanning with
    // FindNextFileW until a real (non-dot) entry is seen or we run out.
    while (is_empty && ::FindNextFileW(f, &fd)) {
      // An empty directory contains only the current directory file
      // and the previous directory file.
      if ((wcscmp(fd.cFileName, L".") != 0) &&
          (wcscmp(fd.cFileName, L"..") != 0)) {
        is_empty = false;
      }
    }
    FindClose(f);
  }
  os::free(search_path);
  return is_empty;
}

// create binary file, rewriting existing file if required
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
  if (!rewrite_existing) {
    // Fail if the file already exists instead of truncating it.
    oflags |= _O_EXCL;
  }
  return ::open(path, oflags, _S_IREAD | _S_IWRITE);
}

// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}

// move file pointer to the specified offset
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}


jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::_lseeki64(fd, offset, whence);
}

// Positional read via overlapped ReadFile; does not move the fd's file
// pointer semantics the caller sees. Returns bytes read, or 0 on failure.
size_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  OVERLAPPED ov;
  DWORD nread;
  BOOL result;

  ZeroMemory(&ov, sizeof(ov));
  // Split the 64-bit offset across the two 32-bit OVERLAPPED fields.
  ov.Offset = (DWORD)offset;
  ov.OffsetHigh = (DWORD)(offset >> 32);

  HANDLE h = (HANDLE)::_get_osfhandle(fd);

  result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);

  return result ? nread : 0;
}


// This method is a slightly reworked copy of JDK's sysNativePath
// from src/windows/hpi/src/path_md.c

// Convert a pathname to native format.  On win32, this involves forcing all
// separators to be '\\' rather than '/' (both are legal inputs, but Win95
// sometimes rejects '/') and removing redundant separators.  The input path is
// assumed to have been converted into the character encoding used by the local
// system.  Because this might be a double-byte encoding, care is taken to
// treat double-byte lead characters correctly.
//
// This procedure modifies the given path in place, as the result is never
// longer than the original.  There is no error return; this operation always
// succeeds.
char * os::native_path(char *path) {
  // src reads ahead of dst; end tracks where the terminating NUL will go
  // (used to strip trailing spaces and separators).
  char *src = path, *dst = path, *end = path;
  char *colon = NULL;  // If a drive specifier is found, this will
                       // point to the colon following the drive letter

  // Assumption: '/', '\\', ':', and drive letters are never lead bytes
  assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
          && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");

  // Check for leading separators
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    // Remove leading separators if followed by drive specifier.  This
    // hack is necessary to support file URLs containing drive
    // specifiers (e.g., "file://c:/path").  As a side effect,
    // "/c:/path" can be used as an alternative to "c:/path".
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      // UNC pathname: Retain first separator; leave src pointed at
      // second separator so that further separators will be collapsed
      // into the second separator.  The result will be a pathname
      // beginning with "\\\\" followed (most likely) by a host name.
      src = dst = path + 1;
      path[0] = '\\';  // Force first separator to '\\'
    }
  }

  end = dst;

  // Remove redundant separators from remainder of path, forcing all
  // separators to be '\\' rather than '/'. Also, single byte space
  // characters are removed from the end of the path because those
  // are not legal ending characters on this operating system.
  //
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      while (isfilesep(*src)) src++;
      if (*src == '\0') {
        // Check for trailing separator
        end = dst;
        if (colon == dst - 2) break;  // "z:\\"
        if (dst == path + 1) break;   // "\\"
        if (dst == path + 2 && isfilesep(path[0])) {
          // "\\\\" is not collapsed to "\\" because "\\\\" marks the
          // beginning of a UNC pathname.  Even though it is not, by
          // itself, a valid UNC pathname, we leave it as is in order
          // to be consistent with the path canonicalizer as well
          // as the win32 APIs, which treat this case as an invalid
          // UNC pathname rather than as an alias for the root
          // directory of the current drive.
          break;
        }
        end = --dst;  // Path does not denote a root directory, so
                      // remove trailing separator
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else {  // Copy a single-byte character
        char c = *src++;
        *dst++ = c;
        // Space is not a legal ending character
        if (c != ' ') end = dst;
      }
    }
  }

  *end = '\0';

  // For "z:", add "." to work around a bug in the C runtime library
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}

// This code is a copy of JDK's sysSetLength
// from src/windows/hpi/src/sys_api_md.c

// Truncate/extend the file behind fd to 'length' bytes.
// Returns 0 on success, -1 on any failure.
int os::ftruncate(int fd, jlong length) {
  HANDLE h = (HANDLE)::_get_osfhandle(fd);
  long high = (long)(length >> 32);
  DWORD ret;

  if (h == (HANDLE)(-1)) {
    return -1;
  }

  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
  // 0xFFFFFFFF can be a valid low-half position; only treat it as an
  // error when GetLastError() also reports one.
  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
    return -1;
  }

  if (::SetEndOfFile(h) == FALSE) {
    return -1;
  }

  return 0;
}

int os::get_fileno(FILE* fp) {
  return _fileno(fp);
}

// This code is a copy of JDK's sysSync
// from src/windows/hpi/src/sys_api_md.c
// except for the legacy workaround for a bug in Win 98

int os::fsync(int fd) {
  HANDLE handle = (HANDLE)::_get_osfhandle(fd);

  // ERROR_ACCESS_DENIED is ignored: FlushFileBuffers fails that way on
  // handles without write access, which is not a sync failure.
  if ((!::FlushFileBuffers(handle)) &&
      (GetLastError() != ERROR_ACCESS_DENIED)) {
    // from winerror.h
    return -1;
  }
  return 0;
}

static int nonSeekAvailable(int, long *);
static int stdinAvailable(int, long *);

#define S_ISCHR(mode)   (((mode) & _S_IFCHR) == _S_IFCHR)
#define S_ISFIFO(mode)  (((mode) & _S_IFIFO) == _S_IFIFO)

// This code is a copy of JDK's sysAvailable
// from src/windows/hpi/src/sys_api_md.c

// Stores in *bytes the number of bytes readable without blocking.
// Returns TRUE on success, FALSE on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      // Non-seekable device: delegate to the pipe/console probes below.
      int ret;
      long lpbytes;
      if (fd == 0) {
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    // Regular file: available = size - current position, computed by
    // seeking to the end and back.
    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
      return FALSE;
    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
      return FALSE;
    }
    *bytes = end - cur;
    return TRUE;
  } else {
    return FALSE;
  }
}

void os::flockfile(FILE* fp) {
  _lock_file(fp);
}

void os::funlockfile(FILE* fp) {
  _unlock_file(fp);
}

// This code is a copy of JDK's nonSeekAvailable
// from src/windows/hpi/src/sys_api_md.c

static int nonSeekAvailable(int fd, long *pbytes) {
  // This is used for available on non-seekable devices
  // (like both named and anonymous pipes, such as pipes
  //  connected to an exec'd process).
  // Standard Input is a special case.
  HANDLE han;

  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
    return FALSE;
  }

  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
    // PeekNamedPipe fails when at EOF.  In that case we
    // simply make *pbytes = 0 which is consistent with the
    // behavior we get on Solaris when an fd is at EOF.
    // The only alternative is to raise an Exception,
    // which isn't really warranted.
    //
    if (::GetLastError() != ERROR_BROKEN_PIPE) {
      return FALSE;
    }
    *pbytes = 0;
  }
  return TRUE;
}

#define MAX_INPUT_EVENTS 2000

// This code is a copy of JDK's stdinAvailable
// from src/windows/hpi/src/sys_api_md.c

static int stdinAvailable(int fd, long *pbytes) {
  HANDLE han;
  DWORD numEventsRead = 0;  // Number of events read from buffer
  DWORD numEvents = 0;      // Number of events in buffer
  DWORD i = 0;              // Loop index
  DWORD curLength = 0;      // Position marker
  DWORD actualLength = 0;   // Number of bytes readable
  BOOL error = FALSE;       // Error holder
  INPUT_RECORD *lpBuffer;   // Pointer to records of input events

  if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
    return FALSE;
  }

  // Construct an array of input records in the console buffer
  error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
  if (error == 0) {
    // Not a console (e.g. redirected stdin): treat it like a pipe.
    return nonSeekAvailable(fd, pbytes);
  }

  // lpBuffer must fit into 64K or else PeekConsoleInput fails
  if (numEvents > MAX_INPUT_EVENTS) {
    numEvents = MAX_INPUT_EVENTS;
  }

  lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
  if (lpBuffer == NULL) {
    return FALSE;
  }

  error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
  if (error == 0) {
    os::free(lpBuffer);
    return FALSE;
  }

  // Examine input records for the number of bytes available
  for (i=0; i<numEvents; i++) {
    if (lpBuffer[i].EventType == KEY_EVENT) {

      KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
                                      &(lpBuffer[i].Event);
      if (keyRecord->bKeyDown == TRUE) {
        CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
        curLength++;
        // Only bytes up to and including the last carriage return are
        // actually deliverable to a reader.
        if (*keyPressed == '\r') {
          actualLength = curLength;
        }
      }
    }
  }

  if (lpBuffer != NULL) {
    os::free(lpBuffer);
  }
4813 4814 *pbytes = (long) actualLength; 4815 return TRUE; 4816 } 4817 4818 // Map a block of memory. 4819 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset, 4820 char *addr, size_t bytes, bool read_only, 4821 bool allow_exec) { 4822 HANDLE hFile; 4823 char* base; 4824 4825 hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL, 4826 OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); 4827 if (hFile == NULL) { 4828 log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError()); 4829 return NULL; 4830 } 4831 4832 if (allow_exec) { 4833 // CreateFileMapping/MapViewOfFileEx can't map executable memory 4834 // unless it comes from a PE image (which the shared archive is not.) 4835 // Even VirtualProtect refuses to give execute access to mapped memory 4836 // that was not previously executable. 4837 // 4838 // Instead, stick the executable region in anonymous memory. Yuck. 4839 // Penalty is that ~4 pages will not be shareable - in the future 4840 // we might consider DLLizing the shared archive with a proper PE 4841 // header so that mapping executable + sharing is possible. 4842 4843 base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE, 4844 PAGE_READWRITE); 4845 if (base == NULL) { 4846 log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError()); 4847 CloseHandle(hFile); 4848 return NULL; 4849 } 4850 4851 DWORD bytes_read; 4852 OVERLAPPED overlapped; 4853 overlapped.Offset = (DWORD)file_offset; 4854 overlapped.OffsetHigh = 0; 4855 overlapped.hEvent = NULL; 4856 // ReadFile guarantees that if the return value is true, the requested 4857 // number of bytes were read before returning. 
4858 bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0; 4859 if (!res) { 4860 log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError()); 4861 release_memory(base, bytes); 4862 CloseHandle(hFile); 4863 return NULL; 4864 } 4865 } else { 4866 HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0, 4867 NULL /* file_name */); 4868 if (hMap == NULL) { 4869 log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError()); 4870 CloseHandle(hFile); 4871 return NULL; 4872 } 4873 4874 DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY; 4875 base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset, 4876 (DWORD)bytes, addr); 4877 if (base == NULL) { 4878 log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError()); 4879 CloseHandle(hMap); 4880 CloseHandle(hFile); 4881 return NULL; 4882 } 4883 4884 if (CloseHandle(hMap) == 0) { 4885 log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError()); 4886 CloseHandle(hFile); 4887 return base; 4888 } 4889 } 4890 4891 if (allow_exec) { 4892 DWORD old_protect; 4893 DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE; 4894 bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0; 4895 4896 if (!res) { 4897 log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError()); 4898 // Don't consider this a hard error, on IA32 even if the 4899 // VirtualProtect fails, we should still be able to execute 4900 CloseHandle(hFile); 4901 return base; 4902 } 4903 } 4904 4905 if (CloseHandle(hFile) == 0) { 4906 log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError()); 4907 return base; 4908 } 4909 4910 return base; 4911 } 4912 4913 4914 // Remap a block of memory. 
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // This OS does not allow existing memory maps to be remapped so we
  // have to unmap the memory before we remap it.
  if (!os::unmap_memory(addr, bytes)) {
    return NULL;
  }

  // There is a very small theoretical window between the unmap_memory()
  // call above and the map_memory() call below where a thread in native
  // code may be able to access an address that is no longer mapped.

  return os::map_memory(fd, file_name, file_offset, addr, bytes,
                        read_only, allow_exec);
}


// Unmap a block of memory.
// Returns true=success, otherwise false.

bool os::pd_unmap_memory(char* addr, size_t bytes) {
  MEMORY_BASIC_INFORMATION mem_info;
  if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
    log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
    return false;
  }

  // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
  // Instead, executable region was allocated using VirtualAlloc(). See
  // pd_map_memory() above.
  //
  // The following flags should match the 'exec_access' flages used for
  // VirtualProtect() in pd_map_memory().
  if (mem_info.Protect == PAGE_EXECUTE_READ ||
      mem_info.Protect == PAGE_EXECUTE_READWRITE) {
    // Executable regions were VirtualAlloc'ed, so release, don't unmap.
    return pd_release_memory(addr, bytes);
  }

  BOOL result = UnmapViewOfFile(addr);
  if (result == 0) {
    log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
    return false;
  }
  return true;
}

// Block the VM at startup until a marker file is deleted, to give a
// human time to attach a debugger. The file name comes from
// -XX:PauseAtStartupFile, or defaults to ./vm.paused.<pid>.
void os::pause() {
  char filename[MAX_PATH];
  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
    jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
  } else {
    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
  }

  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
  if (fd != -1) {
    struct stat buf;
    ::close(fd);
    // Poll until someone deletes the marker file.
    while (::stat(filename, &buf) == 0) {
      Sleep(100);
    }
  } else {
    jio_fprintf(stderr,
                "Could not open pause file '%s', continuing immediately.\n", filename);
  }
}

Thread* os::ThreadCrashProtection::_protected_thread = NULL;
os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;

os::ThreadCrashProtection::ThreadCrashProtection() {
}

// See the caveats for this class in os_windows.hpp
// Protects the callback call so that raised OS EXCEPTIONS causes a jump back
// into this method and returns false. If no OS EXCEPTION was raised, returns
// true.
// The callback is supposed to provide the method that should be protected.
//
bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {

  // Serialize crash protection: only one protected region at a time.
  Thread::muxAcquire(&_crash_mux, "CrashProtection");

  _protected_thread = Thread::current_or_null();
  assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");

  bool success = true;
  // Structured Exception Handling: any OS exception raised inside
  // cb.call() lands in the __except block instead of crashing the VM.
  __try {
    _crash_protection = this;
    cb.call();
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // only for protection, nothing to do
    success = false;
  }
  _crash_protection = NULL;
  _protected_thread = NULL;
  Thread::muxRelease(&_crash_mux);
  return success;
}

// An Event wraps a win32 "CreateEvent" kernel handle.
//
// We have a number of choices regarding "CreateEvent" win32 handle leakage:
//
// 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
//     field, and call CloseHandle() on the win32 event handle.  Unpark() would
//     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
//     In addition, an unpark() operation might fetch the handle field, but the
//     event could recycle between the fetch and the SetEvent() operation.
//     SetEvent() would either fail because the handle was invalid, or inadvertently work,
//     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
//     on an stale but recycled handle would be harmless, but in practice this might
//     confuse other non-Sun code, so it's not a viable approach.
//
// 2:  Once a win32 event handle is associated with an Event, it remains associated
//     with the Event.  The event handle is never closed.  This could be construed
//     as handle leakage, but only up to the maximum # of threads that have been extant
//     at any one time.  This shouldn't be an issue, as windows platforms typically
//     permit a process to have hundreds of thousands of open handles.
//
// 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
//     and release unused handles.
//
// 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
//     It's not clear, however, that we wouldn't be trading one type of leak for another.
//
// 5.  Use an RCU-like mechanism (Read-Copy Update).
//     Or perhaps something similar to Maged Michael's "Hazard pointers".
//
// We use (2).
//
// TODO-FIXME:
// 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
// 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
//     to recover from (or at least detect) the dreaded Windows 841176 bug.
// 3.  Collapse the interrupt_event, the JSR166 parker event, and the objectmonitor ParkEvent
//     into a single win32 CreateEvent() handle.
//
// Assumption:
//    Only one parker can exist on an event, which is why we allocate
//    them per-thread.  Multiple unparkers can coexist.
//
// _Event transitions in park()
//   -1 => -1 : illegal
//    1 =>  0 : pass - return immediately
//    0 => -1 : block; then set _Event to 0 before returning
//
// _Event transitions in unpark()
//    0 => 1 : just return
//    1 => 1 : just return
//   -1 => either 0 or 1; must signal target thread
//         That is, we can safely transition _Event from -1 to either
//         0 or 1.
//
// _Event serves as a restricted-range semaphore.
//   -1 : thread is blocked, i.e. there is a waiter
//    0 : neutral: thread is running or ready,
//        could have been signaled after a wait started
//    1 : signaled - thread is running or ready
//
// Another possible encoding of _Event would be with
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
//

// Timed park: returns OS_OK if awakened by unpark() (or already
// signaled), OS_TIMEOUT if the timeout elapsed. Millis must be > 0.
int os::PlatformEvent::park(jlong Millis) {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL , "Invariant");
  guarantee(Millis > 0          , "Invariant");

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.
  // Consider: use atomic decrement instead of CAS-loop

  // Atomically decrement _Event; CAS loop retries on contention.
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return OS_OK;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;     // set prd = MAX (Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
  }
  v = _Event;
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultanous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}

// Untimed park: blocks until unpark()ed (or returns immediately if
// already signaled).
void os::PlatformEvent::park() {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Consider: use atomic decrement instead of CAS-loop
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  // Re-check _Event after each wakeup to tolerate spurious returns.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case can
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}

void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // Only signal the kernel event if a waiter was present (old value -1).
  if (Atomic::xchg(1, &_Event) >= 0) return;

  ::SetEvent(_ParkHandle);
}


// JSR166
// -------------------------------------------------------

// The Windows implementation of Park is very straightforward: Basic
// operations on Win32 Events turn out to have the right semantics to
// use them directly. We opportunistically resuse the event inherited
// from Monitor.
// Park the current thread for up to 'time' (nanoseconds if relative,
// absolute milliseconds-since-epoch if isAbsolute). time == 0 with
// !isAbsolute means wait indefinitely; time < 0 means don't wait.
void Parker::park(bool isAbsolute, jlong time) {
  guarantee(_ParkEvent != NULL, "invariant");
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  } else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  } else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) { // already elapsed
      return;
    }
  } else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0) { // Wait for the minimal time unit if zero
      time = 1;
    }
  }

  JavaThread* thread = JavaThread::current();

  // Don't wait if interrupted or already triggered
  if (Thread::is_interrupted(thread, false) ||
      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    ResetEvent(_ParkEvent);
    return;
  } else {
    // Transition to blocked state and record that this is a park, not
    // an Object.wait(); the destructors restore the previous state.
    ThreadBlockInVM tbivm(thread);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    thread->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent, time);
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (thread->handle_special_suspend_equivalent_condition()) {
      thread->java_suspend_self();
    }
  }
}

// Wake (or pre-signal) the thread associated with this Parker.
void Parker::unpark() {
  guarantee(_ParkEvent != NULL, "invariant");
  SetEvent(_ParkEvent);
}

// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't create a new process).
5254 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) { 5255 STARTUPINFO si; 5256 PROCESS_INFORMATION pi; 5257 DWORD exit_code; 5258 5259 char * cmd_string; 5260 char * cmd_prefix = "cmd /C "; 5261 size_t len = strlen(cmd) + strlen(cmd_prefix) + 1; 5262 cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal); 5263 if (cmd_string == NULL) { 5264 return -1; 5265 } 5266 cmd_string[0] = '\0'; 5267 strcat(cmd_string, cmd_prefix); 5268 strcat(cmd_string, cmd); 5269 5270 // now replace all '\n' with '&' 5271 char * substring = cmd_string; 5272 while ((substring = strchr(substring, '\n')) != NULL) { 5273 substring[0] = '&'; 5274 substring++; 5275 } 5276 memset(&si, 0, sizeof(si)); 5277 si.cb = sizeof(si); 5278 memset(&pi, 0, sizeof(pi)); 5279 BOOL rslt = CreateProcess(NULL, // executable name - use command line 5280 cmd_string, // command line 5281 NULL, // process security attribute 5282 NULL, // thread security attribute 5283 TRUE, // inherits system handles 5284 0, // no creation flags 5285 NULL, // use parent's environment block 5286 NULL, // use parent's starting directory 5287 &si, // (in) startup information 5288 &pi); // (out) process information 5289 5290 if (rslt) { 5291 // Wait until child process exits. 5292 WaitForSingleObject(pi.hProcess, INFINITE); 5293 5294 GetExitCodeProcess(pi.hProcess, &exit_code); 5295 5296 // Close process and thread handles. 
5297 CloseHandle(pi.hProcess); 5298 CloseHandle(pi.hThread); 5299 } else { 5300 exit_code = -1; 5301 } 5302 5303 FREE_C_HEAP_ARRAY(char, cmd_string); 5304 return (int)exit_code; 5305 } 5306 5307 bool os::find(address addr, outputStream* st) { 5308 int offset = -1; 5309 bool result = false; 5310 char buf[256]; 5311 if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) { 5312 st->print(PTR_FORMAT " ", addr); 5313 if (strlen(buf) < sizeof(buf) - 1) { 5314 char* p = strrchr(buf, '\\'); 5315 if (p) { 5316 st->print("%s", p + 1); 5317 } else { 5318 st->print("%s", buf); 5319 } 5320 } else { 5321 // The library name is probably truncated. Let's omit the library name. 5322 // See also JDK-8147512. 5323 } 5324 if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) { 5325 st->print("::%s + 0x%x", buf, offset); 5326 } 5327 st->cr(); 5328 result = true; 5329 } 5330 return result; 5331 } 5332 5333 LONG WINAPI os::win32::serialize_fault_filter(struct _EXCEPTION_POINTERS* e) { 5334 DWORD exception_code = e->ExceptionRecord->ExceptionCode; 5335 5336 if (exception_code == EXCEPTION_ACCESS_VIOLATION) { 5337 JavaThread* thread = JavaThread::current(); 5338 PEXCEPTION_RECORD exceptionRecord = e->ExceptionRecord; 5339 address addr = (address) exceptionRecord->ExceptionInformation[1]; 5340 5341 if (os::is_memory_serialize_page(thread, addr)) { 5342 return EXCEPTION_CONTINUE_EXECUTION; 5343 } 5344 } 5345 5346 return EXCEPTION_CONTINUE_SEARCH; 5347 } 5348 5349 static jint initSock() { 5350 WSADATA wsadata; 5351 5352 if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) { 5353 jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n", 5354 ::GetLastError()); 5355 return JNI_ERR; 5356 } 5357 return JNI_OK; 5358 } 5359 5360 struct hostent* os::get_host_by_name(char* name) { 5361 return (struct hostent*)gethostbyname(name); 5362 } 5363 5364 int os::socket_close(int fd) { 5365 return ::closesocket(fd); 5366 } 5367 5368 int os::socket(int domain, int type, int 
protocol) { 5369 return ::socket(domain, type, protocol); 5370 } 5371 5372 int os::connect(int fd, struct sockaddr* him, socklen_t len) { 5373 return ::connect(fd, him, len); 5374 } 5375 5376 int os::recv(int fd, char* buf, size_t nBytes, uint flags) { 5377 return ::recv(fd, buf, (int)nBytes, flags); 5378 } 5379 5380 int os::send(int fd, char* buf, size_t nBytes, uint flags) { 5381 return ::send(fd, buf, (int)nBytes, flags); 5382 } 5383 5384 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) { 5385 return ::send(fd, buf, (int)nBytes, flags); 5386 } 5387 5388 // WINDOWS CONTEXT Flags for THREAD_SAMPLING 5389 #if defined(IA32) 5390 #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS) 5391 #elif defined (AMD64) 5392 #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT) 5393 #endif 5394 5395 // returns true if thread could be suspended, 5396 // false otherwise 5397 static bool do_suspend(HANDLE* h) { 5398 if (h != NULL) { 5399 if (SuspendThread(*h) != ~0) { 5400 return true; 5401 } 5402 } 5403 return false; 5404 } 5405 5406 // resume the thread 5407 // calling resume on an active thread is a no-op 5408 static void do_resume(HANDLE* h) { 5409 if (h != NULL) { 5410 ResumeThread(*h); 5411 } 5412 } 5413 5414 // retrieve a suspend/resume context capable handle 5415 // from the tid. Caller validates handle return value. 
5416 void get_thread_handle_for_extended_context(HANDLE* h, 5417 OSThread::thread_id_t tid) { 5418 if (h != NULL) { 5419 *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid); 5420 } 5421 } 5422 5423 // Thread sampling implementation 5424 // 5425 void os::SuspendedThreadTask::internal_do_task() { 5426 CONTEXT ctxt; 5427 HANDLE h = NULL; 5428 5429 // get context capable handle for thread 5430 get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id()); 5431 5432 // sanity 5433 if (h == NULL || h == INVALID_HANDLE_VALUE) { 5434 return; 5435 } 5436 5437 // suspend the thread 5438 if (do_suspend(&h)) { 5439 ctxt.ContextFlags = sampling_context_flags; 5440 // get thread context 5441 GetThreadContext(h, &ctxt); 5442 SuspendedThreadTaskContext context(_thread, &ctxt); 5443 // pass context to Thread Sampling impl 5444 do_task(context); 5445 // resume thread 5446 do_resume(&h); 5447 } 5448 5449 // close handle 5450 CloseHandle(h); 5451 } 5452 5453 bool os::start_debugging(char *buf, int buflen) { 5454 int len = (int)strlen(buf); 5455 char *p = &buf[len]; 5456 5457 jio_snprintf(p, buflen-len, 5458 "\n\n" 5459 "Do you want to debug the problem?\n\n" 5460 "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n" 5461 "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n" 5462 "Otherwise, select 'No' to abort...", 5463 os::current_process_id(), os::current_thread_id()); 5464 5465 bool yes = os::message_box("Unexpected Error", buf); 5466 5467 if (yes) { 5468 // os::breakpoint() calls DebugBreak(), which causes a breakpoint 5469 // exception. If VM is running inside a debugger, the debugger will 5470 // catch the exception. Otherwise, the breakpoint exception will reach 5471 // the default windows exception handler, which can spawn a debugger and 5472 // automatically attach to the dying VM. 
5473 os::breakpoint(); 5474 yes = false; 5475 } 5476 return yes; 5477 } 5478 5479 void* os::get_default_process_handle() { 5480 return (void*)GetModuleHandle(NULL); 5481 } 5482 5483 // Builds a platform dependent Agent_OnLoad_<lib_name> function name 5484 // which is used to find statically linked in agents. 5485 // Additionally for windows, takes into account __stdcall names. 5486 // Parameters: 5487 // sym_name: Symbol in library we are looking for 5488 // lib_name: Name of library to look in, NULL for shared libs. 5489 // is_absolute_path == true if lib_name is absolute path to agent 5490 // such as "C:/a/b/L.dll" 5491 // == false if only the base name of the library is passed in 5492 // such as "L" 5493 char* os::build_agent_function_name(const char *sym_name, const char *lib_name, 5494 bool is_absolute_path) { 5495 char *agent_entry_name; 5496 size_t len; 5497 size_t name_len; 5498 size_t prefix_len = strlen(JNI_LIB_PREFIX); 5499 size_t suffix_len = strlen(JNI_LIB_SUFFIX); 5500 const char *start; 5501 5502 if (lib_name != NULL) { 5503 len = name_len = strlen(lib_name); 5504 if (is_absolute_path) { 5505 // Need to strip path, prefix and suffix 5506 if ((start = strrchr(lib_name, *os::file_separator())) != NULL) { 5507 lib_name = ++start; 5508 } else { 5509 // Need to check for drive prefix 5510 if ((start = strchr(lib_name, ':')) != NULL) { 5511 lib_name = ++start; 5512 } 5513 } 5514 if (len <= (prefix_len + suffix_len)) { 5515 return NULL; 5516 } 5517 lib_name += prefix_len; 5518 name_len = strlen(lib_name) - suffix_len; 5519 } 5520 } 5521 len = (lib_name != NULL ? 
name_len : 0) + strlen(sym_name) + 2; 5522 agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread); 5523 if (agent_entry_name == NULL) { 5524 return NULL; 5525 } 5526 if (lib_name != NULL) { 5527 const char *p = strrchr(sym_name, '@'); 5528 if (p != NULL && p != sym_name) { 5529 // sym_name == _Agent_OnLoad@XX 5530 strncpy(agent_entry_name, sym_name, (p - sym_name)); 5531 agent_entry_name[(p-sym_name)] = '\0'; 5532 // agent_entry_name == _Agent_OnLoad 5533 strcat(agent_entry_name, "_"); 5534 strncat(agent_entry_name, lib_name, name_len); 5535 strcat(agent_entry_name, p); 5536 // agent_entry_name == _Agent_OnLoad_lib_name@XX 5537 } else { 5538 strcpy(agent_entry_name, sym_name); 5539 strcat(agent_entry_name, "_"); 5540 strncat(agent_entry_name, lib_name, name_len); 5541 } 5542 } else { 5543 strcpy(agent_entry_name, sym_name); 5544 } 5545 return agent_entry_name; 5546 } 5547 5548 #ifndef PRODUCT 5549 5550 // test the code path in reserve_memory_special() that tries to allocate memory in a single 5551 // contiguous memory block at a particular address. 5552 // The test first tries to find a good approximate address to allocate at by using the same 5553 // method to allocate some memory at any address. The test then tries to allocate memory in 5554 // the vicinity (not directly after it to avoid possible by-chance use of that location) 5555 // This is of course only some dodgy assumption, there is no guarantee that the vicinity of 5556 // the previously allocated memory is available for allocation. The only actual failure 5557 // that is reported is when the test tries to allocate at a particular location but gets a 5558 // different valid one. A NULL return value at this point is not considered an error but may 5559 // be legitimate. 
5560 void TestReserveMemorySpecial_test() { 5561 if (!UseLargePages) { 5562 return; 5563 } 5564 // save current value of globals 5565 bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation; 5566 bool old_use_numa_interleaving = UseNUMAInterleaving; 5567 5568 // set globals to make sure we hit the correct code path 5569 UseLargePagesIndividualAllocation = UseNUMAInterleaving = false; 5570 5571 // do an allocation at an address selected by the OS to get a good one. 5572 const size_t large_allocation_size = os::large_page_size() * 4; 5573 char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false); 5574 if (result == NULL) { 5575 } else { 5576 os::release_memory_special(result, large_allocation_size); 5577 5578 // allocate another page within the recently allocated memory area which seems to be a good location. At least 5579 // we managed to get it once. 5580 const size_t expected_allocation_size = os::large_page_size(); 5581 char* expected_location = result + os::large_page_size(); 5582 char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false); 5583 if (actual_location == NULL) { 5584 } else { 5585 // release memory 5586 os::release_memory_special(actual_location, expected_allocation_size); 5587 // only now check, after releasing any memory to avoid any leaks. 5588 assert(actual_location == expected_location, 5589 "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead", 5590 expected_location, expected_allocation_size, actual_location); 5591 } 5592 } 5593 5594 // restore globals 5595 UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation; 5596 UseNUMAInterleaving = old_use_numa_interleaving; 5597 } 5598 #endif // PRODUCT 5599 5600 /* 5601 All the defined signal names for Windows. 5602 5603 NOTE that not all of these names are accepted by FindSignal! 
5604 5605 For various reasons some of these may be rejected at runtime. 5606 5607 Here are the names currently accepted by a user of sun.misc.Signal with 5608 1.4.1 (ignoring potential interaction with use of chaining, etc): 5609 5610 (LIST TBD) 5611 5612 */ 5613 int os::get_signal_number(const char* name) { 5614 static const struct { 5615 char* name; 5616 int number; 5617 } siglabels [] = 5618 // derived from version 6.0 VC98/include/signal.h 5619 {"ABRT", SIGABRT, // abnormal termination triggered by abort cl 5620 "FPE", SIGFPE, // floating point exception 5621 "SEGV", SIGSEGV, // segment violation 5622 "INT", SIGINT, // interrupt 5623 "TERM", SIGTERM, // software term signal from kill 5624 "BREAK", SIGBREAK, // Ctrl-Break sequence 5625 "ILL", SIGILL}; // illegal instruction 5626 for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) { 5627 if (strcmp(name, siglabels[i].name) == 0) { 5628 return siglabels[i].number; 5629 } 5630 } 5631 return -1; 5632 } 5633 5634 // Fast current thread access 5635 5636 int os::win32::_thread_ptr_offset = 0; 5637 5638 static void call_wrapper_dummy() {} 5639 5640 // We need to call the os_exception_wrapper once so that it sets 5641 // up the offset from FS of the thread pointer. 5642 void os::win32::initialize_thread_ptr_offset() { 5643 os::os_exception_wrapper((java_call_t)call_wrapper_dummy, 5644 NULL, NULL, NULL, NULL); 5645 }