1 /*
   2  * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "jvm.h"
  30 #include "classfile/classLoader.hpp"
  31 #include "classfile/systemDictionary.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/nativeInst.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "compiler/disassembler.hpp"
  38 #include "interpreter/interpreter.hpp"
  39 #include "logging/log.hpp"
  40 #include "logging/logStream.hpp"
  41 #include "memory/allocation.inline.hpp"
  42 #include "memory/filemap.hpp"
  43 #include "oops/oop.inline.hpp"
  44 #include "os_share_windows.hpp"
  45 #include "os_windows.inline.hpp"
  46 #include "prims/jniFastGetField.hpp"
  47 #include "prims/jvm_misc.hpp"
  48 #include "runtime/arguments.hpp"
  49 #include "runtime/atomic.hpp"
  50 #include "runtime/extendedPC.hpp"
  51 #include "runtime/globals.hpp"
  52 #include "runtime/interfaceSupport.inline.hpp"
  53 #include "runtime/java.hpp"
  54 #include "runtime/javaCalls.hpp"
  55 #include "runtime/mutexLocker.hpp"
  56 #include "runtime/objectMonitor.hpp"
  57 #include "runtime/orderAccess.hpp"
  58 #include "runtime/osThread.hpp"
  59 #include "runtime/perfMemory.hpp"
  60 #include "runtime/safepointMechanism.hpp"
  61 #include "runtime/sharedRuntime.hpp"
  62 #include "runtime/statSampler.hpp"
  63 #include "runtime/stubRoutines.hpp"
  64 #include "runtime/thread.inline.hpp"
  65 #include "runtime/threadCritical.hpp"
  66 #include "runtime/timer.hpp"
  67 #include "runtime/vm_version.hpp"
  68 #include "services/attachListener.hpp"
  69 #include "services/memTracker.hpp"
  70 #include "services/runtimeService.hpp"
  71 #include "utilities/align.hpp"
  72 #include "utilities/decoder.hpp"
  73 #include "utilities/defaultStream.hpp"
  74 #include "utilities/events.hpp"
  75 #include "utilities/macros.hpp"
  76 #include "utilities/vmError.hpp"
  77 #include "symbolengine.hpp"
  78 #include "windbghelp.hpp"
  79 
  80 #ifdef _DEBUG
  81 #include <crtdbg.h>
  82 #endif
  83 
  84 #include <windows.h>
  85 #include <sys/types.h>
  86 #include <sys/stat.h>
  87 #include <sys/timeb.h>
  88 #include <objidl.h>
  89 #include <shlobj.h>
  90 
  91 #include <malloc.h>
  92 #include <signal.h>
  93 #include <direct.h>
  94 #include <errno.h>
  95 #include <fcntl.h>
  96 #include <io.h>
  97 #include <process.h>              // For _beginthreadex(), _endthreadex()
  98 #include <imagehlp.h>             // For os::dll_address_to_function_name
  99 // for enumerating dll libraries
 100 #include <vdmdbg.h>
 101 #include <psapi.h>
 102 #include <mmsystem.h>
 103 #include <winsock2.h>
 104 
 105 // for timer info max values which include all bits
 106 #define ALL_64_BITS CONST64(-1)
 107 
 108 // For DLL loading/load error detection
 109 // Values of PE COFF
 110 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 111 #define IMAGE_FILE_SIGNATURE_LENGTH 4
 112 
 113 static HANDLE main_process;
 114 static HANDLE main_thread;
 115 static int    main_thread_id;
 116 
 117 static FILETIME process_creation_time;
 118 static FILETIME process_exit_time;
 119 static FILETIME process_user_time;
 120 static FILETIME process_kernel_time;
 121 
 122 #if defined(_M_ARM64)
 123   #define __CPU__ aarch64
 124 #elif defined(_M_AMD64)
 125   #define __CPU__ amd64
 126 #else
 127   #define __CPU__ i486
 128 #endif
 129 
 130 #if defined(USE_VECTORED_EXCEPTION_HANDLING)
 131 PVOID  topLevelVectoredExceptionHandler = NULL;
 132 #elif INCLUDE_AOT
 133 PVOID  topLevelVectoredExceptionHandler = NULL;
 134 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 135 #endif
 136 
 137 // save DLL module handle, used by GetModuleFileName
 138 
 139 HINSTANCE vm_lib_handle;
 140 
 141 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 142   switch (reason) {
 143   case DLL_PROCESS_ATTACH:
 144     vm_lib_handle = hinst;
 145     if (ForceTimeHighResolution) {
 146       timeBeginPeriod(1L);
 147     }
 148     WindowsDbgHelp::pre_initialize();
 149     SymbolEngine::pre_initialize();
 150     break;
 151   case DLL_PROCESS_DETACH:
 152     if (ForceTimeHighResolution) {
 153       timeEndPeriod(1L);
 154     }
 155 #if defined(USE_VECTORED_EXCEPTION_HANDLING) || INCLUDE_AOT
 156     if (topLevelVectoredExceptionHandler != NULL) {
 157       RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
 158       topLevelVectoredExceptionHandler = NULL;
 159     }
 160 #endif
 161     break;
 162   default:
 163     break;
 164   }
 165   return true;
 166 }
 167 
 168 static inline double fileTimeAsDouble(FILETIME* time) {
 169   const double high  = (double) ((unsigned int) ~0);
 170   const double split = 10000000.0;
 171   double result = (time->dwLowDateTime / split) +
 172                    time->dwHighDateTime * (high/split);
 173   return result;
 174 }
 175 
 176 // Implementation of os
 177 
 178 bool os::unsetenv(const char* name) {
 179   assert(name != NULL, "Null pointer");
 180   return (SetEnvironmentVariable(name, NULL) == TRUE);
 181 }
 182 
 183 // No setuid programs under Windows.
 184 bool os::have_special_privileges() {
 185   return false;
 186 }
 187 
 188 
 189 // This method is  a periodic task to check for misbehaving JNI applications
 190 // under CheckJNI, we can add any periodic checks here.
 191 // For Windows at the moment does nothing
 192 void os::run_periodic_checks() {
 193   return;
 194 }
 195 
 196 // previous UnhandledExceptionFilter, if there is one
 197 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 198 
 199 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 200 
 201 void os::init_system_properties_values() {
 202   // sysclasspath, java_home, dll_dir
 203   {
 204     char *home_path;
 205     char *dll_path;
 206     char *pslash;
 207     const char *bin = "\\bin";
 208     char home_dir[MAX_PATH + 1];
 209     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 210 
 211     if (alt_home_dir != NULL)  {
 212       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 213       home_dir[MAX_PATH] = '\0';
 214     } else {
 215       os::jvm_path(home_dir, sizeof(home_dir));
 216       // Found the full path to jvm.dll.
 217       // Now cut the path to <java_home>/jre if we can.
 218       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 219       pslash = strrchr(home_dir, '\\');
 220       if (pslash != NULL) {
 221         *pslash = '\0';                   // get rid of \{client|server}
 222         pslash = strrchr(home_dir, '\\');
 223         if (pslash != NULL) {
 224           *pslash = '\0';                 // get rid of \bin
 225         }
 226       }
 227     }
 228 
 229     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 230     strcpy(home_path, home_dir);
 231     Arguments::set_java_home(home_path);
 232     FREE_C_HEAP_ARRAY(char, home_path);
 233 
 234     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 235                                 mtInternal);
 236     strcpy(dll_path, home_dir);
 237     strcat(dll_path, bin);
 238     Arguments::set_dll_dir(dll_path);
 239     FREE_C_HEAP_ARRAY(char, dll_path);
 240 
 241     if (!set_boot_path('\\', ';')) {
 242       vm_exit_during_initialization("Failed setting boot class path.", NULL);
 243     }
 244   }
 245 
 246 // library_path
 247 #define EXT_DIR "\\lib\\ext"
 248 #define BIN_DIR "\\bin"
 249 #define PACKAGE_DIR "\\Sun\\Java"
 250   {
 251     // Win32 library search order (See the documentation for LoadLibrary):
 252     //
 253     // 1. The directory from which application is loaded.
 254     // 2. The system wide Java Extensions directory (Java only)
 255     // 3. System directory (GetSystemDirectory)
 256     // 4. Windows directory (GetWindowsDirectory)
 257     // 5. The PATH environment variable
 258     // 6. The current directory
 259 
 260     char *library_path;
 261     char tmp[MAX_PATH];
 262     char *path_str = ::getenv("PATH");
 263 
 264     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 265                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 266 
 267     library_path[0] = '\0';
 268 
 269     GetModuleFileName(NULL, tmp, sizeof(tmp));
 270     *(strrchr(tmp, '\\')) = '\0';
 271     strcat(library_path, tmp);
 272 
 273     GetWindowsDirectory(tmp, sizeof(tmp));
 274     strcat(library_path, ";");
 275     strcat(library_path, tmp);
 276     strcat(library_path, PACKAGE_DIR BIN_DIR);
 277 
 278     GetSystemDirectory(tmp, sizeof(tmp));
 279     strcat(library_path, ";");
 280     strcat(library_path, tmp);
 281 
 282     GetWindowsDirectory(tmp, sizeof(tmp));
 283     strcat(library_path, ";");
 284     strcat(library_path, tmp);
 285 
 286     if (path_str) {
 287       strcat(library_path, ";");
 288       strcat(library_path, path_str);
 289     }
 290 
 291     strcat(library_path, ";.");
 292 
 293     Arguments::set_library_path(library_path);
 294     FREE_C_HEAP_ARRAY(char, library_path);
 295   }
 296 
 297   // Default extensions directory
 298   {
 299     char path[MAX_PATH];
 300     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 301     GetWindowsDirectory(path, MAX_PATH);
 302     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 303             path, PACKAGE_DIR, EXT_DIR);
 304     Arguments::set_ext_dirs(buf);
 305   }
 306   #undef EXT_DIR
 307   #undef BIN_DIR
 308   #undef PACKAGE_DIR
 309 
 310 #ifndef _WIN64
 311   // set our UnhandledExceptionFilter and save any previous one
 312   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 313 #endif
 314 
 315   // Done
 316   return;
 317 }
 318 
 319 void os::breakpoint() {
 320   DebugBreak();
 321 }
 322 
 323 // Invoked from the BREAKPOINT Macro
 324 extern "C" void breakpoint() {
 325   os::breakpoint();
 326 }
 327 
 328 // RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 329 // So far, this method is only used by Native Memory Tracking, which is
 330 // only supported on Windows XP or later.
 331 //
 332 int os::get_native_stack(address* stack, int frames, int toSkip) {
 333   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 334   for (int index = captured; index < frames; index ++) {
 335     stack[index] = NULL;
 336   }
 337   return captured;
 338 }
 339 
 340 
 341 // os::current_stack_base()
 342 //
 343 //   Returns the base of the stack, which is the stack's
 344 //   starting address.  This function must be called
 345 //   while running on the stack of the thread being queried.
 346 
 347 address os::current_stack_base() {
 348   MEMORY_BASIC_INFORMATION minfo;
 349   address stack_bottom;
 350   size_t stack_size;
 351 
 352   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 353   stack_bottom =  (address)minfo.AllocationBase;
 354   stack_size = minfo.RegionSize;
 355 
 356   // Add up the sizes of all the regions with the same
 357   // AllocationBase.
 358   while (1) {
 359     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 360     if (stack_bottom == (address)minfo.AllocationBase) {
 361       stack_size += minfo.RegionSize;
 362     } else {
 363       break;
 364     }
 365   }
 366   return stack_bottom + stack_size;
 367 }
 368 
 369 size_t os::current_stack_size() {
 370   size_t sz;
 371   MEMORY_BASIC_INFORMATION minfo;
 372   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 373   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 374   return sz;
 375 }
 376 
 377 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
 378   MEMORY_BASIC_INFORMATION minfo;
 379   committed_start = NULL;
 380   committed_size = 0;
 381   address top = start + size;
 382   const address start_addr = start;
 383   while (start < top) {
 384     VirtualQuery(start, &minfo, sizeof(minfo));
 385     if ((minfo.State & MEM_COMMIT) == 0) {  // not committed
 386       if (committed_start != NULL) {
 387         break;
 388       }
 389     } else {  // committed
 390       if (committed_start == NULL) {
 391         committed_start = start;
 392       }
 393       size_t offset = start - (address)minfo.BaseAddress;
 394       committed_size += minfo.RegionSize - offset;
 395     }
 396     start = (address)minfo.BaseAddress + minfo.RegionSize;
 397   }
 398 
 399   if (committed_start == NULL) {
 400     assert(committed_size == 0, "Sanity");
 401     return false;
 402   } else {
 403     assert(committed_start >= start_addr && committed_start < top, "Out of range");
 404     // current region may go beyond the limit, trim to the limit
 405     committed_size = MIN2(committed_size, size_t(top - committed_start));
 406     return true;
 407   }
 408 }
 409 
 410 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 411   const struct tm* time_struct_ptr = localtime(clock);
 412   if (time_struct_ptr != NULL) {
 413     *res = *time_struct_ptr;
 414     return res;
 415   }
 416   return NULL;
 417 }
 418 
 419 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 420   const struct tm* time_struct_ptr = gmtime(clock);
 421   if (time_struct_ptr != NULL) {
 422     *res = *time_struct_ptr;
 423     return res;
 424   }
 425   return NULL;
 426 }
 427 
 428 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 429 
 430 // Thread start routine for all newly created threads
 431 static unsigned __stdcall thread_native_entry(Thread* thread) {
 432 
 433   thread->record_stack_base_and_size();
 434 
 435   // Try to randomize the cache line index of hot stack frames.
 436   // This helps when threads of the same stack traces evict each other's
 437   // cache lines. The threads can be either from the same JVM instance, or
 438   // from different JVM instances. The benefit is especially true for
 439   // processors with hyperthreading technology.
 440   static int counter = 0;
 441   int pid = os::current_process_id();
 442   _alloca(((pid ^ counter++) & 7) * 128);
 443 
 444   thread->initialize_thread_current();
 445 
 446   OSThread* osthr = thread->osthread();
 447   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 448 
 449   if (UseNUMA) {
 450     int lgrp_id = os::numa_get_group_id();
 451     if (lgrp_id != -1) {
 452       thread->set_lgrp_id(lgrp_id);
 453     }
 454   }
 455 
 456   // Diagnostic code to investigate JDK-6573254
 457   int res = 30115;  // non-java thread
 458   if (thread->is_Java_thread()) {
 459     res = 20115;    // java thread
 460   }
 461 
 462   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 463 
 464 #ifdef USE_VECTORED_EXCEPTION_HANDLING
 465   // Any exception is caught by the Vectored Exception Handler, so VM can
 466   // generate error dump when an exception occurred in non-Java thread
 467   // (e.g. VM thread).
 468   thread->call_run();
 469 #else
 470   // Install a win32 structured exception handler around every thread created
 471   // by VM, so VM can generate error dump when an exception occurred in non-
 472   // Java thread (e.g. VM thread).
 473   __try {
 474     thread->call_run();
 475   } __except(topLevelExceptionFilter(
 476                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 477     // Nothing to do.
 478   }
 479 #endif
 480 
 481   // Note: at this point the thread object may already have deleted itself.
 482   // Do not dereference it from here on out.
 483 
 484   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 485 
 486   // One less thread is executing
 487   // When the VMThread gets here, the main thread may have already exited
 488   // which frees the CodeHeap containing the Atomic::add code
 489   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 490     Atomic::dec(&os::win32::_os_thread_count);
 491   }
 492 
 493   // Thread must not return from exit_process_or_thread(), but if it does,
 494   // let it proceed to exit normally
 495   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 496 }
 497 
 498 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 499                                   int thread_id) {
 500   // Allocate the OSThread object
 501   OSThread* osthread = new OSThread(NULL, NULL);
 502   if (osthread == NULL) return NULL;
 503 
 504   // Initialize the JDK library's interrupt event.
 505   // This should really be done when OSThread is constructed,
 506   // but there is no way for a constructor to report failure to
 507   // allocate the event.
 508   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 509   if (interrupt_event == NULL) {
 510     delete osthread;
 511     return NULL;
 512   }
 513   osthread->set_interrupt_event(interrupt_event);
 514 
 515   // Store info on the Win32 thread into the OSThread
 516   osthread->set_thread_handle(thread_handle);
 517   osthread->set_thread_id(thread_id);
 518 
 519   if (UseNUMA) {
 520     int lgrp_id = os::numa_get_group_id();
 521     if (lgrp_id != -1) {
 522       thread->set_lgrp_id(lgrp_id);
 523     }
 524   }
 525 
 526   // Initial thread state is INITIALIZED, not SUSPENDED
 527   osthread->set_state(INITIALIZED);
 528 
 529   return osthread;
 530 }
 531 
 532 
 533 bool os::create_attached_thread(JavaThread* thread) {
 534 #ifdef ASSERT
 535   thread->verify_not_published();
 536 #endif
 537   HANDLE thread_h;
 538   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 539                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 540     fatal("DuplicateHandle failed\n");
 541   }
 542   OSThread* osthread = create_os_thread(thread, thread_h,
 543                                         (int)current_thread_id());
 544   if (osthread == NULL) {
 545     return false;
 546   }
 547 
 548   // Initial thread state is RUNNABLE
 549   osthread->set_state(RUNNABLE);
 550 
 551   thread->set_osthread(osthread);
 552 
 553   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 554     os::current_thread_id());
 555 
 556   return true;
 557 }
 558 
 559 bool os::create_main_thread(JavaThread* thread) {
 560 #ifdef ASSERT
 561   thread->verify_not_published();
 562 #endif
 563   if (_starting_thread == NULL) {
 564     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 565     if (_starting_thread == NULL) {
 566       return false;
 567     }
 568   }
 569 
 570   // The primordial thread is runnable from the start)
 571   _starting_thread->set_state(RUNNABLE);
 572 
 573   thread->set_osthread(_starting_thread);
 574   return true;
 575 }
 576 
 577 // Helper function to trace _beginthreadex attributes,
 578 //  similar to os::Posix::describe_pthread_attr()
 579 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 580                                                size_t stacksize, unsigned initflag) {
 581   stringStream ss(buf, buflen);
 582   if (stacksize == 0) {
 583     ss.print("stacksize: default, ");
 584   } else {
 585     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 586   }
 587   ss.print("flags: ");
 588   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 589   #define ALL(X) \
 590     X(CREATE_SUSPENDED) \
 591     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 592   ALL(PRINT_FLAG)
 593   #undef ALL
 594   #undef PRINT_FLAG
 595   return buf;
 596 }
 597 
 598 // Allocate and initialize a new OSThread
 599 bool os::create_thread(Thread* thread, ThreadType thr_type,
 600                        size_t stack_size) {
 601   unsigned thread_id;
 602 
 603   // Allocate the OSThread object
 604   OSThread* osthread = new OSThread(NULL, NULL);
 605   if (osthread == NULL) {
 606     return false;
 607   }
 608 
 609   // Initialize the JDK library's interrupt event.
 610   // This should really be done when OSThread is constructed,
 611   // but there is no way for a constructor to report failure to
 612   // allocate the event.
 613   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 614   if (interrupt_event == NULL) {
 615     delete osthread;
 616     return false;
 617   }
 618   osthread->set_interrupt_event(interrupt_event);
 619   // We don't call set_interrupted(false) as it will trip the assert in there
 620   // as we are not operating on the current thread. We don't need to call it
 621   // because the initial state is already correct.
 622 
 623   thread->set_osthread(osthread);
 624 
 625   if (stack_size == 0) {
 626     switch (thr_type) {
 627     case os::java_thread:
 628       // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
 629       if (JavaThread::stack_size_at_create() > 0) {
 630         stack_size = JavaThread::stack_size_at_create();
 631       }
 632       break;
 633     case os::compiler_thread:
 634       if (CompilerThreadStackSize > 0) {
 635         stack_size = (size_t)(CompilerThreadStackSize * K);
 636         break;
 637       } // else fall through:
 638         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 639     case os::vm_thread:
 640     case os::pgc_thread:
 641     case os::cgc_thread:
 642     case os::watcher_thread:
 643       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 644       break;
 645     }
 646   }
 647 
 648   // Create the Win32 thread
 649   //
 650   // Contrary to what MSDN document says, "stack_size" in _beginthreadex()
 651   // does not specify stack size. Instead, it specifies the size of
 652   // initially committed space. The stack size is determined by
 653   // PE header in the executable. If the committed "stack_size" is larger
 654   // than default value in the PE header, the stack is rounded up to the
 655   // nearest multiple of 1MB. For example if the launcher has default
 656   // stack size of 320k, specifying any size less than 320k does not
 657   // affect the actual stack size at all, it only affects the initial
 658   // commitment. On the other hand, specifying 'stack_size' larger than
 659   // default value may cause significant increase in memory usage, because
 660   // not only the stack space will be rounded up to MB, but also the
 661   // entire space is committed upfront.
 662   //
 663   // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
 664   // for CreateThread() that can treat 'stack_size' as stack size. However we
 665   // are not supposed to call CreateThread() directly according to MSDN
 666   // document because JVM uses C runtime library. The good news is that the
 667   // flag appears to work with _beginthredex() as well.
 668 
 669   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 670   HANDLE thread_handle =
 671     (HANDLE)_beginthreadex(NULL,
 672                            (unsigned)stack_size,
 673                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 674                            thread,
 675                            initflag,
 676                            &thread_id);
 677 
 678   char buf[64];
 679   if (thread_handle != NULL) {
 680     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 681       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 682   } else {
 683     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 684       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 685     // Log some OS information which might explain why creating the thread failed.
 686     log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
 687     LogStream st(Log(os, thread)::info());
 688     os::print_memory_info(&st);
 689   }
 690 
 691   if (thread_handle == NULL) {
 692     // Need to clean up stuff we've allocated so far
 693     thread->set_osthread(NULL);
 694     delete osthread;
 695     return false;
 696   }
 697 
 698   Atomic::inc(&os::win32::_os_thread_count);
 699 
 700   // Store info on the Win32 thread into the OSThread
 701   osthread->set_thread_handle(thread_handle);
 702   osthread->set_thread_id(thread_id);
 703 
 704   // Initial thread state is INITIALIZED, not SUSPENDED
 705   osthread->set_state(INITIALIZED);
 706 
 707   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 708   return true;
 709 }
 710 
 711 
 712 // Free Win32 resources related to the OSThread
 713 void os::free_thread(OSThread* osthread) {
 714   assert(osthread != NULL, "osthread not set");
 715 
 716   // We are told to free resources of the argument thread,
 717   // but we can only really operate on the current thread.
 718   assert(Thread::current()->osthread() == osthread,
 719          "os::free_thread but not current thread");
 720 
 721   CloseHandle(osthread->thread_handle());
 722   delete osthread;
 723 }
 724 
 725 static jlong first_filetime;
 726 static jlong initial_performance_count;
 727 static jlong performance_frequency;
 728 
 729 
 730 jlong as_long(LARGE_INTEGER x) {
 731   jlong result = 0; // initialization to avoid warning
 732   set_high(&result, x.HighPart);
 733   set_low(&result, x.LowPart);
 734   return result;
 735 }
 736 
 737 
 738 jlong os::elapsed_counter() {
 739   LARGE_INTEGER count;
 740   QueryPerformanceCounter(&count);
 741   return as_long(count) - initial_performance_count;
 742 }
 743 
 744 
 745 jlong os::elapsed_frequency() {
 746   return performance_frequency;
 747 }
 748 
 749 
 750 julong os::available_memory() {
 751   return win32::available_memory();
 752 }
 753 
 754 julong os::win32::available_memory() {
 755   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
 756   // value if total memory is larger than 4GB
 757   MEMORYSTATUSEX ms;
 758   ms.dwLength = sizeof(ms);
 759   GlobalMemoryStatusEx(&ms);
 760 
 761   return (julong)ms.ullAvailPhys;
 762 }
 763 
 764 julong os::physical_memory() {
 765   return win32::physical_memory();
 766 }
 767 
 768 bool os::has_allocatable_memory_limit(julong* limit) {
 769   MEMORYSTATUSEX ms;
 770   ms.dwLength = sizeof(ms);
 771   GlobalMemoryStatusEx(&ms);
 772 #ifdef _LP64
 773   *limit = (julong)ms.ullAvailVirtual;
 774   return true;
 775 #else
 776   // Limit to 1400m because of the 2gb address space wall
 777   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 778   return true;
 779 #endif
 780 }
 781 
 782 int os::active_processor_count() {
 783   // User has overridden the number of active processors
 784   if (ActiveProcessorCount > 0) {
 785     log_trace(os)("active_processor_count: "
 786                   "active processor count set by user : %d",
 787                   ActiveProcessorCount);
 788     return ActiveProcessorCount;
 789   }
 790 
 791   DWORD_PTR lpProcessAffinityMask = 0;
 792   DWORD_PTR lpSystemAffinityMask = 0;
 793   int proc_count = processor_count();
 794   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 795       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
 796     // Nof active processors is number of bits in process affinity mask
 797     int bitcount = 0;
 798     while (lpProcessAffinityMask != 0) {
 799       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 800       bitcount++;
 801     }
 802     return bitcount;
 803   } else {
 804     return proc_count;
 805   }
 806 }
 807 
 808 uint os::processor_id() {
 809   return (uint)GetCurrentProcessorNumber();
 810 }
 811 
 812 void os::set_native_thread_name(const char *name) {
 813 
 814   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 815   //
 816   // Note that unfortunately this only works if the process
 817   // is already attached to a debugger; debugger must observe
 818   // the exception below to show the correct name.
 819 
 820   // If there is no debugger attached skip raising the exception
 821   if (!IsDebuggerPresent()) {
 822     return;
 823   }
 824 
 825   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 826   struct {
 827     DWORD dwType;     // must be 0x1000
 828     LPCSTR szName;    // pointer to name (in user addr space)
 829     DWORD dwThreadID; // thread ID (-1=caller thread)
 830     DWORD dwFlags;    // reserved for future use, must be zero
 831   } info;
 832 
 833   info.dwType = 0x1000;
 834   info.szName = name;
 835   info.dwThreadID = -1;
 836   info.dwFlags = 0;
 837 
 838   __try {
 839     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 840   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 841 }
 842 
 843 bool os::bind_to_processor(uint processor_id) {
 844   // Not yet implemented.
 845   return false;
 846 }
 847 
 848 void os::win32::initialize_performance_counter() {
 849   LARGE_INTEGER count;
 850   QueryPerformanceFrequency(&count);
 851   performance_frequency = as_long(count);
 852   QueryPerformanceCounter(&count);
 853   initial_performance_count = as_long(count);
 854 }
 855 
 856 
 857 double os::elapsedTime() {
 858   return (double) elapsed_counter() / (double) elapsed_frequency();
 859 }
 860 
 861 
 862 // Windows format:
 863 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 864 // Java format:
 865 //   Java standards require the number of milliseconds since 1/1/1970
 866 
 867 // Constant offset - calculated using offset()
 868 static jlong  _offset   = 116444736000000000;
 869 // Fake time counter for reproducible results when debugging
 870 static jlong  fake_time = 0;
 871 
 872 #ifdef ASSERT
 873 // Just to be safe, recalculate the offset in debug mode
 874 static jlong _calculated_offset = 0;
 875 static int   _has_calculated_offset = 0;
 876 
 877 jlong offset() {
 878   if (_has_calculated_offset) return _calculated_offset;
 879   SYSTEMTIME java_origin;
 880   java_origin.wYear          = 1970;
 881   java_origin.wMonth         = 1;
 882   java_origin.wDayOfWeek     = 0; // ignored
 883   java_origin.wDay           = 1;
 884   java_origin.wHour          = 0;
 885   java_origin.wMinute        = 0;
 886   java_origin.wSecond        = 0;
 887   java_origin.wMilliseconds  = 0;
 888   FILETIME jot;
 889   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 890     fatal("Error = %d\nWindows error", GetLastError());
 891   }
 892   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 893   _has_calculated_offset = 1;
 894   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 895   return _calculated_offset;
 896 }
 897 #else
 898 jlong offset() {
 899   return _offset;
 900 }
 901 #endif
 902 
 903 jlong windows_to_java_time(FILETIME wt) {
 904   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 905   return (a - offset()) / 10000;
 906 }
 907 
 908 // Returns time ticks in (10th of micro seconds)
 909 jlong windows_to_time_ticks(FILETIME wt) {
 910   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 911   return (a - offset());
 912 }
 913 
 914 FILETIME java_to_windows_time(jlong l) {
 915   jlong a = (l * 10000) + offset();
 916   FILETIME result;
 917   result.dwHighDateTime = high(a);
 918   result.dwLowDateTime  = low(a);
 919   return result;
 920 }
 921 
 922 bool os::supports_vtime() { return true; }
 923 
 924 double os::elapsedVTime() {
 925   FILETIME created;
 926   FILETIME exited;
 927   FILETIME kernel;
 928   FILETIME user;
 929   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 930     // the resolution of windows_to_java_time() should be sufficient (ms)
 931     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 932   } else {
 933     return elapsedTime();
 934   }
 935 }
 936 
 937 jlong os::javaTimeMillis() {
 938   FILETIME wt;
 939   GetSystemTimeAsFileTime(&wt);
 940   return windows_to_java_time(wt);
 941 }
 942 
 943 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 944   FILETIME wt;
 945   GetSystemTimeAsFileTime(&wt);
 946   jlong ticks = windows_to_time_ticks(wt); // 10th of micros
 947   jlong secs = jlong(ticks / 10000000); // 10000 * 1000
 948   seconds = secs;
 949   nanos = jlong(ticks - (secs*10000000)) * 100;
 950 }
 951 
 952 jlong os::javaTimeNanos() {
 953     LARGE_INTEGER current_count;
 954     QueryPerformanceCounter(&current_count);
 955     double current = as_long(current_count);
 956     double freq = performance_frequency;
 957     jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 958     return time;
 959 }
 960 
 961 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 962   jlong freq = performance_frequency;
 963   if (freq < NANOSECS_PER_SEC) {
 964     // the performance counter is 64 bits and we will
 965     // be multiplying it -- so no wrap in 64 bits
 966     info_ptr->max_value = ALL_64_BITS;
 967   } else if (freq > NANOSECS_PER_SEC) {
 968     // use the max value the counter can reach to
 969     // determine the max value which could be returned
 970     julong max_counter = (julong)ALL_64_BITS;
 971     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 972   } else {
 973     // the performance counter is 64 bits and we will
 974     // be using it directly -- so no wrap in 64 bits
 975     info_ptr->max_value = ALL_64_BITS;
 976   }
 977 
 978   // using a counter, so no skipping
 979   info_ptr->may_skip_backward = false;
 980   info_ptr->may_skip_forward = false;
 981 
 982   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 983 }
 984 
 985 char* os::local_time_string(char *buf, size_t buflen) {
 986   SYSTEMTIME st;
 987   GetLocalTime(&st);
 988   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 989                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 990   return buf;
 991 }
 992 
 993 bool os::getTimesSecs(double* process_real_time,
 994                       double* process_user_time,
 995                       double* process_system_time) {
 996   HANDLE h_process = GetCurrentProcess();
 997   FILETIME create_time, exit_time, kernel_time, user_time;
 998   BOOL result = GetProcessTimes(h_process,
 999                                 &create_time,
1000                                 &exit_time,
1001                                 &kernel_time,
1002                                 &user_time);
1003   if (result != 0) {
1004     FILETIME wt;
1005     GetSystemTimeAsFileTime(&wt);
1006     jlong rtc_millis = windows_to_java_time(wt);
1007     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
1008     *process_user_time =
1009       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
1010     *process_system_time =
1011       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
1012     return true;
1013   } else {
1014     return false;
1015   }
1016 }
1017 
1018 void os::shutdown() {
1019   // allow PerfMemory to attempt cleanup of any persistent resources
1020   perfMemory_exit();
1021 
1022   // flush buffered output, finish log files
1023   ostream_abort();
1024 
1025   // Check for abort hook
1026   abort_hook_t abort_hook = Arguments::abort_hook();
1027   if (abort_hook != NULL) {
1028     abort_hook();
1029   }
1030 }
1031 
1032 
1033 static HANDLE dumpFile = NULL;
1034 
1035 // Check if dump file can be created.
1036 void os::check_dump_limit(char* buffer, size_t buffsz) {
1037   bool status = true;
1038   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1039     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1040     status = false;
1041   }
1042 
1043 #ifndef ASSERT
1044   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1045     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1046     status = false;
1047   }
1048 #endif
1049 
1050   if (status) {
1051     const char* cwd = get_current_directory(NULL, 0);
1052     int pid = current_process_id();
1053     if (cwd != NULL) {
1054       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1055     } else {
1056       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1057     }
1058 
1059     if (dumpFile == NULL &&
1060        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1061                  == INVALID_HANDLE_VALUE) {
1062       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1063       status = false;
1064     }
1065   }
1066   VMError::record_coredump_status(buffer, status);
1067 }
1068 
1069 void os::abort(bool dump_core, void* siginfo, const void* context) {
1070   EXCEPTION_POINTERS ep;
1071   MINIDUMP_EXCEPTION_INFORMATION mei;
1072   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1073 
1074   HANDLE hProcess = GetCurrentProcess();
1075   DWORD processId = GetCurrentProcessId();
1076   MINIDUMP_TYPE dumpType;
1077 
1078   shutdown();
1079   if (!dump_core || dumpFile == NULL) {
1080     if (dumpFile != NULL) {
1081       CloseHandle(dumpFile);
1082     }
1083     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1084   }
1085 
1086   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1087     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1088 
1089   if (siginfo != NULL && context != NULL) {
1090     ep.ContextRecord = (PCONTEXT) context;
1091     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1092 
1093     mei.ThreadId = GetCurrentThreadId();
1094     mei.ExceptionPointers = &ep;
1095     pmei = &mei;
1096   } else {
1097     pmei = NULL;
1098   }
1099 
1100   // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
1101   // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
1102   if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
1103       !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
1104     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1105   }
1106   CloseHandle(dumpFile);
1107   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1108 }
1109 
1110 // Die immediately, no exit hook, no abort hook, no cleanup.
1111 void os::die() {
1112   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1113 }
1114 
1115 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1116 //  * dirent_md.c       1.15 00/02/02
1117 //
1118 // The declarations for DIR and struct dirent are in jvm_win32.h.
1119 
1120 // Caller must have already run dirname through JVM_NativePath, which removes
1121 // duplicate slashes and converts all instances of '/' into '\\'.
1122 
1123 DIR * os::opendir(const char *dirname) {
1124   assert(dirname != NULL, "just checking");   // hotspot change
1125   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1126   DWORD fattr;                                // hotspot change
1127   char alt_dirname[4] = { 0, 0, 0, 0 };
1128 
1129   if (dirp == 0) {
1130     errno = ENOMEM;
1131     return 0;
1132   }
1133 
1134   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1135   // as a directory in FindFirstFile().  We detect this case here and
1136   // prepend the current drive name.
1137   //
1138   if (dirname[1] == '\0' && dirname[0] == '\\') {
1139     alt_dirname[0] = _getdrive() + 'A' - 1;
1140     alt_dirname[1] = ':';
1141     alt_dirname[2] = '\\';
1142     alt_dirname[3] = '\0';
1143     dirname = alt_dirname;
1144   }
1145 
1146   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1147   if (dirp->path == 0) {
1148     free(dirp);
1149     errno = ENOMEM;
1150     return 0;
1151   }
1152   strcpy(dirp->path, dirname);
1153 
1154   fattr = GetFileAttributes(dirp->path);
1155   if (fattr == 0xffffffff) {
1156     free(dirp->path);
1157     free(dirp);
1158     errno = ENOENT;
1159     return 0;
1160   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1161     free(dirp->path);
1162     free(dirp);
1163     errno = ENOTDIR;
1164     return 0;
1165   }
1166 
1167   // Append "*.*", or possibly "\\*.*", to path
1168   if (dirp->path[1] == ':' &&
1169       (dirp->path[2] == '\0' ||
1170       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1171     // No '\\' needed for cases like "Z:" or "Z:\"
1172     strcat(dirp->path, "*.*");
1173   } else {
1174     strcat(dirp->path, "\\*.*");
1175   }
1176 
1177   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1178   if (dirp->handle == INVALID_HANDLE_VALUE) {
1179     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1180       free(dirp->path);
1181       free(dirp);
1182       errno = EACCES;
1183       return 0;
1184     }
1185   }
1186   return dirp;
1187 }
1188 
1189 struct dirent * os::readdir(DIR *dirp) {
1190   assert(dirp != NULL, "just checking");      // hotspot change
1191   if (dirp->handle == INVALID_HANDLE_VALUE) {
1192     return NULL;
1193   }
1194 
1195   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1196 
1197   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1198     if (GetLastError() == ERROR_INVALID_HANDLE) {
1199       errno = EBADF;
1200       return NULL;
1201     }
1202     FindClose(dirp->handle);
1203     dirp->handle = INVALID_HANDLE_VALUE;
1204   }
1205 
1206   return &dirp->dirent;
1207 }
1208 
1209 int os::closedir(DIR *dirp) {
1210   assert(dirp != NULL, "just checking");      // hotspot change
1211   if (dirp->handle != INVALID_HANDLE_VALUE) {
1212     if (!FindClose(dirp->handle)) {
1213       errno = EBADF;
1214       return -1;
1215     }
1216     dirp->handle = INVALID_HANDLE_VALUE;
1217   }
1218   free(dirp->path);
1219   free(dirp);
1220   return 0;
1221 }
1222 
1223 // This must be hard coded because it's the system's temporary
1224 // directory not the java application's temp directory, ala java.io.tmpdir.
1225 const char* os::get_temp_directory() {
1226   static char path_buf[MAX_PATH];
1227   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1228     return path_buf;
1229   } else {
1230     path_buf[0] = '\0';
1231     return path_buf;
1232   }
1233 }
1234 
1235 // Needs to be in os specific directory because windows requires another
1236 // header file <direct.h>
1237 const char* os::get_current_directory(char *buf, size_t buflen) {
1238   int n = static_cast<int>(buflen);
1239   if (buflen > INT_MAX)  n = INT_MAX;
1240   return _getcwd(buf, n);
1241 }
1242 
1243 //-----------------------------------------------------------
1244 // Helper functions for fatal error handler
1245 #ifdef _WIN64
1246 // Helper routine which returns true if address in
1247 // within the NTDLL address space.
1248 //
1249 static bool _addr_in_ntdll(address addr) {
1250   HMODULE hmod;
1251   MODULEINFO minfo;
1252 
1253   hmod = GetModuleHandle("NTDLL.DLL");
1254   if (hmod == NULL) return false;
1255   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1256                                           &minfo, sizeof(MODULEINFO))) {
1257     return false;
1258   }
1259 
1260   if ((addr >= minfo.lpBaseOfDll) &&
1261       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1262     return true;
1263   } else {
1264     return false;
1265   }
1266 }
1267 #endif
1268 
1269 struct _modinfo {
1270   address addr;
1271   char*   full_path;   // point to a char buffer
1272   int     buflen;      // size of the buffer
1273   address base_addr;
1274 };
1275 
1276 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1277                                   address top_address, void * param) {
1278   struct _modinfo *pmod = (struct _modinfo *)param;
1279   if (!pmod) return -1;
1280 
1281   if (base_addr   <= pmod->addr &&
1282       top_address > pmod->addr) {
1283     // if a buffer is provided, copy path name to the buffer
1284     if (pmod->full_path) {
1285       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1286     }
1287     pmod->base_addr = base_addr;
1288     return 1;
1289   }
1290   return 0;
1291 }
1292 
1293 bool os::dll_address_to_library_name(address addr, char* buf,
1294                                      int buflen, int* offset) {
1295   // buf is not optional, but offset is optional
1296   assert(buf != NULL, "sanity check");
1297 
1298 // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
1299 //       return the full path to the DLL file, sometimes it returns path
1300 //       to the corresponding PDB file (debug info); sometimes it only
1301 //       returns partial path, which makes life painful.
1302 
1303   struct _modinfo mi;
1304   mi.addr      = addr;
1305   mi.full_path = buf;
1306   mi.buflen    = buflen;
1307   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1308     // buf already contains path name
1309     if (offset) *offset = addr - mi.base_addr;
1310     return true;
1311   }
1312 
1313   buf[0] = '\0';
1314   if (offset) *offset = -1;
1315   return false;
1316 }
1317 
1318 bool os::dll_address_to_function_name(address addr, char *buf,
1319                                       int buflen, int *offset,
1320                                       bool demangle) {
1321   // buf is not optional, but offset is optional
1322   assert(buf != NULL, "sanity check");
1323 
1324   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1325     return true;
1326   }
1327   if (offset != NULL)  *offset  = -1;
1328   buf[0] = '\0';
1329   return false;
1330 }
1331 
1332 // save the start and end address of jvm.dll into param[0] and param[1]
1333 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1334                            address top_address, void * param) {
1335   if (!param) return -1;
1336 
1337   if (base_addr   <= (address)_locate_jvm_dll &&
1338       top_address > (address)_locate_jvm_dll) {
1339     ((address*)param)[0] = base_addr;
1340     ((address*)param)[1] = top_address;
1341     return 1;
1342   }
1343   return 0;
1344 }
1345 
1346 address vm_lib_location[2];    // start and end address of jvm.dll
1347 
1348 // check if addr is inside jvm.dll
1349 bool os::address_is_in_vm(address addr) {
1350   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1351     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1352       assert(false, "Can't find jvm module.");
1353       return false;
1354     }
1355   }
1356 
1357   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1358 }
1359 
1360 // print module info; param is outputStream*
1361 static int _print_module(const char* fname, address base_address,
1362                          address top_address, void* param) {
1363   if (!param) return -1;
1364 
1365   outputStream* st = (outputStream*)param;
1366 
1367   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1368   return 0;
1369 }
1370 
1371 // Loads .dll/.so and
1372 // in case of error it checks if .dll/.so was built for the
1373 // same architecture as Hotspot is running on
1374 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1375   log_info(os)("attempting shared library load of %s", name);
1376 
1377   void * result = LoadLibrary(name);
1378   if (result != NULL) {
1379     Events::log(NULL, "Loaded shared library %s", name);
1380     // Recalculate pdb search path if a DLL was loaded successfully.
1381     SymbolEngine::recalc_search_path();
1382     log_info(os)("shared library load of %s was successful", name);
1383     return result;
1384   }
1385   DWORD errcode = GetLastError();
1386   // Read system error message into ebuf
1387   // It may or may not be overwritten below (in the for loop and just above)
1388   lasterror(ebuf, (size_t) ebuflen);
1389   ebuf[ebuflen - 1] = '\0';
1390   Events::log(NULL, "Loading shared library %s failed, error code %lu", name, errcode);
1391   log_info(os)("shared library load of %s failed, error code %lu", name, errcode);
1392 
1393   if (errcode == ERROR_MOD_NOT_FOUND) {
1394     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1395     ebuf[ebuflen - 1] = '\0';
1396     return NULL;
1397   }
1398 
1399   // Parsing dll below
1400   // If we can read dll-info and find that dll was built
1401   // for an architecture other than Hotspot is running in
1402   // - then print to buffer "DLL was built for a different architecture"
1403   // else call os::lasterror to obtain system error message
1404   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1405   if (fd < 0) {
1406     return NULL;
1407   }
1408 
1409   uint32_t signature_offset;
1410   uint16_t lib_arch = 0;
1411   bool failed_to_get_lib_arch =
1412     ( // Go to position 3c in the dll
1413      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1414      ||
1415      // Read location of signature
1416      (sizeof(signature_offset) !=
1417      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1418      ||
1419      // Go to COFF File Header in dll
1420      // that is located after "signature" (4 bytes long)
1421      (os::seek_to_file_offset(fd,
1422      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1423      ||
1424      // Read field that contains code of architecture
1425      // that dll was built for
1426      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1427     );
1428 
1429   ::close(fd);
1430   if (failed_to_get_lib_arch) {
1431     // file i/o error - report os::lasterror(...) msg
1432     return NULL;
1433   }
1434 
1435   typedef struct {
1436     uint16_t arch_code;
1437     char* arch_name;
1438   } arch_t;
1439 
1440   static const arch_t arch_array[] = {
1441     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1442     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
1443     {IMAGE_FILE_MACHINE_ARM64,     (char*)"ARM 64"}
1444   };
1445 #if (defined _M_ARM64)
1446   static const uint16_t running_arch = IMAGE_FILE_MACHINE_ARM64;
1447 #elif (defined _M_AMD64)
1448   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1449 #elif (defined _M_IX86)
1450   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1451 #else
1452   #error Method os::dll_load requires that one of following \
1453          is defined :_M_AMD64 or _M_IX86 or _M_ARM64
1454 #endif
1455 
1456 
1457   // Obtain a string for printf operation
1458   // lib_arch_str shall contain string what platform this .dll was built for
1459   // running_arch_str shall string contain what platform Hotspot was built for
1460   char *running_arch_str = NULL, *lib_arch_str = NULL;
1461   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1462     if (lib_arch == arch_array[i].arch_code) {
1463       lib_arch_str = arch_array[i].arch_name;
1464     }
1465     if (running_arch == arch_array[i].arch_code) {
1466       running_arch_str = arch_array[i].arch_name;
1467     }
1468   }
1469 
1470   assert(running_arch_str,
1471          "Didn't find running architecture code in arch_array");
1472 
1473   // If the architecture is right
1474   // but some other error took place - report os::lasterror(...) msg
1475   if (lib_arch == running_arch) {
1476     return NULL;
1477   }
1478 
1479   if (lib_arch_str != NULL) {
1480     ::_snprintf(ebuf, ebuflen - 1,
1481                 "Can't load %s-bit .dll on a %s-bit platform",
1482                 lib_arch_str, running_arch_str);
1483   } else {
1484     // don't know what architecture this dll was build for
1485     ::_snprintf(ebuf, ebuflen - 1,
1486                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1487                 lib_arch, running_arch_str);
1488   }
1489 
1490   return NULL;
1491 }
1492 
1493 void os::print_dll_info(outputStream *st) {
1494   st->print_cr("Dynamic libraries:");
1495   get_loaded_modules_info(_print_module, (void *)st);
1496 }
1497 
1498 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1499   HANDLE   hProcess;
1500 
1501 # define MAX_NUM_MODULES 128
1502   HMODULE     modules[MAX_NUM_MODULES];
1503   static char filename[MAX_PATH];
1504   int         result = 0;
1505 
1506   int pid = os::current_process_id();
1507   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1508                          FALSE, pid);
1509   if (hProcess == NULL) return 0;
1510 
1511   DWORD size_needed;
1512   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1513     CloseHandle(hProcess);
1514     return 0;
1515   }
1516 
1517   // number of modules that are currently loaded
1518   int num_modules = size_needed / sizeof(HMODULE);
1519 
1520   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1521     // Get Full pathname:
1522     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1523       filename[0] = '\0';
1524     }
1525 
1526     MODULEINFO modinfo;
1527     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1528       modinfo.lpBaseOfDll = NULL;
1529       modinfo.SizeOfImage = 0;
1530     }
1531 
1532     // Invoke callback function
1533     result = callback(filename, (address)modinfo.lpBaseOfDll,
1534                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1535     if (result) break;
1536   }
1537 
1538   CloseHandle(hProcess);
1539   return result;
1540 }
1541 
1542 bool os::get_host_name(char* buf, size_t buflen) {
1543   DWORD size = (DWORD)buflen;
1544   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1545 }
1546 
1547 void os::get_summary_os_info(char* buf, size_t buflen) {
1548   stringStream sst(buf, buflen);
1549   os::win32::print_windows_version(&sst);
1550   // chop off newline character
1551   char* nl = strchr(buf, '\n');
1552   if (nl != NULL) *nl = '\0';
1553 }
1554 
1555 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1556 #if _MSC_VER >= 1900
1557   // Starting with Visual Studio 2015, vsnprint is C99 compliant.
1558   int result = ::vsnprintf(buf, len, fmt, args);
1559   // If an encoding error occurred (result < 0) then it's not clear
1560   // whether the buffer is NUL terminated, so ensure it is.
1561   if ((result < 0) && (len > 0)) {
1562     buf[len - 1] = '\0';
1563   }
1564   return result;
1565 #else
1566   // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
1567   // _vsnprintf, whose behavior seems to be *mostly* consistent across
1568   // versions.  However, when len == 0, avoid _vsnprintf too, and just
1569   // go straight to _vscprintf.  The output is going to be truncated in
1570   // that case, except in the unusual case of empty output.  More
1571   // importantly, the documentation for various versions of Visual Studio
1572   // is inconsistent about the behavior of _vsnprintf when len == 0,
1573   // including it possibly being an error.
1574   int result = -1;
1575   if (len > 0) {
1576     result = _vsnprintf(buf, len, fmt, args);
1577     // If output (including NUL terminator) is truncated, the buffer
1578     // won't be NUL terminated.  Add the trailing NUL specified by C99.
1579     if ((result < 0) || ((size_t)result >= len)) {
1580       buf[len - 1] = '\0';
1581     }
1582   }
1583   if (result < 0) {
1584     result = _vscprintf(fmt, args);
1585   }
1586   return result;
1587 #endif // _MSC_VER dispatch
1588 }
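
// Caller-side sketch (illustrative only, assuming the C99 semantics provided
// above): truncation can be detected by comparing the return value with the
// buffer length, e.g. through the os::snprintf wrapper that forwards here.
//
//   char buf[64];
//   int n = os::snprintf(buf, sizeof(buf), "pid: %d", os::current_process_id());
//   if (n < 0) {
//     // encoding error; buf is still NUL terminated
//   } else if ((size_t)n >= sizeof(buf)) {
//     // output was truncated to sizeof(buf) - 1 characters plus NUL
//   }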
1589 
1590 static inline time_t get_mtime(const char* filename) {
1591   struct stat st;
1592   int ret = os::stat(filename, &st);
1593   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
1594   return st.st_mtime;
1595 }
1596 
1597 int os::compare_file_modified_times(const char* file1, const char* file2) {
1598   time_t t1 = get_mtime(file1);
1599   time_t t2 = get_mtime(file2);
1600   return (t1 < t2) ? -1 : ((t1 > t2) ? 1 : 0);  // avoid truncating the 64-bit time_t difference to int
1601 }
1602 
1603 void os::print_os_info_brief(outputStream* st) {
1604   os::print_os_info(st);
1605 }
1606 
1607 void os::win32::print_uptime_info(outputStream* st) {
1608   unsigned long long ticks = GetTickCount64();
1609   os::print_dhm(st, "OS uptime:", ticks/1000);
1610 }
1611 
1612 void os::print_os_info(outputStream* st) {
1613 #ifdef ASSERT
1614   char buffer[1024];
1615   st->print("HostName: ");
1616   if (get_host_name(buffer, sizeof(buffer))) {
1617     st->print("%s ", buffer);
1618   } else {
1619     st->print("N/A ");
1620   }
1621 #endif
1622   st->print_cr("OS:");
1623   os::win32::print_windows_version(st);
1624 
1625   os::win32::print_uptime_info(st);
1626 
1627 #ifdef _LP64
1628   VM_Version::print_platform_virtualization_info(st);
1629 #endif
1630 }
1631 
1632 void os::win32::print_windows_version(outputStream* st) {
1633   OSVERSIONINFOEX osvi;
1634   VS_FIXEDFILEINFO *file_info;
1635   TCHAR kernel32_path[MAX_PATH];
1636   UINT len, ret;
1637 
1638   // Use the GetVersionEx information to see if we're on a server or
1639   // workstation edition of Windows. Starting with Windows 8.1 we can't
1640   // trust the OS version information returned by this API.
1641   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1642   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1643   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1644     st->print_cr("Call to GetVersionEx failed");
1645     return;
1646   }
1647   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1648 
1649   // Get the full path to \Windows\System32\kernel32.dll and use that for
1650   // determining what version of Windows we're running on.
1651   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1652   ret = GetSystemDirectory(kernel32_path, len);
1653   if (ret == 0 || ret > len) {
1654     st->print_cr("Call to GetSystemDirectory failed");
1655     return;
1656   }
1657   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1658 
1659   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1660   if (version_size == 0) {
1661     st->print_cr("Call to GetFileVersionInfoSize failed");
1662     return;
1663   }
1664 
1665   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1666   if (version_info == NULL) {
1667     st->print_cr("Failed to allocate version_info");
1668     return;
1669   }
1670 
1671   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1672     os::free(version_info);
1673     st->print_cr("Call to GetFileVersionInfo failed");
1674     return;
1675   }
1676 
1677   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1678     os::free(version_info);
1679     st->print_cr("Call to VerQueryValue failed");
1680     return;
1681   }
1682 
1683   int major_version = HIWORD(file_info->dwProductVersionMS);
1684   int minor_version = LOWORD(file_info->dwProductVersionMS);
1685   int build_number = HIWORD(file_info->dwProductVersionLS);
1686   int build_minor = LOWORD(file_info->dwProductVersionLS);
1687   int os_vers = major_version * 1000 + minor_version;
1688   os::free(version_info);
1689 
1690   st->print(" Windows ");
1691   switch (os_vers) {
1692 
1693   case 6000:
1694     if (is_workstation) {
1695       st->print("Vista");
1696     } else {
1697       st->print("Server 2008");
1698     }
1699     break;
1700 
1701   case 6001:
1702     if (is_workstation) {
1703       st->print("7");
1704     } else {
1705       st->print("Server 2008 R2");
1706     }
1707     break;
1708 
1709   case 6002:
1710     if (is_workstation) {
1711       st->print("8");
1712     } else {
1713       st->print("Server 2012");
1714     }
1715     break;
1716 
1717   case 6003:
1718     if (is_workstation) {
1719       st->print("8.1");
1720     } else {
1721       st->print("Server 2012 R2");
1722     }
1723     break;
1724 
1725   case 10000:
1726     if (is_workstation) {
1727       st->print("10");
1728     } else {
1729       // distinguish Windows Server 2016 and 2019 by build number
1730       // Windows Server 2019 GA (10/2018) build number is 17763
1731       if (build_number > 17762) {
1732         st->print("Server 2019");
1733       } else {
1734         st->print("Server 2016");
1735       }
1736     }
1737     break;
1738 
1739   default:
1740     // Unrecognized Windows version; print out its major and minor versions
1741     st->print("%d.%d", major_version, minor_version);
1742     break;
1743   }
1744 
1745   // Retrieve SYSTEM_INFO from GetNativeSystemInfo so that we can
1746   // determine whether we are running on a 64-bit processor or not
1747   SYSTEM_INFO si;
1748   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1749   GetNativeSystemInfo(&si);
1750   if ((si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) ||
1751       (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_ARM64)) {
1752     st->print(" , 64 bit");
1753   }
1754 
1755   st->print(" Build %d", build_number);
1756   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1757   st->cr();
1758 }
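
// Worked example (for illustration): on Windows Server 2019 the kernel32.dll
// product version is 10.0.17763.x, so major_version = 10, minor_version = 0,
// os_vers = 10 * 1000 + 0 = 10000 and build_number = 17763; the switch above
// therefore takes the 10000 case and prints "Server 2019" on a server SKU.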
1759 
1760 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1761   // Nothing to do for now.
1762 }
1763 
1764 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1765   HKEY key;
1766   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1767                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1768   if (status == ERROR_SUCCESS) {
1769     DWORD size = (DWORD)buflen;
1770     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1771     if (status != ERROR_SUCCESS) {
1772         strncpy(buf, "## __CPU__", buflen);
1773     }
1774     RegCloseKey(key);
1775   } else {
1776     // Fall back to generic cpu info
1777     strncpy(buf, "## __CPU__", buflen);
1778   }
1779 }
1780 
1781 void os::print_memory_info(outputStream* st) {
1782   st->print("Memory:");
1783   st->print(" %dk page", os::vm_page_size()>>10);
1784 
1785   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
1786   // values if total memory is larger than 4GB
1787   MEMORYSTATUSEX ms;
1788   ms.dwLength = sizeof(ms);
1789   int r1 = GlobalMemoryStatusEx(&ms);
1790 
1791   if (r1 != 0) {
1792     st->print(", system-wide physical " INT64_FORMAT "M ",
1793              (int64_t) ms.ullTotalPhys >> 20);
1794     st->print("(" INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20);
1795 
1796     st->print("TotalPageFile size " INT64_FORMAT "M ",
1797              (int64_t) ms.ullTotalPageFile >> 20);
1798     st->print("(AvailPageFile size " INT64_FORMAT "M)",
1799              (int64_t) ms.ullAvailPageFile >> 20);
1800 
1801     // On 32-bit, Total/AvailVirtual are interesting (they show how close we get to the 2-4 GB per-process limit)
1802 #if defined(_M_IX86)
1803     st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ",
1804              (int64_t) ms.ullTotalVirtual >> 20);
1805     st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
1806 #endif
1807   } else {
1808     st->print(", GlobalMemoryStatusEx did not succeed so we miss some memory values.");
1809   }
1810 
1811   // extended memory statistics for a process
1812   PROCESS_MEMORY_COUNTERS_EX pmex;
1813   ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
1814   pmex.cb = sizeof(pmex);
1815   int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));
1816 
1817   if (r2 != 0) {
1818     st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ",
1819              (int64_t) pmex.WorkingSetSize >> 20);
1820     st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);
1821 
1822     st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ",
1823              (int64_t) pmex.PrivateUsage >> 20);
1824     st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);
1825   } else {
1826     st->print("\nGetProcessMemoryInfo did not succeed so we miss some memory values.");
1827   }
1828 
1829   st->cr();
1830 }
1831 
1832 bool os::signal_sent_by_kill(const void* siginfo) {
1833   // TODO: Is this possible?
1834   return false;
1835 }
1836 
1837 void os::print_siginfo(outputStream *st, const void* siginfo) {
1838   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1839   st->print("siginfo:");
1840 
1841   char tmp[64];
1842   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1843     strcpy(tmp, "EXCEPTION_??");
1844   }
1845   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1846 
1847   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1848        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1849        er->NumberParameters >= 2) {
1850     switch (er->ExceptionInformation[0]) {
1851     case 0: st->print(", reading address"); break;
1852     case 1: st->print(", writing address"); break;
1853     case 8: st->print(", data execution prevention violation at address"); break;
1854     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1855                        er->ExceptionInformation[0]);
1856     }
1857     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1858   } else {
1859     int num = er->NumberParameters;
1860     if (num > 0) {
1861       st->print(", ExceptionInformation=");
1862       for (int i = 0; i < num; i++) {
1863         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1864       }
1865     }
1866   }
1867   st->cr();
1868 }
1869 
1870 bool os::signal_thread(Thread* thread, int sig, const char* reason) {
1871   // TODO: Can we kill thread?
1872   return false;
1873 }
1874 
1875 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1876   // do nothing
1877 }
1878 
1879 static char saved_jvm_path[MAX_PATH] = {0};
1880 
1881 // Find the full path to the current module, jvm.dll
1882 void os::jvm_path(char *buf, jint buflen) {
1883   // Error checking.
1884   if (buflen < MAX_PATH) {
1885     assert(false, "must use a large-enough buffer");
1886     buf[0] = '\0';
1887     return;
1888   }
1889   // Lazy resolve the path to current module.
1890   if (saved_jvm_path[0] != 0) {
1891     strcpy(buf, saved_jvm_path);
1892     return;
1893   }
1894 
1895   buf[0] = '\0';
1896   if (Arguments::sun_java_launcher_is_altjvm()) {
1897     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1898     // for a JAVA_HOME environment variable and fix up the path so it
1899     // looks like jvm.dll is installed there (append a fake suffix
1900     // hotspot/jvm.dll).
1901     char* java_home_var = ::getenv("JAVA_HOME");
1902     if (java_home_var != NULL && java_home_var[0] != 0 &&
1903         strlen(java_home_var) < (size_t)buflen) {
1904       strncpy(buf, java_home_var, buflen);
1905 
1906       // determine if this is a legacy image or modules image
1907       // modules image doesn't have "jre" subdirectory
1908       size_t len = strlen(buf);
1909       char* jrebin_p = buf + len;
1910       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1911       if (0 != _access(buf, 0)) {
1912         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1913       }
1914       len = strlen(buf);
1915       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1916     }
1917   }
1918 
1919   if (buf[0] == '\0') {
1920     GetModuleFileName(vm_lib_handle, buf, buflen);
1921   }
1922   strncpy(saved_jvm_path, buf, MAX_PATH);
1923   saved_jvm_path[MAX_PATH - 1] = '\0';
1924 }
1925 
1926 
1927 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1928 #ifndef _WIN64
1929   st->print("_");
1930 #endif
1931 }
1932 
1933 
1934 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1935 #ifndef _WIN64
1936   st->print("@%d", args_size  * sizeof(int));
1937 #endif
1938 }
1939 
1940 // This method is a copy of JDK's sysGetLastErrorString
1941 // from src/windows/hpi/src/system_md.c
1942 
1943 size_t os::lasterror(char* buf, size_t len) {
1944   DWORD errval;
1945 
1946   if ((errval = GetLastError()) != 0) {
1947     // DOS error
1948     size_t n = (size_t)FormatMessage(
1949                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1950                                      NULL,
1951                                      errval,
1952                                      0,
1953                                      buf,
1954                                      (DWORD)len,
1955                                      NULL);
1956     if (n > 3) {
1957       // Drop final '.', CR, LF
1958       if (buf[n - 1] == '\n') n--;
1959       if (buf[n - 1] == '\r') n--;
1960       if (buf[n - 1] == '.') n--;
1961       buf[n] = '\0';
1962     }
1963     return n;
1964   }
1965 
1966   if (errno != 0) {
1967     // C runtime error that has no corresponding DOS error code
1968     const char* s = os::strerror(errno);
1969     size_t n = strlen(s);
1970     if (n >= len) n = len - 1;
1971     strncpy(buf, s, n);
1972     buf[n] = '\0';
1973     return n;
1974   }
1975 
1976   return 0;
1977 }
1978 
1979 int os::get_last_error() {
1980   DWORD error = GetLastError();
1981   if (error == 0) {
1982     error = errno;
1983   }
1984   return (int)error;
1985 }
1986 
1987 // sun.misc.Signal
1988 // NOTE that this is a workaround for an apparent kernel bug where if
1989 // a signal handler for SIGBREAK is installed then that signal handler
1990 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1991 // See bug 4416763.
1992 static void (*sigbreakHandler)(int) = NULL;
1993 
1994 static void UserHandler(int sig, void *siginfo, void *context) {
1995   os::signal_notify(sig);
1996   // We need to reinstate the signal handler each time...
1997   os::signal(sig, (void*)UserHandler);
1998 }
1999 
2000 void* os::user_handler() {
2001   return (void*) UserHandler;
2002 }
2003 
2004 void* os::signal(int signal_number, void* handler) {
2005   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
2006     void (*oldHandler)(int) = sigbreakHandler;
2007     sigbreakHandler = (void (*)(int)) handler;
2008     return (void*) oldHandler;
2009   } else {
2010     return (void*)::signal(signal_number, (void (*)(int))handler);
2011   }
2012 }
2013 
2014 void os::signal_raise(int signal_number) {
2015   raise(signal_number);
2016 }
2017 
2018 // The Win32 C runtime library maps all console control events other than ^C
2019 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2020 // logoff, and shutdown events.  We therefore install our own console handler
2021 // that raises SIGTERM for the latter cases.
2022 //
2023 static BOOL WINAPI consoleHandler(DWORD event) {
2024   switch (event) {
2025   case CTRL_C_EVENT:
2026     if (VMError::is_error_reported()) {
2027       // Ctrl-C is pressed during error reporting, likely because the error
2028       // handler fails to abort. Let VM die immediately.
2029       os::die();
2030     }
2031 
2032     os::signal_raise(SIGINT);
2033     return TRUE;
2034     break;
2035   case CTRL_BREAK_EVENT:
2036     if (sigbreakHandler != NULL) {
2037       (*sigbreakHandler)(SIGBREAK);
2038     }
2039     return TRUE;
2040     break;
2041   case CTRL_LOGOFF_EVENT: {
2042     // Don't terminate JVM if it is running in a non-interactive session,
2043     // such as a service process.
2044     USEROBJECTFLAGS flags;
2045     HANDLE handle = GetProcessWindowStation();
2046     if (handle != NULL &&
2047         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2048         sizeof(USEROBJECTFLAGS), NULL)) {
2049       // If it is a non-interactive session, let the next handler deal
2050       // with it.
2051       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2052         return FALSE;
2053       }
2054     }
2055   }
2056   case CTRL_CLOSE_EVENT:
2057   case CTRL_SHUTDOWN_EVENT:
2058     os::signal_raise(SIGTERM);
2059     return TRUE;
2060     break;
2061   default:
2062     break;
2063   }
2064   return FALSE;
2065 }
2066 
2067 // The following code was moved from os.cpp to make this
2068 // code platform specific, which it is by its very nature.
2069 
2070 // Return maximum OS signal used + 1 for internal use only
2071 // Used as exit signal for signal_thread
2072 int os::sigexitnum_pd() {
2073   return NSIG;
2074 }
2075 
2076 // a counter for each possible signal value, including signal_thread exit signal
2077 static volatile jint pending_signals[NSIG+1] = { 0 };
2078 static Semaphore* sig_sem = NULL;
2079 
2080 static void jdk_misc_signal_init() {
2081   // Initialize signal structures
2082   memset((void*)pending_signals, 0, sizeof(pending_signals));
2083 
2084   // Initialize signal semaphore
2085   sig_sem = new Semaphore();
2086 
2087   // Programs embedding the VM do not want it to attempt to receive
2088   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2089   // shutdown hooks mechanism introduced in 1.3.  For example, when
2090   // the VM is run as part of a Windows NT service (i.e., a servlet
2091   // engine in a web server), the correct behavior is for any console
2092   // control handler to return FALSE, not TRUE, because the OS's
2093   // "final" handler for such events allows the process to continue if
2094   // it is a service (while terminating it if it is not a service).
2095   // To make this behavior uniform and the mechanism simpler, we
2096   // completely disable the VM's usage of these console events if -Xrs
2097   // (=ReduceSignalUsage) is specified.  This means, for example, that
2098   // the CTRL-BREAK thread dump mechanism is also disabled in this
2099   // case.  See bugs 4323062, 4345157, and related bugs.
2100 
2101   // Add a CTRL-C handler
2102   SetConsoleCtrlHandler(consoleHandler, TRUE);
2103 }
2104 
2105 void os::signal_notify(int sig) {
2106   if (sig_sem != NULL) {
2107     Atomic::inc(&pending_signals[sig]);
2108     sig_sem->signal();
2109   } else {
2110     // The signal thread is not created when ReduceSignalUsage is set, and
2111     // jdk_misc_signal_init() is not called.
2112     assert(ReduceSignalUsage, "signal semaphore should be created");
2113   }
2114 }
2115 
2116 static int check_pending_signals() {
2117   while (true) {
2118     for (int i = 0; i < NSIG + 1; i++) {
2119       jint n = pending_signals[i];
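      // Claim one pending occurrence of signal i: the cmpxchg only succeeds
      // (returning the old value n) if no other thread changed the counter
      // since we read it; otherwise the slot is re-examined on a later pass
      // of the outer loop.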
2120       if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
2121         return i;
2122       }
2123     }
2124     JavaThread *thread = JavaThread::current();
2125 
2126     ThreadBlockInVM tbivm(thread);
2127 
2128     bool threadIsSuspended;
2129     do {
2130       thread->set_suspend_equivalent();
2131       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2132       sig_sem->wait();
2133 
2134       // were we externally suspended while we were waiting?
2135       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2136       if (threadIsSuspended) {
2137         // The semaphore has been incremented, but while we were waiting
2138         // another thread suspended us. We don't want to continue running
2139         // while suspended because that would surprise the thread that
2140         // suspended us.
2141         sig_sem->signal();
2142 
2143         thread->java_suspend_self();
2144       }
2145     } while (threadIsSuspended);
2146   }
2147 }
2148 
2149 int os::signal_wait() {
2150   return check_pending_signals();
2151 }
2152 
2153 // Implicit OS exception handling
2154 
2155 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2156                       address handler) {
2157   JavaThread* thread = (JavaThread*) Thread::current_or_null();
2158   // Save pc in thread
2159 #if defined(_M_ARM64)
2160   // Do not blow up if no thread info available.
2161   if (thread) {
2162     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Pc);
2163   }
2164   // Set pc to handler
2165   exceptionInfo->ContextRecord->Pc = (DWORD64)handler;
2166 #elif defined(_M_AMD64)
2167   // Do not blow up if no thread info available.
2168   if (thread) {
2169     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2170   }
2171   // Set pc to handler
2172   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2173 #else
2174   // Do not blow up if no thread info available.
2175   if (thread) {
2176     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2177   }
2178   // Set pc to handler
2179   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2180 #endif
2181 
2182   // Continue the execution
2183   return EXCEPTION_CONTINUE_EXECUTION;
2184 }
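
// For example, the SafeFetch handling further below calls
// Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc)),
// redirecting the faulting SafeFetch32/SafeFetchN stub to resume at its
// continuation address instead of crashing.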
2185 
2186 
2187 // Used for PostMortemDump
2188 extern "C" void safepoints();
2189 extern "C" void find(int x);
2190 extern "C" void events();
2191 
2192 // According to Windows API documentation, an illegal instruction sequence should generate
2193 // the 0xC000001C exception code. However, real-world experience shows that occasionally
2194 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2195 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2196 
2197 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2198 
2199 // From "Execution Protection in the Windows Operating System" draft 0.35
2200 // Once a system header becomes available, the "real" define should be
2201 // included or copied here.
2202 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2203 
2204 // Windows Vista/2008 heap corruption check
2205 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2206 
2207 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2208 // C++ compiler contain this error code. Because this is a compiler-generated
2209 // error, the code is not listed in the Win32 API header files.
2210 // The code is actually a cryptic mnemonic device, with the initial "E"
2211 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2212 // ASCII values of "msc".
2213 
2214 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2215 
2216 #define def_excpt(val) { #val, (val) }
2217 
2218 static const struct { const char* name; uint number; } exceptlabels[] = {
2219     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2220     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2221     def_excpt(EXCEPTION_BREAKPOINT),
2222     def_excpt(EXCEPTION_SINGLE_STEP),
2223     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2224     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2225     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2226     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2227     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2228     def_excpt(EXCEPTION_FLT_OVERFLOW),
2229     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2230     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2231     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2232     def_excpt(EXCEPTION_INT_OVERFLOW),
2233     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2234     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2235     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2236     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2237     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2238     def_excpt(EXCEPTION_STACK_OVERFLOW),
2239     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2240     def_excpt(EXCEPTION_GUARD_PAGE),
2241     def_excpt(EXCEPTION_INVALID_HANDLE),
2242     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2243     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2244 };
2245 
2246 #undef def_excpt
2247 
2248 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2249   uint code = static_cast<uint>(exception_code);
2250   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2251     if (exceptlabels[i].number == code) {
2252       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2253       return buf;
2254     }
2255   }
2256 
2257   return NULL;
2258 }
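
// Usage sketch (illustrative only): translate an exception code for logging,
// falling back to the raw code when it is not in the table above.
//
//   char buf[64];
//   const char* name = os::exception_name(exception_code, buf, sizeof(buf));
//   if (name == NULL) {
//     jio_snprintf(buf, sizeof(buf), "unknown exception 0x%x", exception_code);
//     name = buf;
//   }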
2259 
2260 //-----------------------------------------------------------------------------
2261 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2262   // handle exception caused by idiv; should only happen for -MinInt/-1
2263   // (division by zero is handled explicitly)
2264 #if defined(_M_ARM64)
2265   PCONTEXT ctx = exceptionInfo->ContextRecord;
2266   address pc = (address)ctx->Sp;
2267   assert(pc[0] == 0x83, "not an sdiv opcode"); // FIXME: did I get the right opcode?
2268   assert(ctx->X4 == min_jint, "unexpected idiv exception");
2269   // set correct result values and continue after idiv instruction
2270   ctx->Pc = (uint64_t)pc + 4;        // idiv reg, reg, reg  is 4 bytes
2271   ctx->X4 = (uint64_t)min_jint;      // result
2272   ctx->X5 = (uint64_t)0;             // remainder
2273   // Continue the execution
2274 #elif defined(_M_AMD64)
2275   PCONTEXT ctx = exceptionInfo->ContextRecord;
2276   address pc = (address)ctx->Rip;
2277   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2278   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2279   if (pc[0] == 0xF7) {
2280     // set correct result values and continue after idiv instruction
2281     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2282   } else {
2283     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2284   }
2285   // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation).
2286   // This is the case because the exception only happens for -MinValue/-1, and -MinValue is always in rax because of the
2287   // idiv opcode (0xF7).
2288   ctx->Rdx = (DWORD)0;             // remainder
2289   // Continue the execution
2290 #else
2291   PCONTEXT ctx = exceptionInfo->ContextRecord;
2292   address pc = (address)ctx->Eip;
2293   assert(pc[0] == 0xF7, "not an idiv opcode");
2294   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2295   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2296   // set correct result values and continue after idiv instruction
2297   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2298   ctx->Eax = (DWORD)min_jint;      // result
2299   ctx->Edx = (DWORD)0;             // remainder
2300   // Continue the execution
2301 #endif
2302   return EXCEPTION_CONTINUE_EXECUTION;
2303 }
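
// Worked example (for illustration): at the Java level, min_jint / -1 must
// evaluate to min_jint and min_jint % -1 to 0. The hardware divide cannot
// represent +2^31 and traps, which Windows reports as EXCEPTION_INT_OVERFLOW;
// the handler above stores the expected quotient and remainder into the
// context and resumes execution just past the divide instruction.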
2304 
2305 #if defined(_M_AMD64) || defined(_M_IX86)
2306 //-----------------------------------------------------------------------------
2307 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2308   PCONTEXT ctx = exceptionInfo->ContextRecord;
2309 #ifndef  _WIN64
2310   // handle exception caused by native method modifying control word
2311   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2312 
2313   switch (exception_code) {
2314   case EXCEPTION_FLT_DENORMAL_OPERAND:
2315   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2316   case EXCEPTION_FLT_INEXACT_RESULT:
2317   case EXCEPTION_FLT_INVALID_OPERATION:
2318   case EXCEPTION_FLT_OVERFLOW:
2319   case EXCEPTION_FLT_STACK_CHECK:
2320   case EXCEPTION_FLT_UNDERFLOW:
2321     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2322     if (fp_control_word != ctx->FloatSave.ControlWord) {
2323       // Restore FPCW and mask out FLT exceptions
2324       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2325       // Mask out pending FLT exceptions
2326       ctx->FloatSave.StatusWord &=  0xffffff00;
2327       return EXCEPTION_CONTINUE_EXECUTION;
2328     }
2329   }
2330 
2331   if (prev_uef_handler != NULL) {
2332     // We didn't handle this exception so pass it to the previous
2333     // UnhandledExceptionFilter.
2334     return (prev_uef_handler)(exceptionInfo);
2335   }
2336 #else // !_WIN64
2337   // On Windows, the mxcsr control bits are non-volatile across calls
2338   // See also CR 6192333
2339   //
2340   jint MxCsr = INITIAL_MXCSR;
2341   // we can't use StubRoutines::addr_mxcsr_std()
2342   // because in Win64 mxcsr is not saved there
2343   if (MxCsr != ctx->MxCsr) {
2344     ctx->MxCsr = MxCsr;
2345     return EXCEPTION_CONTINUE_EXECUTION;
2346   }
2347 #endif // !_WIN64
2348 
2349   return EXCEPTION_CONTINUE_SEARCH;
2350 }
2351 #endif
2352 
2353 static inline void report_error(Thread* t, DWORD exception_code,
2354                                 address addr, void* siginfo, void* context) {
2355   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2356 
2357   // If UseOsErrorReporting, this will return here and save the error file
2358   // somewhere where we can find it in the minidump.
2359 }
2360 
2361 #if !defined(USE_VECTORED_EXCEPTION_HANDLING) && INCLUDE_AOT
2362 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2363   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2364   address addr = (address) exceptionRecord->ExceptionInformation[1];
2365   address pc = (address) exceptionInfo->ContextRecord->Rip;
2366 
2367   // Handle the case where we get an implicit exception in AOT generated
2368   // code.  Loaded AOT DLLs are not registered for structured exceptions.
2369   // If the exception occurred in the codeCache or AOT code, pass control
2370   // to our normal exception handler.
2371   CodeBlob* cb = CodeCache::find_blob(pc);
2372   if (cb != NULL) {
2373     return topLevelExceptionFilter(exceptionInfo);
2374   }
2375 
2376   return EXCEPTION_CONTINUE_SEARCH;
2377 }
2378 #endif
2379 
2380 //-----------------------------------------------------------------------------
2381 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2382   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2383   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2384 #if defined(_M_ARM64)
2385   address pc = (address)exceptionInfo->ContextRecord->Pc;
2386 #elif defined(_M_AMD64)
2387   address pc = (address) exceptionInfo->ContextRecord->Rip;
2388 #else
2389   address pc = (address) exceptionInfo->ContextRecord->Eip;
2390 #endif
2391   Thread* t = Thread::current_or_null_safe();
2392 
2393   // Handle SafeFetch32 and SafeFetchN exceptions.
2394   if (StubRoutines::is_safefetch_fault(pc)) {
2395     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2396   }
2397 
2398 #ifndef _WIN64
2399   // Execution protection violation - win32 running on AMD64 only
2400   // Handled first to avoid misdiagnosis as a "normal" access violation;
2401   // This is safe to do because we have a new/unique ExceptionInformation
2402   // code for this condition.
2403   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2404     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2405     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2406     address addr = (address) exceptionRecord->ExceptionInformation[1];
2407 
2408     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2409       int page_size = os::vm_page_size();
2410 
2411       // Make sure the pc and the faulting address are sane.
2412       //
2413       // If an instruction spans a page boundary, and the page containing
2414       // the beginning of the instruction is executable but the following
2415       // page is not, the pc and the faulting address might be slightly
2416       // different - we still want to unguard the 2nd page in this case.
2417       //
2418       // 15 bytes seems to be a (very) safe value for max instruction size.
2419       bool pc_is_near_addr =
2420         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2421       bool instr_spans_page_boundary =
2422         (align_down((intptr_t) pc ^ (intptr_t) addr,
2423                          (intptr_t) page_size) > 0);
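      // Worked example of the check above (illustrative): with 4K pages,
      // pc = 0x...0FFE and addr = 0x...1002 give pc ^ addr = 0x1FFC, and
      // align_down(0x1FFC, 0x1000) = 0x1000 > 0, i.e. the two addresses lie
      // on different pages, so the instruction spans the page boundary.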
2424 
2425       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2426         static volatile address last_addr =
2427           (address) os::non_memory_address_word();
2428 
2429         // In conservative mode, don't unguard unless the address is in the VM
2430         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2431             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2432 
2433           // Set memory to RWX and retry
2434           address page_start = align_down(addr, page_size);
2435           bool res = os::protect_memory((char*) page_start, page_size,
2436                                         os::MEM_PROT_RWX);
2437 
2438           log_debug(os)("Execution protection violation "
2439                         "at " INTPTR_FORMAT
2440                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2441                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2442 
2443           // Set last_addr so if we fault again at the same address, we don't
2444           // end up in an endless loop.
2445           //
2446           // There are two potential complications here.  Two threads trapping
2447           // at the same address at the same time could cause one of the
2448           // threads to think it was already unguarded, and abort the VM.  Likely
2449           // very rare.
2450           //
2451           // The other race involves two threads alternately trapping at
2452           // different addresses and failing to unguard the page, resulting in
2453           // an endless loop.  This condition is probably even more unlikely
2454           // than the first.
2455           //
2456           // Although both cases could be avoided by using locks or thread
2457           // local last_addr, these solutions are unnecessary complication:
2458           // this handler is a best-effort safety net, not a complete solution.
2459           // It is disabled by default and should only be used as a workaround
2460           // in case we missed any no-execute-unsafe VM code.
2461 
2462           last_addr = addr;
2463 
2464           return EXCEPTION_CONTINUE_EXECUTION;
2465         }
2466       }
2467 
2468       // Last unguard failed or not unguarding
2469       tty->print_raw_cr("Execution protection violation");
2470       report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2471                    exceptionInfo->ContextRecord);
2472       return EXCEPTION_CONTINUE_SEARCH;
2473     }
2474   }
2475 #endif // _WIN64
2476 
2477 #if defined(_M_AMD64) || defined(_M_IX86)
2478   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2479       VM_Version::is_cpuinfo_segv_addr(pc)) {
2480     // Verify that the OS saves/restores AVX registers.
2481     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2482   }
2483 #endif
2484 
2485   if (t != NULL && t->is_Java_thread()) {
2486     JavaThread* thread = (JavaThread*) t;
2487     bool in_java = thread->thread_state() == _thread_in_Java;
2488 
2489     // Handle potential stack overflows up front.
2490     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2491       if (thread->stack_guards_enabled()) {
2492         if (in_java) {
2493           frame fr;
2494           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2495           address addr = (address) exceptionRecord->ExceptionInformation[1];
2496           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2497             assert(fr.is_java_frame(), "Must be a Java frame");
2498             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2499           }
2500         }
2501         // Yellow zone violation.  The o/s has unprotected the first yellow
2502         // zone page for us.  Note:  must call disable_stack_yellow_zone to
2503         // update the enabled status, even if the zone contains only one page.
2504         assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
2505         thread->disable_stack_yellow_reserved_zone();
2506         // If not in java code, return and hope for the best.
2507         return in_java
2508             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2509             :  EXCEPTION_CONTINUE_EXECUTION;
2510       } else {
2511         // Fatal red zone violation.
2512         thread->disable_stack_red_zone();
2513         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2514         report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2515                       exceptionInfo->ContextRecord);
2516         return EXCEPTION_CONTINUE_SEARCH;
2517       }
2518     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2519       // Either stack overflow or null pointer exception.
2520       if (in_java) {
2521         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2522         address addr = (address) exceptionRecord->ExceptionInformation[1];
2523         address stack_end = thread->stack_end();
2524         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2525           // Stack overflow.
2526           assert(!os::uses_stack_guard_pages(),
2527                  "should be caught by red zone code above.");
2528           return Handle_Exception(exceptionInfo,
2529                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2530         }
2531         // Check for safepoint polling and implicit null
2532         // We only expect null pointers in the stubs (vtable);
2533         // the rest are checked explicitly now.
2534         CodeBlob* cb = CodeCache::find_blob(pc);
2535         if (cb != NULL) {
2536           if (SafepointMechanism::is_poll_address(addr)) {
2537             address stub = SharedRuntime::get_poll_stub(pc);
2538             return Handle_Exception(exceptionInfo, stub);
2539           }
2540         }
2541         {
2542 #ifdef _WIN64
2543           // If it's a legal stack address, map in the entire region
2544           //
2545           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2546           address addr = (address) exceptionRecord->ExceptionInformation[1];
2547           if (thread->is_in_usable_stack(addr)) {
2548             addr = (address)((uintptr_t)addr &
2549                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2550             os::commit_memory((char *)addr, thread->stack_base() - addr,
2551                               !ExecMem);
2552             return EXCEPTION_CONTINUE_EXECUTION;
2553           } else
2554 #endif
2555           {
2556             // Null pointer exception.
2557             if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
2558               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2559               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2560             }
2561             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2562                          exceptionInfo->ContextRecord);
2563             return EXCEPTION_CONTINUE_SEARCH;
2564           }
2565         }
2566       }
2567 
2568 #ifdef _M_ARM64
2569       // Unsafe memory access
2570       CompiledMethod* nm = NULL;
2571       JavaThread* thread = (JavaThread*)t;
2572       if (in_java) {
2573         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2574         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
2575       }
2576 
2577       bool is_unsafe_arraycopy = (thread->thread_state() == _thread_in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
2578       if (is_unsafe_arraycopy ||
2579           ((thread->thread_state() == _thread_in_vm ||
2580               thread->thread_state() == _thread_in_native) &&
2581             thread->doing_unsafe_access()) ||
2582           (nm != NULL && nm->has_unsafe_access())) {
2583         address next_pc =  Assembler::locate_next_instruction(pc);
2584         if (is_unsafe_arraycopy) {
2585           next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
2586         }
2587         return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
2588       }
2589 #endif
2590 
2591 #ifdef _WIN64
2592       // Special care for fast JNI field accessors.
2593       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2594       // in and the heap gets shrunk before the field access.
2595       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2596         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2597         if (addr != (address)-1) {
2598           return Handle_Exception(exceptionInfo, addr);
2599         }
2600       }
2601 #endif
2602 
2603       // Stack overflow or null pointer exception in native code.
2604       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2605                    exceptionInfo->ContextRecord);
2606       return EXCEPTION_CONTINUE_SEARCH;
2607     } // /EXCEPTION_ACCESS_VIOLATION
2608     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2609 
2610     if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
2611       CompiledMethod* nm = NULL;
2612       JavaThread* thread = (JavaThread*)t;
2613       if (in_java) {
2614         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2615         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
2616       }
2617 
2618       bool is_unsafe_arraycopy = (thread->thread_state() == _thread_in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
2619       if (((thread->thread_state() == _thread_in_vm ||
2620            thread->thread_state() == _thread_in_native ||
2621            is_unsafe_arraycopy) &&
2622           thread->doing_unsafe_access()) ||
2623           (nm != NULL && nm->has_unsafe_access())) {
2624         address next_pc =  Assembler::locate_next_instruction(pc);
2625         if (is_unsafe_arraycopy) {
2626           next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
2627         }
2628         return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
2629       }
2630     }
2631 
2632 #ifdef _M_ARM64
2633     if (in_java &&
2634         (exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
2635           exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
2636       if (nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
2637         if (TraceTraps) {
2638           tty->print_cr("trap: zombie_not_entrant");
2639         }
2640         return Handle_Exception(exceptionInfo, SharedRuntime::get_handle_wrong_method_stub());
2641       }
2642     }
2643 #endif
2644 
2645     if (in_java) {
2646       switch (exception_code) {
2647       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2648         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2649 
2650       case EXCEPTION_INT_OVERFLOW:
2651         return Handle_IDiv_Exception(exceptionInfo);
2652 
2653       } // switch
2654     }
2655 
2656 #if defined(_M_AMD64) || defined(_M_IX86)
2657     if (((thread->thread_state() == _thread_in_Java) ||
2658          (thread->thread_state() == _thread_in_native)) &&
2659          exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2660       LONG result=Handle_FLT_Exception(exceptionInfo);
2661       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2662     }
2663 #endif
2664   }
2665 
2666   if (exception_code != EXCEPTION_BREAKPOINT) {
2667     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2668                  exceptionInfo->ContextRecord);
2669   }
2670   return EXCEPTION_CONTINUE_SEARCH;
2671 }
2672 
2673 #ifndef _WIN64
2674 // Special care for fast JNI accessors.
2675 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2676 // the heap gets shrunk before the field access.
2677 // Need to install our own structured exception handler since native code may
2678 // install its own.
2679 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2680   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2681   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2682     address pc = (address) exceptionInfo->ContextRecord->Eip;
2683     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2684     if (addr != (address)-1) {
2685       return Handle_Exception(exceptionInfo, addr);
2686     }
2687   }
2688   return EXCEPTION_CONTINUE_SEARCH;
2689 }
2690 
2691 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2692   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2693                                                      jobject obj,           \
2694                                                      jfieldID fieldID) {    \
2695     __try {                                                                 \
2696       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2697                                                                  obj,       \
2698                                                                  fieldID);  \
2699     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2700                                               _exception_info())) {         \
2701     }                                                                       \
2702     return 0;                                                               \
2703   }
2704 
2705 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2706 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2707 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2708 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2709 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2710 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2711 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2712 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
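
// For illustration, DEFINE_FAST_GETFIELD(jboolean, bool, Boolean) above
// expands (roughly) to:
//
//   jboolean JNICALL jni_fast_GetBooleanField_wrapper(JNIEnv *env,
//                                                     jobject obj,
//                                                     jfieldID fieldID) {
//     __try {
//       return (*JNI_FastGetField::jni_fast_GetBooleanField_fp)(env, obj, fieldID);
//     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)_exception_info())) {
//     }
//     return 0;
//   }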
2713 
2714 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2715   switch (type) {
2716   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2717   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2718   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2719   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2720   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2721   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2722   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2723   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2724   default:        ShouldNotReachHere();
2725   }
2726   return (address)-1;
2727 }
2728 #endif
2729 
2730 // Virtual Memory
2731 
2732 int os::vm_page_size() { return os::win32::vm_page_size(); }
2733 int os::vm_allocation_granularity() {
2734   return os::win32::vm_allocation_granularity();
2735 }
2736 
2737 // Windows large page support is available on Windows 2003. In order to use
2738 // large page memory, the administrator must first assign additional privilege
2739 // to the user:
2740 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2741 //   + select Local Policies -> User Rights Assignment
2742 //   + double click "Lock pages in memory", add users and/or groups
2743 //   + reboot
2744 // Note the above steps are needed for administrator as well, as administrators
2745 // by default do not have the privilege to lock pages in memory.
2746 //
2747 // Note about Windows 2003: although the API supports committing large page
2748 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2749 // scenario, I found through experiment that it only uses large pages if the entire
2750 // memory region is reserved and committed in a single VirtualAlloc() call.
2751 // This makes Windows large page support more or less like Solaris ISM, in
2752 // that the entire heap must be committed upfront. This probably will change
2753 // in the future, if so the code below needs to be revisited.
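
// For illustration, a single-call large-page allocation (a minimal sketch,
// not the code path used further below; 'size' is a hypothetical request)
// looks like:
//
//   size_t lp_size = GetLargePageMinimum();
//   void*  p = VirtualAlloc(NULL, align_up(size, lp_size),
//                           MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
//                           PAGE_READWRITE);
//   // fails unless the process holds the "Lock pages in memory" privilege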
2754 
2755 #ifndef MEM_LARGE_PAGES
2756   #define MEM_LARGE_PAGES 0x20000000
2757 #endif
2758 
2759 #define VirtualFreeChecked(mem, size, type)                       \
2760   do {                                                            \
2761     bool ret = VirtualFree(mem, size, type);                      \
2762     assert(ret, "Failed to free memory: " PTR_FORMAT, p2i(mem));  \
2763   } while (false)
2764 
2765 // The number of bytes is set up to match 1 pixel and 32 bits per pixel.
2766 static const int gdi_tiny_bitmap_width_bytes = 4;
2767 
2768 static HBITMAP gdi_create_tiny_bitmap(void* mem) {
2769   // The documentation for CreateBitmap states a word-alignment requirement.
2770   STATIC_ASSERT(is_aligned_(gdi_tiny_bitmap_width_bytes, sizeof(WORD)));
2771 
2772   // Some callers use this function to test if memory crossing separate memory
2773   // reservations can be used. Create a height of 2 to make sure that one pixel
2774   // ends up in the first reservation and the other in the second.
2775   int nHeight = 2;
2776 
2777   assert(is_aligned(mem, gdi_tiny_bitmap_width_bytes), "Incorrect alignment");
2778 
2779   // Width is one pixel and correlates with gdi_tiny_bitmap_width_bytes.
2780   int nWidth = 1;
2781 
2782   // Calculate bit count - will be 32.
2783   UINT nBitCount = gdi_tiny_bitmap_width_bytes / nWidth * BitsPerByte;
2784 
2785   return CreateBitmap(
2786       nWidth,
2787       nHeight,
2788       1,         // nPlanes
2789       nBitCount,
2790       mem);      // lpBits
2791 }
2792 
2793 // It has been found that some of the GDI functions fail under these two situations:
2794 //  1) When used with large pages
2795 //  2) When mem crosses the boundary between two separate memory reservations.
2796 //
2797 // This is a small test used to see if the current GDI implementation is
2798 // susceptible to any of these problems.
2799 static bool gdi_can_use_memory(void* mem) {
2800   HBITMAP bitmap = gdi_create_tiny_bitmap(mem);
2801   if (bitmap != NULL) {
2802     DeleteObject(bitmap);
2803     return true;
2804   }
2805 
2806   // Verify that the bitmap could be created with a normal page.
2807   // If this fails, the testing method above isn't reliable.
2808 #ifdef ASSERT
2809   void* verify_mem = ::malloc(4 * 1024);
2810   HBITMAP verify_bitmap = gdi_create_tiny_bitmap(verify_mem);
2811   if (verify_bitmap == NULL) {
2812     fatal("Couldn't create test bitmap with malloced memory");
2813   } else {
2814     DeleteObject(verify_bitmap);
2815   }
2816   ::free(verify_mem);
2817 #endif
2818 
2819   return false;
2820 }
2821 
2822 // Test if GDI functions work when memory spans
2823 // two adjacent memory reservations.
2824 static bool gdi_can_use_split_reservation_memory(bool use_large_pages, size_t granule) {
2825   DWORD mem_large_pages = use_large_pages ? MEM_LARGE_PAGES : 0;
2826 
2827   // Find virtual memory range. Two granules for regions and one for alignment.
2828   void* reserved = VirtualAlloc(NULL,
2829                                 granule * 3,
2830                                 MEM_RESERVE,
2831                                 PAGE_NOACCESS);
2832   if (reserved == NULL) {
2833     // Can't proceed with test - pessimistically report false
2834     return false;
2835   }
2836   VirtualFreeChecked(reserved, 0, MEM_RELEASE);
2837 
2838   // Ensure proper alignment
2839   void* res0 = align_up(reserved, granule);
2840   void* res1 = (char*)res0 + granule;
2841 
2842   // Reserve and commit the first part
2843   void* mem0 = VirtualAlloc(res0,
2844                             granule,
2845                             MEM_RESERVE|MEM_COMMIT|mem_large_pages,
2846                             PAGE_READWRITE);
2847   if (mem0 != res0) {
2848     // Can't proceed with test - pessimistically report false
2849     return false;
2850   }
2851 
2852   // Reserve and commit the second part
2853   void* mem1 = VirtualAlloc(res1,
2854                             granule,
2855                             MEM_RESERVE|MEM_COMMIT|mem_large_pages,
2856                             PAGE_READWRITE);
2857   if (mem1 != res1) {
2858     VirtualFreeChecked(mem0, 0, MEM_RELEASE);
2859     // Can't proceed with test - pessimistically report false
2860     return false;
2861   }
2862 
2863   // Set the bitmap's bits to point one "width" bytes before, so that
2864   // the bitmap extends across the reservation boundary.
2865   void* bitmapBits = (char*)mem1 - gdi_tiny_bitmap_width_bytes;
2866 
2867   bool success = gdi_can_use_memory(bitmapBits);
2868 
2869   VirtualFreeChecked(mem1, 0, MEM_RELEASE);
2870   VirtualFreeChecked(mem0, 0, MEM_RELEASE);
2871 
2872   return success;
2873 }
2874 
2875 // Container for NUMA node list info
2876 class NUMANodeListHolder {
2877  private:
2878   int *_numa_used_node_list;  // allocated below
2879   int _numa_used_node_count;
2880 
2881   void free_node_list() {
2882     FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2883   }
2884 
2885  public:
2886   NUMANodeListHolder() {
2887     _numa_used_node_count = 0;
2888     _numa_used_node_list = NULL;
2889     // do rest of initialization in build routine (after function pointers are set up)
2890   }
2891 
2892   ~NUMANodeListHolder() {
2893     free_node_list();
2894   }
2895 
2896   bool build() {
2897     DWORD_PTR proc_aff_mask;
2898     DWORD_PTR sys_aff_mask;
2899     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2900     ULONG highest_node_number;
2901     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2902     free_node_list();
2903     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2904     for (unsigned int i = 0; i <= highest_node_number; i++) {
2905       ULONGLONG proc_mask_numa_node;
2906       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2907       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2908         _numa_used_node_list[_numa_used_node_count++] = i;
2909       }
2910     }
2911     return (_numa_used_node_count > 1);
2912   }
2913 
2914   int get_count() { return _numa_used_node_count; }
2915   int get_node_list_entry(int n) {
2916     // for indexes out of range, returns -1
2917     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2918   }
2919 
2920 } numa_node_list_holder;
2921 
2922 static size_t _large_page_size = 0;
2923 
2924 static bool request_lock_memory_privilege() {
2925   HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2926                                 os::current_process_id());
2927 
2928   bool success = false;
2929   HANDLE hToken = NULL;
2930   LUID luid;
2931   if (hProcess != NULL &&
2932       OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &hToken) &&
2933       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2934 
2935     TOKEN_PRIVILEGES tp;
2936     tp.PrivilegeCount = 1;
2937     tp.Privileges[0].Luid = luid;
2938     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2939 
2940     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2941     // privilege. Check GetLastError() too. See MSDN document.
2942     if (AdjustTokenPrivileges(hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2943         (GetLastError() == ERROR_SUCCESS)) {
2944       success = true;
2945     }
2946   }
2947 
2948   // Cleanup
2949   if (hProcess != NULL) {
2950     CloseHandle(hProcess);
2951   }
2952   if (hToken != NULL) {
2953     CloseHandle(hToken);
2954   }
2955 
2956   return success;
2957 }
2958 
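// One-time setup for UseNUMAInterleaving: aligns NUMAInterleaveGranularity,
// builds the list of NUMA nodes this process may run on, and verifies that
// GDI can cope with the resulting split reservations. Returns false (and
// optionally warns) if interleaving cannot be used.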
2959 static bool numa_interleaving_init() {
2960   bool success = false;
2961 
2962   // print a warning if UseNUMAInterleaving flag is specified on command line
2963   bool warn_on_failure = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2964 
2965 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2966 
2967   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2968   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2969   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2970 
2971   if (!numa_node_list_holder.build()) {
2972     WARN("Process does not cover multiple NUMA nodes.");
2973     WARN("...Ignoring UseNUMAInterleaving flag.");
2974     return false;
2975   }
2976 
2977   if (!gdi_can_use_split_reservation_memory(UseLargePages, min_interleave_granularity)) {
2978     WARN("Windows GDI cannot handle split reservations.");
2979     WARN("...Ignoring UseNUMAInterleaving flag.");
2980     return false;
2981   }
2982 
2983   if (log_is_enabled(Debug, os, cpu)) {
2984     Log(os, cpu) log;
2985     log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2986     for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2987       log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2988     }
2989   }
2990 
2991 #undef WARN
2992 
2993   return true;
2994 }
2995 
// This routine is used whenever we need to reserve a contiguous VA range
// but must make a separate VirtualAlloc call for each piece of the range.
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003, but it may be set otherwise)
//  * UseNUMAInterleaving requires a separate node for each piece
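// Example (illustrative numbers only): with NUMAInterleaveGranularity = 4M and
// an allocation that starts 1M past a granule boundary, the first VirtualAlloc
// call covers the remaining 3M of that granule; each subsequent call covers one
// full 4M granule (or whatever remains at the tail), bound to the next node
// from numa_node_list_holder. See the bytes_to_rq computation below.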
3001 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
3002                                          DWORD prot,
3003                                          bool should_inject_error = false) {
3004   char * p_buf;
3005   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
3006   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
3007   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
3008 
  // First reserve enough address space in advance, since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large-page commits, but WS2003 does not allow reserving large-page space.
  // So we just use 4K pages for the reserve; this gives us a legal contiguous
  // address space. Then we deallocate that reservation and re-allocate it
  // using large pages.
3015   const size_t size_of_reserve = bytes + chunk_size;
3016   if (bytes > size_of_reserve) {
3017     // Overflowed.
3018     return NULL;
3019   }
3020   p_buf = (char *) VirtualAlloc(addr,
3021                                 size_of_reserve,  // size of Reserve
3022                                 MEM_RESERVE,
3023                                 PAGE_READWRITE);
3024   // If reservation failed, return NULL
3025   if (p_buf == NULL) return NULL;
3026   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
3027   os::release_memory(p_buf, bytes + chunk_size);
3028 
3029   // we still need to round up to a page boundary (in case we are using large pages)
3030   // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
3031   // instead we handle this in the bytes_to_rq computation below
3032   p_buf = align_up(p_buf, page_size);
3033 
3034   // now go through and allocate one chunk at a time until all bytes are
3035   // allocated
3036   size_t  bytes_remaining = bytes;
3037   // An overflow of align_up() would have been caught above
3038   // in the calculation of size_of_reserve.
3039   char * next_alloc_addr = p_buf;
3040   HANDLE hProc = GetCurrentProcess();
3041 
3042 #ifdef ASSERT
3043   // Variable for the failure injection
3044   int ran_num = os::random();
3045   size_t fail_after = ran_num % bytes;
3046 #endif
3047 
3048   int count=0;
3049   while (bytes_remaining) {
3050     // select bytes_to_rq to get to the next chunk_size boundary
3051 
3052     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
3053     // Note allocate and commit
3054     char * p_new;
3055 
3056 #ifdef ASSERT
3057     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
3058 #else
3059     const bool inject_error_now = false;
3060 #endif
3061 
3062     if (inject_error_now) {
3063       p_new = NULL;
3064     } else {
3065       if (!UseNUMAInterleaving) {
3066         p_new = (char *) VirtualAlloc(next_alloc_addr,
3067                                       bytes_to_rq,
3068                                       flags,
3069                                       prot);
3070       } else {
3071         // get the next node to use from the used_node_list
3072         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
3073         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
3074         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
3075       }
3076     }
3077 
3078     if (p_new == NULL) {
3079       // Free any allocated pages
3080       if (next_alloc_addr > p_buf) {
3081         // Some memory was committed so release it.
3082         size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // needs to create a dummy 'reserve' record to match
        // the release.
3086         MemTracker::record_virtual_memory_reserve((address)p_buf,
3087                                                   bytes_to_release, CALLER_PC);
3088         os::release_memory(p_buf, bytes_to_release);
3089       }
3090 #ifdef ASSERT
3091       if (should_inject_error) {
3092         log_develop_debug(pagesize)("Reserving pages individually failed.");
3093       }
3094 #endif
3095       return NULL;
3096     }
3097 
3098     bytes_remaining -= bytes_to_rq;
3099     next_alloc_addr += bytes_to_rq;
3100     count++;
3101   }
3102   // Although the memory is allocated individually, it is returned as one.
3103   // NMT records it as one block.
3104   if ((flags & MEM_COMMIT) != 0) {
3105     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
3106   } else {
3107     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
3108   }
3109 
3110   // made it this far, success
3111   return p_buf;
3112 }
3113 
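// Decide which large page size to use, or return 0 if large pages cannot be
// used: requires the lock-memory privilege and a non-zero GetLargePageMinimum(),
// honors LargePageSizeInBytes when it is a multiple of that minimum, and
// verifies both a test allocation and the GDI workaround.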
3114 static size_t large_page_init_decide_size() {
3115   // print a warning if any large page related flag is specified on command line
3116   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3117                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3118 
3119 #define WARN(msg) if (warn_on_failure) { warning(msg); }
3120 
3121   if (!request_lock_memory_privilege()) {
3122     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3123     return 0;
3124   }
3125 
3126   size_t size = GetLargePageMinimum();
3127   if (size == 0) {
3128     WARN("Large page is not supported by the processor.");
3129     return 0;
3130   }
3131 
3132 #if defined(IA32) || defined(AMD64)
3133   if (size > 4*M || LargePageSizeInBytes > 4*M) {
3134     WARN("JVM cannot use large pages bigger than 4mb.");
3135     return 0;
3136   }
3137 #endif
3138 
3139   if (LargePageSizeInBytes > 0 && LargePageSizeInBytes % size == 0) {
3140     size = LargePageSizeInBytes;
3141   }
3142 
3143   // Now test allocating a page
3144   void* large_page = VirtualAlloc(NULL,
3145                                   size,
3146                                   MEM_RESERVE|MEM_COMMIT|MEM_LARGE_PAGES,
3147                                   PAGE_READWRITE);
3148   if (large_page == NULL) {
3149     WARN("JVM cannot allocate one single large page.");
3150     return 0;
3151   }
3152 
  // Detect if GDI can use memory backed by large pages
  if (!gdi_can_use_memory(large_page)) {
    WARN("JVM cannot use large pages because of bug in Windows GDI.");
    // Release the test page before bailing out.
    VirtualFreeChecked(large_page, 0, MEM_RELEASE);
    return 0;
  }
3158 
3159   // Release test page
3160   VirtualFreeChecked(large_page, 0, MEM_RELEASE);
3161 
3162 #undef WARN
3163 
3164   return size;
3165 }
3166 
3167 void os::large_page_init() {
3168   if (!UseLargePages) {
3169     return;
3170   }
3171 
3172   _large_page_size = large_page_init_decide_size();
3173 
3174   const size_t default_page_size = (size_t) vm_page_size();
3175   if (_large_page_size > default_page_size) {
3176     _page_sizes[0] = _large_page_size;
3177     _page_sizes[1] = default_page_size;
3178     _page_sizes[2] = 0;
3179   }
3180 
3181   UseLargePages = _large_page_size != 0;
3182 
3183   if (UseLargePages && UseLargePagesIndividualAllocation) {
3184     if (!gdi_can_use_split_reservation_memory(true /* use_large_pages */, _large_page_size)) {
3185       if (FLAG_IS_CMDLINE(UseLargePagesIndividualAllocation)) {
3186         warning("Windows GDI cannot handle split reservations.");
3187         warning("...Ignoring UseLargePagesIndividualAllocation flag.");
3188       }
3189       UseLargePagesIndividualAllocation = false;
3190     }
3191   }
3192 }
3193 
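// Create a temporary backing file for the Java heap in the given directory.
// The file name is derived from the "/jvmheap.XXXXXX" template and the file is
// opened with O_TEMPORARY, so it is removed automatically when the descriptor
// is closed.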
3194 int os::create_file_for_heap(const char* dir) {
3195 
3196   const char name_template[] = "/jvmheap.XXXXXX";
3197 
3198   size_t fullname_len = strlen(dir) + strlen(name_template);
3199   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
3200   if (fullname == NULL) {
3201     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
3202     return -1;
3203   }
3204   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
3205   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
3206 
3207   os::native_path(fullname);
3208 
3209   char *path = _mktemp(fullname);
3210   if (path == NULL) {
3211     warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
3212     os::free(fullname);
3213     return -1;
3214   }
3215 
3216   int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
3217 
3218   os::free(fullname);
3219   if (fd < 0) {
3220     warning("Problem opening file for heap (%s)", os::strerror(errno));
3221     return -1;
3222   }
3223   return fd;
3224 }
3225 
// If 'base' is not NULL, the function returns NULL if it cannot map the memory at 'base'.
3227 char* os::map_memory_to_file(char* base, size_t size, int fd) {
3228   assert(fd != -1, "File descriptor is not valid");
3229 
3230   HANDLE fh = (HANDLE)_get_osfhandle(fd);
3231 #ifdef _LP64
3232   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3233     (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3234 #else
3235   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3236     0, (DWORD)size, NULL);
3237 #endif
3238   if (fileMapping == NULL) {
3239     if (GetLastError() == ERROR_DISK_FULL) {
3240       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
3241     }
3242     else {
3243       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3244     }
3245 
3246     return NULL;
3247   }
3248 
3249   LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
3250 
3251   CloseHandle(fileMapping);
3252 
3253   return (char*)addr;
3254 }
3255 
3256 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
3257   assert(fd != -1, "File descriptor is not valid");
3258   assert(base != NULL, "Base address cannot be NULL");
3259 
3260   release_memory(base, size);
3261   return map_memory_to_file(base, size, fd);
3262 }
3263 
3264 // On win32, one cannot release just a part of reserved memory, it's an
3265 // all or nothing deal.  When we split a reservation, we must break the
3266 // reservation into two reservations.
3267 void os::split_reserved_memory(char *base, size_t size, size_t split) {
3268 
3269   char* const split_address = base + split;
3270   assert(size > 0, "Sanity");
3271   assert(size > split, "Sanity");
3272   assert(split > 0, "Sanity");
3273   assert(is_aligned(base, os::vm_allocation_granularity()), "Sanity");
3274   assert(is_aligned(split_address, os::vm_allocation_granularity()), "Sanity");
3275 
3276   release_memory(base, size);
3277   reserve_memory(split, base);
3278   reserve_memory(size - split, split_address);
3279 
3280   // NMT: nothing to do here. Since Windows implements the split by
3281   //  releasing and re-reserving memory, the parts are already registered
3282   //  as individual mappings with NMT.
3283 
3284 }
3285 
// Multiple threads can race in this code, but unlike POSIX-like OSes it is not
// possible to unmap small sections of virtual space to get the requested alignment.
// Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3289 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3290   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3291          "Alignment must be a multiple of allocation granularity (page size)");
3292   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3293 
3294   size_t extra_size = size + alignment;
3295   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3296 
3297   char* aligned_base = NULL;
3298 
3299   do {
3300     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3301     if (extra_base == NULL) {
3302       return NULL;
3303     }
3304     // Do manual alignment
3305     aligned_base = align_up(extra_base, alignment);
3306 
3307     if (file_desc != -1) {
3308       os::unmap_memory(extra_base, extra_size);
3309     } else {
3310       os::release_memory(extra_base, extra_size);
3311     }
3312 
3313     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3314 
3315   } while (aligned_base == NULL);
3316 
3317   return aligned_base;
3318 }
3319 
3320 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3321   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3322          "reserve alignment");
3323   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3324   char* res;
3325   // note that if UseLargePages is on, all the areas that require interleaving
3326   // will go thru reserve_memory_special rather than thru here.
3327   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3328   if (!use_individual) {
3329     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3330   } else {
3331     elapsedTimer reserveTimer;
3332     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3333     // in numa interleaving, we have to allocate pages individually
3334     // (well really chunks of NUMAInterleaveGranularity size)
3335     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3336     if (res == NULL) {
3337       warning("NUMA page allocation failed");
3338     }
3339     if (Verbose && PrintMiscellaneous) {
3340       reserveTimer.stop();
3341       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3342                     reserveTimer.milliseconds(), reserveTimer.ticks());
3343     }
3344   }
3345   assert(res == NULL || addr == NULL || addr == res,
3346          "Unexpected address from reserve.");
3347 
3348   return res;
3349 }
3350 
3351 // Reserve memory at an arbitrary address, only if that area is
3352 // available (and not reserved for something else).
3353 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available.
3356   return reserve_memory(bytes, requested_addr);
3357 }
3358 
3359 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
3360   assert(file_desc >= 0, "file_desc is not valid");
3361   return map_memory_to_file(requested_addr, bytes, file_desc);
3362 }
3363 
3364 size_t os::large_page_size() {
3365   return _large_page_size;
3366 }
3367 
3368 bool os::can_commit_large_page_memory() {
3369   // Windows only uses large page memory when the entire region is reserved
3370   // and committed in a single VirtualAlloc() call. This may change in the
3371   // future, but with Windows 2003 it's not possible to commit on demand.
3372   return false;
3373 }
3374 
3375 bool os::can_execute_large_page_memory() {
3376   return true;
3377 }
3378 
3379 char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3380                                     bool exec) {
3381   assert(UseLargePages, "only for large pages");
3382 
3383   if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3384     return NULL; // Fallback to small pages.
3385   }
3386 
3387   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3388   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3389 
3390   // with large pages, there are two cases where we need to use Individual Allocation
3391   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3392   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3393   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3394     log_debug(pagesize)("Reserving large pages individually.");
3395 
3396     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3397     if (p_buf == NULL) {
3398       // give an appropriate warning message
3399       if (UseNUMAInterleaving) {
3400         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3401       }
3402       if (UseLargePagesIndividualAllocation) {
3403         warning("Individually allocated large pages failed, "
3404                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3405       }
3406       return NULL;
3407     }
3408 
3409     return p_buf;
3410 
3411   } else {
3412     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3413 
3414     // normal policy just allocate it all at once
3415     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3416     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3417 
3418     return res;
3419   }
3420 }
3421 
3422 bool os::pd_release_memory_special(char* base, size_t bytes) {
3423   assert(base != NULL, "Sanity check");
3424   return pd_release_memory(base, bytes);
3425 }
3426 
3427 void os::print_statistics() {
3428 }
3429 
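// Print a warning including the last Windows error text when a commit request fails.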
3430 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3431   int err = os::get_last_error();
3432   char buf[256];
3433   size_t buf_len = os::lasterror(buf, sizeof(buf));
3434   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3435           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3436           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3437 }
3438 
3439 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3440   if (bytes == 0) {
3441     // Don't bother the OS with noops.
3442     return true;
3443   }
3444   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3445   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3446   // Don't attempt to print anything if the OS call fails. We're
3447   // probably low on resources, so the print itself may cause crashes.
3448 
  // Unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc;
  // in that case we can just do a single commit for the requested size.
3452   if (!UseNUMAInterleaving) {
3453     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3454       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3455       return false;
3456     }
3457     if (exec) {
3458       DWORD oldprot;
3459       // Windows doc says to use VirtualProtect to get execute permissions
3460       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3461         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3462         return false;
3463       }
3464     }
3465     return true;
3466   } else {
3467 
3468     // when NUMAInterleaving is enabled, the commit might cover a range that
3469     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3470     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3471     // returns represents the number of bytes that can be committed in one step.
3472     size_t bytes_remaining = bytes;
3473     char * next_alloc_addr = addr;
3474     while (bytes_remaining > 0) {
3475       MEMORY_BASIC_INFORMATION alloc_info;
3476       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3477       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3478       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3479                        PAGE_READWRITE) == NULL) {
3480         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3481                                             exec);)
3482         return false;
3483       }
3484       if (exec) {
3485         DWORD oldprot;
3486         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3487                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3488           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3489                                               exec);)
3490           return false;
3491         }
3492       }
3493       bytes_remaining -= bytes_to_rq;
3494       next_alloc_addr += bytes_to_rq;
3495     }
3496   }
3497   // if we made it this far, return true
3498   return true;
3499 }
3500 
3501 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3502                           bool exec) {
3503   // alignment_hint is ignored on this OS
3504   return pd_commit_memory(addr, size, exec);
3505 }
3506 
3507 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3508                                   const char* mesg) {
3509   assert(mesg != NULL, "mesg must be specified");
3510   if (!pd_commit_memory(addr, size, exec)) {
3511     warn_fail_commit_memory(addr, size, exec);
3512     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3513   }
3514 }
3515 
3516 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3517                                   size_t alignment_hint, bool exec,
3518                                   const char* mesg) {
3519   // alignment_hint is ignored on this OS
3520   pd_commit_memory_or_exit(addr, size, exec, mesg);
3521 }
3522 
3523 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3524   if (bytes == 0) {
3525     // Don't bother the OS with noops.
3526     return true;
3527   }
3528   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3529   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3530   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3531 }
3532 
3533 bool os::pd_release_memory(char* addr, size_t bytes) {
3534   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3535 }
3536 
3537 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3538   return os::commit_memory(addr, size, !ExecMem);
3539 }
3540 
3541 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3542   return os::uncommit_memory(addr, size);
3543 }
3544 
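// Apply the requested protection one chunk at a time. Memory reserved via
// allocate_pages_individually() consists of multiple VirtualAlloc regions, so
// a single VirtualProtect() call cannot span them; instead, walk the range
// with VirtualQuery() and protect each region separately.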
3545 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3546   uint count = 0;
3547   bool ret = false;
3548   size_t bytes_remaining = bytes;
3549   char * next_protect_addr = addr;
3550 
3551   // Use VirtualQuery() to get the chunk size.
3552   while (bytes_remaining) {
3553     MEMORY_BASIC_INFORMATION alloc_info;
3554     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3555       return false;
3556     }
3557 
3558     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
    // We used a different allocation API in allocate_pages_individually() depending on
    // UseNUMAInterleaving, but we don't need to distinguish the cases here: both are
    // protected through the same VirtualProtect() call.
    ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
    if (!ret) {
      // Only warn when the protection call actually failed.
      warning("Failed protecting pages individually for chunk #%u", count);
      return false;
    }
3566 
3567     bytes_remaining -= bytes_to_protect;
3568     next_protect_addr += bytes_to_protect;
3569     count++;
3570   }
3571   return ret;
3572 }
3573 
3574 // Set protections specified
3575 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3576                         bool is_committed) {
3577   unsigned int p = 0;
3578   switch (prot) {
3579   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3580   case MEM_PROT_READ: p = PAGE_READONLY; break;
3581   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3582   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3583   default:
3584     ShouldNotReachHere();
3585   }
3586 
3587   DWORD old_status;
3588 
  // Strangely enough, on Win32 one can change protection only for committed
  // memory. Not a big deal anyway, as 'bytes' is no larger than 64K here.
3591   if (!is_committed) {
3592     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3593                           "cannot commit protection page");
3594   }
  // One cannot use os::guard_memory() here, as on Win32 guard pages
  // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3597   //
3598   // Pages in the region become guard pages. Any attempt to access a guard page
3599   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3600   // the guard page status. Guard pages thus act as a one-time access alarm.
3601   bool ret;
3602   if (UseNUMAInterleaving) {
3603     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3604     // so we must protect the chunks individually.
3605     ret = protect_pages_individually(addr, bytes, p, &old_status);
3606   } else {
3607     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3608   }
3609 #ifdef ASSERT
3610   if (!ret) {
3611     int err = os::get_last_error();
3612     char buf[256];
3613     size_t buf_len = os::lasterror(buf, sizeof(buf));
3614     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3615           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3616           buf_len != 0 ? buf : "<no_error_string>", err);
3617   }
3618 #endif
3619   return ret;
3620 }
3621 
3622 bool os::guard_memory(char* addr, size_t bytes) {
3623   DWORD old_status;
3624   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3625 }
3626 
3627 bool os::unguard_memory(char* addr, size_t bytes) {
3628   DWORD old_status;
3629   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3630 }
3631 
3632 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3633 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3634 void os::numa_make_global(char *addr, size_t bytes)    { }
3635 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3636 bool os::numa_topology_changed()                       { return false; }
3637 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3638 int os::numa_get_group_id()                            { return 0; }
3639 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3640   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3641     // Provide an answer for UMA systems
3642     ids[0] = 0;
3643     return 1;
3644   } else {
3645     // check for size bigger than actual groups_num
3646     size = MIN2(size, numa_get_groups_num());
3647     for (int i = 0; i < (int)size; i++) {
3648       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3649     }
3650     return size;
3651   }
3652 }
3653 
3654 int os::numa_get_group_id_for_address(const void* address) {
3655   return 0;
3656 }
3657 
3658 bool os::get_page_info(char *start, page_info* info) {
3659   return false;
3660 }
3661 
3662 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3663                      page_info* page_found) {
3664   return end;
3665 }
3666 
3667 char* os::non_memory_address_word() {
3668   // Must never look like an address returned by reserve_memory,
3669   // even in its subfields (as defined by the CPU immediate fields,
3670   // if the CPU splits constants across multiple instructions).
3671 #ifdef _M_ARM64
3672   // AArch64 has a maximum addressable space of 48-bits
3673   return (char*)((1ull << 48) - 1);
3674 #else
3675   return (char*)-1;
3676 #endif
3677 }
3678 
3679 #define MAX_ERROR_COUNT 100
3680 #define SYS_THREAD_ERROR 0xffffffffUL
3681 
3682 void os::pd_start_thread(Thread* thread) {
3683   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // ResumeThread() returns the thread's previous suspend count:
  // 0:  Thread was not suspended
  // 1:  Thread was suspended once and is running now
  // >1: Thread is still suspended.
3688   assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
3689 }
3690 
3691 
3692 // Short sleep, direct OS call.
3693 //
3694 // ms = 0, means allow others (if any) to run.
3695 //
3696 void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptible sleep, short time use only");
3698   Sleep(ms);
3699 }
3700 
3701 // Windows does not provide sleep functionality with nanosecond resolution, so we
3702 // try to approximate this with spinning combined with yielding if another thread
3703 // is ready to run on the current processor.
3704 void os::naked_short_nanosleep(jlong ns) {
  assert(ns > -1 && ns < NANOUNITS, "Un-interruptible sleep, short time use only");
3706 
3707   int64_t start = os::javaTimeNanos();
3708   do {
3709     if (SwitchToThread() == 0) {
3710       // Nothing else is ready to run on this cpu, spin a little
3711       SpinPause();
3712     }
3713   } while (os::javaTimeNanos() - start < ns);
3714 }
3715 
3716 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3717 void os::infinite_sleep() {
3718   while (true) {    // sleep forever ...
3719     Sleep(100000);  // ... 100 seconds at a time
3720   }
3721 }
3722 
3723 typedef BOOL (WINAPI * STTSignature)(void);
3724 
3725 void os::naked_yield() {
3726   // Consider passing back the return value from SwitchToThread().
3727   SwitchToThread();
3728 }
3729 
3730 // Win32 only gives you access to seven real priorities at a time,
3731 // so we compress Java's ten down to seven.  It would be better
3732 // if we dynamically adjusted relative priorities.
3733 
3734 int os::java_to_os_priority[CriticalPriority + 1] = {
3735   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3736   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3737   THREAD_PRIORITY_LOWEST,                       // 2
3738   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3739   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3740   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3741   THREAD_PRIORITY_NORMAL,                       // 6
3742   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3743   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3744   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3745   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3746   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3747 };
3748 
3749 int prio_policy1[CriticalPriority + 1] = {
3750   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3751   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3752   THREAD_PRIORITY_LOWEST,                       // 2
3753   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3754   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3755   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3756   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3757   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3758   THREAD_PRIORITY_HIGHEST,                      // 8
3759   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3760   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3761   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3762 };
3763 
3764 static int prio_init() {
3765   // If ThreadPriorityPolicy is 1, switch tables
3766   if (ThreadPriorityPolicy == 1) {
3767     int i;
3768     for (i = 0; i < CriticalPriority + 1; i++) {
3769       os::java_to_os_priority[i] = prio_policy1[i];
3770     }
3771   }
3772   if (UseCriticalJavaThreadPriority) {
3773     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3774   }
3775   return 0;
3776 }
3777 
3778 OSReturn os::set_native_priority(Thread* thread, int priority) {
3779   if (!UseThreadPriorities) return OS_OK;
3780   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3781   return ret ? OS_OK : OS_ERR;
3782 }
3783 
3784 OSReturn os::get_native_priority(const Thread* const thread,
3785                                  int* priority_ptr) {
3786   if (!UseThreadPriorities) {
3787     *priority_ptr = java_to_os_priority[NormPriority];
3788     return OS_OK;
3789   }
3790   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3791   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3792     assert(false, "GetThreadPriority failed");
3793     return OS_ERR;
3794   }
3795   *priority_ptr = os_prio;
3796   return OS_OK;
3797 }
3798 
3799 // GetCurrentThreadId() returns DWORD
3800 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3801 
3802 static int _initial_pid = 0;
3803 
3804 int os::current_process_id() {
3805   return (_initial_pid ? _initial_pid : _getpid());
3806 }
3807 
3808 int    os::win32::_vm_page_size              = 0;
3809 int    os::win32::_vm_allocation_granularity = 0;
3810 int    os::win32::_processor_type            = 0;
3811 // Processor level is not available on non-NT systems, use vm_version instead
3812 int    os::win32::_processor_level           = 0;
3813 julong os::win32::_physical_memory           = 0;
3814 size_t os::win32::_default_stack_size        = 0;
3815 
3816 intx          os::win32::_os_thread_limit    = 0;
3817 volatile intx os::win32::_os_thread_count    = 0;
3818 
3819 bool   os::win32::_is_windows_server         = false;
3820 
3821 // 6573254
3822 // Currently, the bug is observed across all the supported Windows releases,
3823 // including the latest one (as of this writing - Windows Server 2012 R2)
3824 bool   os::win32::_has_exit_bug              = true;
3825 
3826 void os::win32::initialize_system_info() {
3827   SYSTEM_INFO si;
3828   GetSystemInfo(&si);
3829   _vm_page_size    = si.dwPageSize;
3830   _vm_allocation_granularity = si.dwAllocationGranularity;
3831   _processor_type  = si.dwProcessorType;
3832   _processor_level = si.wProcessorLevel;
3833   set_processor_count(si.dwNumberOfProcessors);
3834 
3835   MEMORYSTATUSEX ms;
3836   ms.dwLength = sizeof(ms);
3837 
3838   // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
3839   // dwMemoryLoad (% of memory in use)
3840   GlobalMemoryStatusEx(&ms);
3841   _physical_memory = ms.ullTotalPhys;
3842 
3843   if (FLAG_IS_DEFAULT(MaxRAM)) {
3844     // Adjust MaxRAM according to the maximum virtual address space available.
3845     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3846   }
3847 
3848   OSVERSIONINFOEX oi;
3849   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3850   GetVersionEx((OSVERSIONINFO*)&oi);
3851   switch (oi.dwPlatformId) {
3852   case VER_PLATFORM_WIN32_NT:
3853     {
3854       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3855       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3856           oi.wProductType == VER_NT_SERVER) {
3857         _is_windows_server = true;
3858       }
3859     }
3860     break;
3861   default: fatal("Unknown platform");
3862   }
3863 
3864   _default_stack_size = os::current_stack_size();
3865   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3866   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3867          "stack size not a multiple of page size");
3868 
3869   initialize_performance_counter();
3870 }
3871 
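// Query the line size of the level-1 cache via GetLogicalProcessorInformation().
// Returns -1 if it cannot be determined.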
3872 int os::win32::get_cacheline_size() {
3873   PSYSTEM_LOGICAL_PROCESSOR_INFORMATION buffer = NULL;
3874   DWORD returnLength = 0;
3875 
3876   // See https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getlogicalprocessorinformation
3877 
3878   GetLogicalProcessorInformation(NULL, &returnLength);
3879   assert(GetLastError() == ERROR_INSUFFICIENT_BUFFER, "Unexpected return from GetLogicalProcessorInformation");
3880 
3881   buffer = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION)os::malloc(returnLength, mtInternal);
3882   BOOL rc = GetLogicalProcessorInformation(buffer, &returnLength);
3883   assert(rc, "Unexpected return from GetLogicalProcessorInformation");
3884 
3885   int line_sz = -1;
3886   for (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION ptr = buffer; ptr < buffer + returnLength / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION); ptr++) {
3887     switch (ptr->Relationship) {
3888     case RelationCache:
3889       // Cache data is in ptr->Cache, one CACHE_DESCRIPTOR structure for each cache.
3890       PCACHE_DESCRIPTOR Cache = &ptr->Cache;
3891       if (Cache->Level == 1) {
3892         line_sz = Cache->LineSize;
3893       }
3894       break;
3895     }
3896   }
3897   os::free(buffer);
3898   return line_sz;
3899 }
3900 
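// Load a system DLL given a bare file name (no path components allowed),
// searching only the system directory and the Windows directory, never the
// current directory or PATH. Returns NULL and fills 'ebuf' on failure.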
3901 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3902                                       int ebuflen) {
3903   char path[MAX_PATH];
3904   DWORD size;
3905   DWORD pathLen = (DWORD)sizeof(path);
3906   HINSTANCE result = NULL;
3907 
3908   // only allow library name without path component
3909   assert(strchr(name, '\\') == NULL, "path not allowed");
3910   assert(strchr(name, ':') == NULL, "path not allowed");
3911   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3912     jio_snprintf(ebuf, ebuflen,
3913                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3914     return NULL;
3915   }
3916 
3917   // search system directory
3918   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3919     if (size >= pathLen) {
3920       return NULL; // truncated
3921     }
3922     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3923       return NULL; // truncated
3924     }
3925     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3926       return result;
3927     }
3928   }
3929 
3930   // try Windows directory
3931   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3932     if (size >= pathLen) {
3933       return NULL; // truncated
3934     }
3935     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3936       return NULL; // truncated
3937     }
3938     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3939       return result;
3940     }
3941   }
3942 
3943   jio_snprintf(ebuf, ebuflen,
3944                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3945   return NULL;
3946 }
3947 
3948 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3949 #define EXIT_TIMEOUT 300000 /* 5 minutes */
3950 
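// InitOnceExecuteOnce() callback: initializes the critical section used by
// exit_process_or_thread() below.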
3951 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3952   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3953   return TRUE;
3954 }
3955 
3956 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3957   // Basic approach:
3958   //  - Each exiting thread registers its intent to exit and then does so.
3959   //  - A thread trying to terminate the process must wait for all
3960   //    threads currently exiting to complete their exit.
3961 
3962   if (os::win32::has_exit_bug()) {
3963     // The array holds handles of the threads that have started exiting by calling
3964     // _endthreadex().
3965     // Should be large enough to avoid blocking the exiting thread due to lack of
3966     // a free slot.
3967     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3968     static int handle_count = 0;
3969 
3970     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3971     static CRITICAL_SECTION crit_sect;
3972     static volatile DWORD process_exiting = 0;
3973     int i, j;
3974     DWORD res;
3975     HANDLE hproc, hthr;
3976 
3977     // We only attempt to register threads until a process exiting
3978     // thread manages to set the process_exiting flag. Any threads
3979     // that come through here after the process_exiting flag is set
3980     // are unregistered and will be caught in the SuspendThread()
3981     // infinite loop below.
3982     bool registered = false;
3983 
3984     // The first thread that reached this point, initializes the critical section.
3985     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3986       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3987     } else if (Atomic::load_acquire(&process_exiting) == 0) {
3988       if (what != EPT_THREAD) {
3989         // Atomically set process_exiting before the critical section
3990         // to increase the visibility between racing threads.
3991         Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
3992       }
3993       EnterCriticalSection(&crit_sect);
3994 
3995       if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
3996         // Remove from the array those handles of the threads that have completed exiting.
3997         for (i = 0, j = 0; i < handle_count; ++i) {
3998           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3999           if (res == WAIT_TIMEOUT) {
4000             handles[j++] = handles[i];
4001           } else {
4002             if (res == WAIT_FAILED) {
4003               warning("WaitForSingleObject failed (%u) in %s: %d\n",
4004                       GetLastError(), __FILE__, __LINE__);
4005             }
4006             // Don't keep the handle, if we failed waiting for it.
4007             CloseHandle(handles[i]);
4008           }
4009         }
4010 
4011         // If there's no free slot in the array of the kept handles, we'll have to
4012         // wait until at least one thread completes exiting.
4013         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
4014           // Raise the priority of the oldest exiting thread to increase its chances
4015           // to complete sooner.
4016           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
4017           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
4018           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
4019             i = (res - WAIT_OBJECT_0);
4020             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
4021             for (; i < handle_count; ++i) {
4022               handles[i] = handles[i + 1];
4023             }
4024           } else {
4025             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
4026                     (res == WAIT_FAILED ? "failed" : "timed out"),
4027                     GetLastError(), __FILE__, __LINE__);
4028             // Don't keep handles, if we failed waiting for them.
4029             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
4030               CloseHandle(handles[i]);
4031             }
4032             handle_count = 0;
4033           }
4034         }
4035 
4036         // Store a duplicate of the current thread handle in the array of handles.
4037         hproc = GetCurrentProcess();
4038         hthr = GetCurrentThread();
4039         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
4040                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
4041           warning("DuplicateHandle failed (%u) in %s: %d\n",
4042                   GetLastError(), __FILE__, __LINE__);
4043 
4044           // We can't register this thread (no more handles) so this thread
4045           // may be racing with a thread that is calling exit(). If the thread
4046           // that is calling exit() has managed to set the process_exiting
4047           // flag, then this thread will be caught in the SuspendThread()
4048           // infinite loop below which closes that race. A small timing
4049           // window remains before the process_exiting flag is set, but it
4050           // is only exposed when we are out of handles.
4051         } else {
4052           ++handle_count;
4053           registered = true;
4054 
4055           // The current exiting thread has stored its handle in the array, and now
4056           // should leave the critical section before calling _endthreadex().
4057         }
4058 
4059       } else if (what != EPT_THREAD && handle_count > 0) {
4060         jlong start_time, finish_time, timeout_left;
4061         // Before ending the process, make sure all the threads that had called
4062         // _endthreadex() completed.
4063 
4064         // Set the priority level of the current thread to the same value as
4065         // the priority level of exiting threads.
4066         // This is to ensure it will be given a fair chance to execute if
4067         // the timeout expires.
4068         hthr = GetCurrentThread();
4069         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
4070         start_time = os::javaTimeNanos();
4071         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
4072         for (i = 0; ; ) {
4073           int portion_count = handle_count - i;
4074           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
4075             portion_count = MAXIMUM_WAIT_OBJECTS;
4076           }
4077           for (j = 0; j < portion_count; ++j) {
4078             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
4079           }
4080           timeout_left = (finish_time - start_time) / 1000000L;
4081           if (timeout_left < 0) {
4082             timeout_left = 0;
4083           }
4084           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
4085           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
4086             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
4087                     (res == WAIT_FAILED ? "failed" : "timed out"),
4088                     GetLastError(), __FILE__, __LINE__);
4089             // Reset portion_count so we close the remaining
4090             // handles due to this error.
4091             portion_count = handle_count - i;
4092           }
4093           for (j = 0; j < portion_count; ++j) {
4094             CloseHandle(handles[i + j]);
4095           }
4096           if ((i += portion_count) >= handle_count) {
4097             break;
4098           }
4099           start_time = os::javaTimeNanos();
4100         }
4101         handle_count = 0;
4102       }
4103 
4104       LeaveCriticalSection(&crit_sect);
4105     }
4106 
4107     if (!registered &&
4108         Atomic::load_acquire(&process_exiting) != 0 &&
4109         process_exiting != GetCurrentThreadId()) {
4110       // Some other thread is about to call exit(), so we don't let
4111       // the current unregistered thread proceed to exit() or _endthreadex()
4112       while (true) {
4113         SuspendThread(GetCurrentThread());
4114         // Avoid busy-wait loop, if SuspendThread() failed.
4115         Sleep(EXIT_TIMEOUT);
4116       }
4117     }
4118   }
4119 
4120   // We are here if either
4121   // - there's no 'race at exit' bug on this OS release;
4122   // - initialization of the critical section failed (unlikely);
4123   // - the current thread has registered itself and left the critical section;
4124   // - the process-exiting thread has raised the flag and left the critical section.
4125   if (what == EPT_THREAD) {
4126     _endthreadex((unsigned)exit_code);
4127   } else if (what == EPT_PROCESS) {
4128     ::exit(exit_code);
4129   } else {
4130     _exit(exit_code);
4131   }
4132 
4133   // Should not reach here
4134   return exit_code;
4135 }
4136 
4137 #undef EXIT_TIMEOUT
4138 
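// Put the standard streams into binary mode so that the C runtime performs no
// CR/LF translation on them.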
4139 void os::win32::setmode_streams() {
4140   _setmode(_fileno(stdin), _O_BINARY);
4141   _setmode(_fileno(stdout), _O_BINARY);
4142   _setmode(_fileno(stderr), _O_BINARY);
4143 }
4144 
4145 void os::wait_for_keypress_at_exit(void) {
4146   if (PauseAtExit) {
4147     fprintf(stderr, "Press any key to continue...\n");
4148     fgetc(stdin);
4149   }
4150 }
4151 
4152 
4153 bool os::message_box(const char* title, const char* message) {
4154   int result = MessageBox(NULL, message, title,
4155                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4156   return result == IDYES;
4157 }
4158 
4159 #ifndef PRODUCT
4160 #ifndef _WIN64
4161 // Helpers to check whether NX protection is enabled
4162 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
4163   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
4164       pex->ExceptionRecord->NumberParameters > 0 &&
4165       pex->ExceptionRecord->ExceptionInformation[0] ==
4166       EXCEPTION_INFO_EXEC_VIOLATION) {
4167     return EXCEPTION_EXECUTE_HANDLER;
4168   }
4169   return EXCEPTION_CONTINUE_SEARCH;
4170 }
4171 
4172 void nx_check_protection() {
4173   // If NX is enabled we'll get an exception calling into code on the stack
4174   char code[] = { (char)0xC3 }; // ret
4175   void *code_ptr = (void *)code;
4176   __try {
4177     __asm call code_ptr
4178   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
4179     tty->print_raw_cr("NX protection detected.");
4180   }
4181 }
4182 #endif // _WIN64
4183 #endif // PRODUCT
4184 
4185 // This is called _before_ the global arguments have been parsed
4186 void os::init(void) {
4187   _initial_pid = _getpid();
4188 
4189   init_random(1234567);
4190 
4191   win32::initialize_system_info();
4192   win32::setmode_streams();
4193   init_page_sizes((size_t) win32::vm_page_size());
4194 
4195   // This may be overridden later when argument processing is done.
4196   FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);
4197 
4198   // Initialize main_process and main_thread
4199   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4200   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4201                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4202     fatal("DuplicateHandle failed\n");
4203   }
4204   main_thread_id = (int) GetCurrentThreadId();
4205 
4206   // initialize fast thread access - only used for 32-bit
4207   win32::initialize_thread_ptr_offset();
4208 }
4209 
4210 // To install functions for atexit processing
4211 extern "C" {
4212   static void perfMemory_exit_helper() {
4213     perfMemory_exit();
4214   }
4215 }
4216 
4217 static jint initSock();
4218 
4219 // this is called _after_ the global arguments have been parsed
4220 jint os::init_2(void) {
4221 
4222   // This could be set any time but all platforms
4223   // have to set it the same so we have to mirror Solaris.
4224   DEBUG_ONLY(os::set_mutex_init_done();)
4225 
4226   // Setup Windows Exceptions
4227 
4228 #if defined(USE_VECTORED_EXCEPTION_HANDLING)
4229   topLevelVectoredExceptionHandler = AddVectoredExceptionHandler(1, topLevelExceptionFilter);
4230 #elif INCLUDE_AOT
4231   // If AOT is enabled we need to install a vectored exception handler
4232   // in order to forward implicit exceptions from code in AOT
4233   // generated DLLs.  This is necessary since these DLLs are not
4234   // registered for structured exceptions like codecache methods are.
4235   if (AOTLibrary != NULL && (UseAOT || FLAG_IS_DEFAULT(UseAOT))) {
4236     topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelVectoredExceptionFilter);
4237   }
4238 #endif
4239 
4240   // for debugging float code generation bugs
4241   if (ForceFloatExceptions) {
4242 #ifndef  _WIN64
4243     static long fp_control_word = 0;
4244     __asm { fstcw fp_control_word }
4245     // see Intel PPro Manual, Vol. 2, p 7-16
4246     const long precision = 0x20;
4247     const long underflow = 0x10;
4248     const long overflow  = 0x08;
4249     const long zero_div  = 0x04;
4250     const long denorm    = 0x02;
4251     const long invalid   = 0x01;
4252     fp_control_word |= invalid;
4253     __asm { fldcw fp_control_word }
4254 #endif
4255   }
4256 
  // If stack_commit_size is 0, Windows will reserve the default size,
  // but only commit a small portion of it.
4259   size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
4260   size_t default_reserve_size = os::win32::default_stack_size();
4261   size_t actual_reserve_size = stack_commit_size;
4262   if (stack_commit_size < default_reserve_size) {
4263     // If stack_commit_size == 0, we want this too
4264     actual_reserve_size = default_reserve_size;
4265   }
4266 
4267   // Check minimum allowable stack size for thread creation and to initialize
4268   // the java system classes, including StackOverflowError - depends on page
4269   // size.  Add two 4K pages for compiler2 recursion in main thread.
4270   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4271   // class initialization depending on 32 or 64 bit VM.
4272   size_t min_stack_allowed =
4273             (size_t)(JavaThread::stack_guard_zone_size() +
4274                      JavaThread::stack_shadow_zone_size() +
4275                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4276 
4277   min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
4278 
4279   if (actual_reserve_size < min_stack_allowed) {
4280     tty->print_cr("\nThe Java thread stack size specified is too small. "
4281                   "Specify at least %dk",
4282                   min_stack_allowed / K);
4283     return JNI_ERR;
4284   }
4285 
4286   JavaThread::set_stack_size_at_create(stack_commit_size);
4287 
  // Calculate the theoretical maximum number of threads, to guard against
  // artificial out-of-memory situations where all available address space has
  // been reserved by thread stacks.
4291   assert(actual_reserve_size != 0, "Must have a stack");
4292 
4293   // Calculate the thread limit when we should start doing Virtual Memory
4294   // banging. Currently when the threads will have used all but 200Mb of space.
4295   //
4296   // TODO: consider performing a similar calculation for commit size instead
4297   // as reserve size, since on a 64-bit platform we'll run into that more
4298   // often than running out of virtual memory space.  We can use the
4299   // lower value of the two calculations as the os_thread_limit.
4300   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4301   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4302 
  // atexit functions are called in the reverse order of their registration.
  // There is no limit to the number of functions registered. atexit does
  // not set errno.
4306 
4307   if (PerfAllowAtExitRegistration) {
4308     // only register atexit functions if PerfAllowAtExitRegistration is set.
4309     // atexit functions can be delayed until process exit time, which
4310     // can be problematic for embedded VM situations. Embedded VMs should
4311     // call DestroyJavaVM() to assure that VM resources are released.
4312 
4313     // note: perfMemory_exit_helper atexit function may be removed in
4314     // the future if the appropriate cleanup code can be added to the
4315     // VM_Exit VMOperation's doit method.
4316     if (atexit(perfMemory_exit_helper) != 0) {
4317       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4318     }
4319   }
4320 
4321 #ifndef _WIN64
4322   // Print something if NX is enabled (win32 on AMD64)
4323   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4324 #endif
4325 
4326   // initialize thread priority policy
4327   prio_init();
4328 
4329   if (UseNUMA && !ForceNUMA) {
4330     UseNUMA = false; // We don't fully support this yet
4331   }
4332 
4333   if (UseNUMAInterleaving || (UseNUMA && FLAG_IS_DEFAULT(UseNUMAInterleaving))) {
4334     if (!numa_interleaving_init()) {
4335       FLAG_SET_ERGO(UseNUMAInterleaving, false);
4336     } else if (!UseNUMAInterleaving) {
4337       // When NUMA is requested, non-NUMA-aware allocations default to interleaving.
4338       FLAG_SET_ERGO(UseNUMAInterleaving, true);
4339     }
4340   }
4341 
4342   if (initSock() != JNI_OK) {
4343     return JNI_ERR;
4344   }
4345 
4346   SymbolEngine::recalc_search_path();
4347 
4348   // Initialize data for jdk.internal.misc.Signal
4349   if (!ReduceSignalUsage) {
4350     jdk_misc_signal_init();
4351   }
4352 
4353   return JNI_OK;
4354 }
4355 
4356 // combine the high and low DWORD into a ULONGLONG
4357 static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
4358   ULONGLONG value = high_word;
4359   value <<= sizeof(high_word) * 8;
4360   value |= low_word;
4361   return value;
4362 }
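     // For example, make_double_word(0x1, 0x2) yields 0x0000000100000002: the high
     // DWORD is shifted left by 32 bits and the low DWORD fills the lower half.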
4363 
4364 // Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
4365 static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
4366   ::memset((void*)sbuf, 0, sizeof(struct stat));
4367   sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
4368   sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
4369                                   file_data.ftLastWriteTime.dwLowDateTime);
4370   sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
4371                                   file_data.ftCreationTime.dwLowDateTime);
4372   sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
4373                                   file_data.ftLastAccessTime.dwLowDateTime);
4374   if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
4375     sbuf->st_mode |= S_IFDIR;
4376   } else {
4377     sbuf->st_mode |= S_IFREG;
4378   }
4379 }
4380 
4381 static errno_t convert_to_unicode(char const* char_path, LPWSTR* unicode_path) {
4382   // Get required buffer size to convert to Unicode
4383   int unicode_path_len = MultiByteToWideChar(CP_ACP,
4384                                              MB_ERR_INVALID_CHARS,
4385                                              char_path, -1,
4386                                              NULL, 0);
4387   if (unicode_path_len == 0) {
4388     return EINVAL;
4389   }
4390 
4391   *unicode_path = NEW_C_HEAP_ARRAY(WCHAR, unicode_path_len, mtInternal);
4392 
4393   int result = MultiByteToWideChar(CP_ACP,
4394                                    MB_ERR_INVALID_CHARS,
4395                                    char_path, -1,
4396                                    *unicode_path, unicode_path_len);
4397   assert(result == unicode_path_len, "length already checked above");
4398 
4399   return ERROR_SUCCESS;
4400 }
4401 
4402 static errno_t get_full_path(LPCWSTR unicode_path, LPWSTR* full_path) {
4403   // Get required buffer size to convert to full path. The return
4404   // value INCLUDES the terminating null character.
4405   DWORD full_path_len = GetFullPathNameW(unicode_path, 0, NULL, NULL);
4406   if (full_path_len == 0) {
4407     return EINVAL;
4408   }
4409 
4410   *full_path = NEW_C_HEAP_ARRAY(WCHAR, full_path_len, mtInternal);
4411 
4412   // When the buffer has sufficient size, the return value EXCLUDES the
4413   // terminating null character
4414   DWORD result = GetFullPathNameW(unicode_path, full_path_len, *full_path, NULL);
4415   assert(result <= full_path_len, "length already checked above");
4416 
4417   return ERROR_SUCCESS;
4418 }
4419 
4420 static void set_path_prefix(char* buf, LPWSTR* prefix, int* prefix_off, bool* needs_fullpath) {
4421   *prefix_off = 0;
4422   *needs_fullpath = true;
4423 
4424   if (::isalpha(buf[0]) && !::IsDBCSLeadByte(buf[0]) && buf[1] == ':' && buf[2] == '\\') {
4425     *prefix = L"\\\\?\\";
4426   } else if (buf[0] == '\\' && buf[1] == '\\') {
4427     if (buf[2] == '?' && buf[3] == '\\') {
4428       *prefix = L"";
4429       *needs_fullpath = false;
4430     } else {
4431       *prefix = L"\\\\?\\UNC";
4432       *prefix_off = 1; // Overwrite the first char with the prefix, so \\share\path becomes \\?\UNC\share\path
4433     }
4434   } else {
4435     *prefix = L"\\\\?\\";
4436   }
4437 }
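     // Illustrative examples of the prefixing above (a sketch derived from the cases
     // handled in set_path_prefix; the prefix is prepended by wide_abs_unc_path below):
     //   "C:\foo\bar"        -> "\\?\"    -> "\\?\C:\foo\bar"
     //   "\\server\share\x"  -> "\\?\UNC" -> "\\?\UNC\server\share\x"  (prefix_off skips the first '\')
     //   "\\?\C:\foo"        -> ""           (already an extended-length path; no full-path conversion)
     //   "rel\path"          -> "\\?\"       (made absolute first via GetFullPathNameW)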
4438 
4439 // Returns the given path as an absolute wide path in unc format. The returned path is NULL
4440 // on error (with err being set accordingly) and should be freed via os::free() otherwise.
4441 // additional_space is the size of space, in wchar_t, the function will additionally add to
4442 // the allocation of return buffer (such that the size of the returned buffer is at least
4443 // wcslen(buf) + 1 + additional_space).
4444 static wchar_t* wide_abs_unc_path(char const* path, errno_t & err, int additional_space = 0) {
4445   if ((path == NULL) || (path[0] == '\0')) {
4446     err = ENOENT;
4447     return NULL;
4448   }
4449 
4450   // Need to allocate room for at least 3 characters, since os::native_path transforms "C:" to "C:.".
4451   size_t buf_len = 1 + MAX2((size_t)3, strlen(path));
4452   char* buf = NEW_C_HEAP_ARRAY(char, buf_len, mtInternal);
4453   strncpy(buf, path, buf_len);
4454   os::native_path(buf);
4455 
4456   LPWSTR prefix = NULL;
4457   int prefix_off = 0;
4458   bool needs_fullpath = true;
4459   set_path_prefix(buf, &prefix, &prefix_off, &needs_fullpath);
4460 
4461   LPWSTR unicode_path = NULL;
4462   err = convert_to_unicode(buf, &unicode_path);
4463   FREE_C_HEAP_ARRAY(char, buf);
4464   if (err != ERROR_SUCCESS) {
4465     return NULL;
4466   }
4467 
4468   LPWSTR converted_path = NULL;
4469   if (needs_fullpath) {
4470     err = get_full_path(unicode_path, &converted_path);
4471   } else {
4472     converted_path = unicode_path;
4473   }
4474 
4475   LPWSTR result = NULL;
4476   if (converted_path != NULL) {
4477     size_t prefix_len = wcslen(prefix);
4478     size_t result_len = prefix_len - prefix_off + wcslen(converted_path) + additional_space + 1;
4479     result = NEW_C_HEAP_ARRAY(WCHAR, result_len, mtInternal);
4480     _snwprintf(result, result_len, L"%s%s", prefix, &converted_path[prefix_off]);
4481 
4482     // Remove trailing pathsep (not for \\?\<DRIVE>:\, since it would make it relative)
4483     result_len = wcslen(result);
4484     if ((result[result_len - 1] == L'\\') &&
4485         !(::iswalpha(result[4]) && result[5] == L':' && result_len == 7)) {
4486       result[result_len - 1] = L'\0';
4487     }
4488   }
4489 
4490   if (converted_path != unicode_path) {
4491     FREE_C_HEAP_ARRAY(WCHAR, converted_path);
4492   }
4493   FREE_C_HEAP_ARRAY(WCHAR, unicode_path);
4494 
4495   return static_cast<wchar_t*>(result); // LPWSTR and wchar_t* are the same type on Windows.
4496 }
4497 
4498 int os::stat(const char *path, struct stat *sbuf) {
4499   errno_t err;
4500   wchar_t* wide_path = wide_abs_unc_path(path, err);
4501 
4502   if (wide_path == NULL) {
4503     errno = err;
4504     return -1;
4505   }
4506 
4507   WIN32_FILE_ATTRIBUTE_DATA file_data;
4508   BOOL bret = ::GetFileAttributesExW(wide_path, GetFileExInfoStandard, &file_data);
4509   os::free(wide_path);
4510 
4511   if (!bret) {
4512     errno = ::GetLastError();
4513     return -1;
4514   }
4515 
4516   file_attribute_data_to_stat(sbuf, file_data);
4517   return 0;
4518 }
4519 
4520 static HANDLE create_read_only_file_handle(const char* file) {
4521   errno_t err;
4522   wchar_t* wide_path = wide_abs_unc_path(file, err);
4523 
4524   if (wide_path == NULL) {
4525     errno = err;
4526     return INVALID_HANDLE_VALUE;
4527   }
4528 
4529   HANDLE handle = ::CreateFileW(wide_path, 0, FILE_SHARE_READ,
4530                                 NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4531   os::free(wide_path);
4532 
4533   return handle;
4534 }
4535 
4536 bool os::same_files(const char* file1, const char* file2) {
4537 
4538   if (file1 == NULL && file2 == NULL) {
4539     return true;
4540   }
4541 
4542   if (file1 == NULL || file2 == NULL) {
4543     return false;
4544   }
4545 
4546   if (strcmp(file1, file2) == 0) {
4547     return true;
4548   }
4549 
4550   HANDLE handle1 = create_read_only_file_handle(file1);
4551   HANDLE handle2 = create_read_only_file_handle(file2);
4552   bool result = false;
4553 
4554   // if we could open both paths...
4555   if (handle1 != INVALID_HANDLE_VALUE && handle2 != INVALID_HANDLE_VALUE) {
4556     BY_HANDLE_FILE_INFORMATION fileInfo1;
4557     BY_HANDLE_FILE_INFORMATION fileInfo2;
4558     if (::GetFileInformationByHandle(handle1, &fileInfo1) &&
4559       ::GetFileInformationByHandle(handle2, &fileInfo2)) {
4560       // the paths are the same if they refer to the same file (fileindex) on the same volume (volume serial number)
4561       if (fileInfo1.dwVolumeSerialNumber == fileInfo2.dwVolumeSerialNumber &&
4562         fileInfo1.nFileIndexHigh == fileInfo2.nFileIndexHigh &&
4563         fileInfo1.nFileIndexLow == fileInfo2.nFileIndexLow) {
4564         result = true;
4565       }
4566     }
4567   }
4568 
4569   // free the handles
4570   if (handle1 != INVALID_HANDLE_VALUE) {
4571     ::CloseHandle(handle1);
4572   }
4573 
4574   if (handle2 != INVALID_HANDLE_VALUE) {
4575     ::CloseHandle(handle2);
4576   }
4577 
4578   return result;
4579 }
4580 
4581 #define FT2INT64(ft) \
4582   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
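     // FILETIME counts 100-nanosecond intervals, so FT2INT64 yields a tick count in
     // 100-ns units; thread_cpu_time() below multiplies by 100 to convert to nanoseconds.
     // For example, UserTime == {dwHighDateTime = 0, dwLowDateTime = 10000000} is
     // 10,000,000 ticks == 1 second of CPU time.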
4583 
4584 
4585 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4586 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4587 // of a thread.
4588 //
4589 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4590 // the fast estimate available on the platform.
4591 
4592 // current_thread_cpu_time() is not optimized for Windows yet
4593 jlong os::current_thread_cpu_time() {
4594   // return user + sys since the cost is the same
4595   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4596 }
4597 
4598 jlong os::thread_cpu_time(Thread* thread) {
4599   // consistent with what current_thread_cpu_time() returns.
4600   return os::thread_cpu_time(thread, true /* user+sys */);
4601 }
4602 
4603 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4604   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4605 }
4606 
4607 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4608   // This code is a copy from the classic VM -> hpi::sysThreadCPUTime
4609   // If this function changes, os::is_thread_cpu_time_supported() should too
4610   FILETIME CreationTime;
4611   FILETIME ExitTime;
4612   FILETIME KernelTime;
4613   FILETIME UserTime;
4614 
4615   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4616                       &ExitTime, &KernelTime, &UserTime) == 0) {
4617     return -1;
4618   } else if (user_sys_cpu_time) {
4619     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4620   } else {
4621     return FT2INT64(UserTime) * 100;
4622   }
4623 }
4624 
4625 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4626   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4627   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4628   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4629   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4630 }
4631 
4632 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4633   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4634   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4635   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4636   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4637 }
4638 
4639 bool os::is_thread_cpu_time_supported() {
4640   // see os::thread_cpu_time
4641   FILETIME CreationTime;
4642   FILETIME ExitTime;
4643   FILETIME KernelTime;
4644   FILETIME UserTime;
4645 
4646   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4647                       &KernelTime, &UserTime) == 0) {
4648     return false;
4649   } else {
4650     return true;
4651   }
4652 }
4653 
4654 // Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4655 // It does have primitives (PDH API) to get CPU usage and run queue length.
4656 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4657 // If we wanted to implement loadavg on Windows, we have a few options:
4658 //
4659 // a) Query CPU usage and run queue length and "fake" an answer by
4660 //    returning the CPU usage if it's under 100%, and the run queue
4661 //    length otherwise.  It turns out that querying is pretty slow
4662 //    on Windows, on the order of 200 microseconds on a fast machine.
4663 //    Note that on Windows the CPU usage value is the % usage
4664 //    since the last time the API was called (and the first call
4665 //    returns 100%), so we'd have to deal with that as well.
4666 //
4667 // b) Sample the "fake" answer using a sampling thread and store
4668 //    the answer in a global variable.  The call to loadavg would
4669 //    just return the value of the global, avoiding the slow query.
4670 //
4671 // c) Sample a better answer using exponential decay to smooth the
4672 //    value.  This is basically the algorithm used by UNIX kernels.
4673 //
4674 // Note that sampling thread starvation could affect both (b) and (c).
4675 int os::loadavg(double loadavg[], int nelem) {
4676   return -1;
4677 }
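     // A minimal, illustrative sketch of option (a) above, deliberately kept out of
     // the build (#if 0). It assumes <pdh.h> is included and pdh.lib is linked, and
     // it only samples the "% Processor Time" counter; the run-queue fallback and
     // the "first call returns 100%" caveat noted above are not handled here.
     #if 0
     static double sample_cpu_usage_percent() {
       PDH_HQUERY query = NULL;
       PDH_HCOUNTER counter = NULL;
       double usage = -1.0;
       if (PdhOpenQuery(NULL, 0, &query) == ERROR_SUCCESS &&
           PdhAddCounterA(query, "\\Processor(_Total)\\% Processor Time", 0, &counter) == ERROR_SUCCESS &&
           PdhCollectQueryData(query) == ERROR_SUCCESS) {
         Sleep(100);  // rate counters need two samples some time apart
         if (PdhCollectQueryData(query) == ERROR_SUCCESS) {
           PDH_FMT_COUNTERVALUE value;
           if (PdhGetFormattedCounterValue(counter, PDH_FMT_DOUBLE, NULL, &value) == ERROR_SUCCESS) {
             usage = value.doubleValue;  // % CPU usage since the previous sample
           }
         }
       }
       if (query != NULL) {
         PdhCloseQuery(query);
       }
       return usage;
     }
     #endif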
4678 
4679 
4680 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4681 bool os::dont_yield() {
4682   return DontYieldALot;
4683 }
4684 
4685 int os::open(const char *path, int oflag, int mode) {
4686   errno_t err;
4687   wchar_t* wide_path = wide_abs_unc_path(path, err);
4688 
4689   if (wide_path == NULL) {
4690     errno = err;
4691     return -1;
4692   }
4693   int fd = ::_wopen(wide_path, oflag | O_BINARY | O_NOINHERIT, mode);
4694   os::free(wide_path);
4695 
4696   if (fd == -1) {
4697     errno = ::GetLastError();
4698   }
4699 
4700   return fd;
4701 }
4702 
4703 FILE* os::open(int fd, const char* mode) {
4704   return ::_fdopen(fd, mode);
4705 }
4706 
4707 // Is a (classpath) directory empty?
4708 bool os::dir_is_empty(const char* path) {
4709   errno_t err;
4710   wchar_t* wide_path = wide_abs_unc_path(path, err, 2);
4711 
4712   if (wide_path == NULL) {
4713     errno = err;
4714     return false;
4715   }
4716 
4717   // Make sure we end with "\\*"
4718   if (wide_path[wcslen(wide_path) - 1] == L'\\') {
4719     wcscat(wide_path, L"*");
4720   } else {
4721     wcscat(wide_path, L"\\*");
4722   }
4723 
4724   WIN32_FIND_DATAW fd;
4725   HANDLE f = ::FindFirstFileW(wide_path, &fd);
4726   os::free(wide_path);
4727   bool is_empty = true;
4728 
4729   if (f != INVALID_HANDLE_VALUE) {
4730     while (is_empty && ::FindNextFileW(f, &fd)) {
4731       // An empty directory contains only the '.' (current directory)
4732       // and '..' (parent directory) entries.
4733       if ((wcscmp(fd.cFileName, L".") != 0) &&
4734           (wcscmp(fd.cFileName, L"..") != 0)) {
4735         is_empty = false;
4736       }
4737     }
4738     FindClose(f);
4739   } else {
4740     errno = ::GetLastError();
4741   }
4742 
4743   return is_empty;
4744 }
4745 
4746 // create binary file, rewriting existing file if required
4747 int os::create_binary_file(const char* path, bool rewrite_existing) {
4748   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4749   if (!rewrite_existing) {
4750     oflags |= _O_EXCL;
4751   }
4752   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4753 }
4754 
4755 // return current position of file pointer
4756 jlong os::current_file_offset(int fd) {
4757   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4758 }
4759 
4760 // move file pointer to the specified offset
4761 jlong os::seek_to_file_offset(int fd, jlong offset) {
4762   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4763 }
4764 
4765 
4766 jlong os::lseek(int fd, jlong offset, int whence) {
4767   return (jlong) ::_lseeki64(fd, offset, whence);
4768 }
4769 
4770 ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4771   OVERLAPPED ov;
4772   DWORD nread;
4773   BOOL result;
4774 
4775   ZeroMemory(&ov, sizeof(ov));
4776   ov.Offset = (DWORD)offset;
4777   ov.OffsetHigh = (DWORD)(offset >> 32);
4778 
4779   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4780 
4781   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4782 
4783   return result ? nread : 0;
4784 }
4785 
4786 
4787 // This method is a slightly reworked copy of JDK's sysNativePath
4788 // from src/windows/hpi/src/path_md.c
4789 
4790 // Convert a pathname to native format.  On win32, this involves forcing all
4791 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4792 // sometimes rejects '/') and removing redundant separators.  The input path is
4793 // assumed to have been converted into the character encoding used by the local
4794 // system.  Because this might be a double-byte encoding, care is taken to
4795 // treat double-byte lead characters correctly.
4796 //
4797 // This procedure modifies the given path in place, as the result is never
4798 // longer than the original.  There is no error return; this operation always
4799 // succeeds.
4800 char * os::native_path(char *path) {
4801   char *src = path, *dst = path, *end = path;
4802   char *colon = NULL;  // If a drive specifier is found, this will
4803                        // point to the colon following the drive letter
4804 
4805   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4806   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4807           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4808 
4809   // Check for leading separators
4810 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4811   while (isfilesep(*src)) {
4812     src++;
4813   }
4814 
4815   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4816     // Remove leading separators if followed by drive specifier.  This
4817     // hack is necessary to support file URLs containing drive
4818     // specifiers (e.g., "file://c:/path").  As a side effect,
4819     // "/c:/path" can be used as an alternative to "c:/path".
4820     *dst++ = *src++;
4821     colon = dst;
4822     *dst++ = ':';
4823     src++;
4824   } else {
4825     src = path;
4826     if (isfilesep(src[0]) && isfilesep(src[1])) {
4827       // UNC pathname: Retain first separator; leave src pointed at
4828       // second separator so that further separators will be collapsed
4829       // into the second separator.  The result will be a pathname
4830       // beginning with "\\\\" followed (most likely) by a host name.
4831       src = dst = path + 1;
4832       path[0] = '\\';     // Force first separator to '\\'
4833     }
4834   }
4835 
4836   end = dst;
4837 
4838   // Remove redundant separators from remainder of path, forcing all
4839   // separators to be '\\' rather than '/'. Also, single byte space
4840   // characters are removed from the end of the path because those
4841   // are not legal ending characters on this operating system.
4842   //
4843   while (*src != '\0') {
4844     if (isfilesep(*src)) {
4845       *dst++ = '\\'; src++;
4846       while (isfilesep(*src)) src++;
4847       if (*src == '\0') {
4848         // Check for trailing separator
4849         end = dst;
4850         if (colon == dst - 2) break;  // "z:\\"
4851         if (dst == path + 1) break;   // "\\"
4852         if (dst == path + 2 && isfilesep(path[0])) {
4853           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4854           // beginning of a UNC pathname.  Even though it is not, by
4855           // itself, a valid UNC pathname, we leave it as is in order
4856           // to be consistent with the path canonicalizer as well
4857           // as the win32 APIs, which treat this case as an invalid
4858           // UNC pathname rather than as an alias for the root
4859           // directory of the current drive.
4860           break;
4861         }
4862         end = --dst;  // Path does not denote a root directory, so
4863                       // remove trailing separator
4864         break;
4865       }
4866       end = dst;
4867     } else {
4868       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4869         *dst++ = *src++;
4870         if (*src) *dst++ = *src++;
4871         end = dst;
4872       } else {  // Copy a single-byte character
4873         char c = *src++;
4874         *dst++ = c;
4875         // Space is not a legal ending character
4876         if (c != ' ') end = dst;
4877       }
4878     }
4879   }
4880 
4881   *end = '\0';
4882 
4883   // For "z:", add "." to work around a bug in the C runtime library
4884   if (colon == dst - 1) {
4885     path[2] = '.';
4886     path[3] = '\0';
4887   }
4888 
4889   return path;
4890 }
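     // Illustrative examples of the normalization above (a sketch based on the rules
     // described in the comments; '\' shown literally):
     //   "/c:/path//to/file"  -> "c:\path\to\file"   (leading separator before a drive dropped,
     //                                                 separators forced to '\' and collapsed)
     //   "\\server\\share\"   -> "\\server\share"    (UNC prefix kept, trailing separator removed)
     //   "z:/"                -> "z:\"               (root of a drive keeps its separator)
     //   "z:"                 -> "z:."               (CRT workaround appended at the end)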
4891 
4892 // This code is a copy of JDK's sysSetLength
4893 // from src/windows/hpi/src/sys_api_md.c
4894 
4895 int os::ftruncate(int fd, jlong length) {
4896   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4897   long high = (long)(length >> 32);
4898   DWORD ret;
4899 
4900   if (h == (HANDLE)(-1)) {
4901     return -1;
4902   }
4903 
4904   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4905   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4906     return -1;
4907   }
4908 
4909   if (::SetEndOfFile(h) == FALSE) {
4910     return -1;
4911   }
4912 
4913   return 0;
4914 }
4915 
4916 int os::get_fileno(FILE* fp) {
4917   return _fileno(fp);
4918 }
4919 
4920 // This code is a copy of JDK's sysSync
4921 // from src/windows/hpi/src/sys_api_md.c
4922 // except for the legacy workaround for a bug in Win 98
4923 
4924 int os::fsync(int fd) {
4925   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4926 
4927   if ((!::FlushFileBuffers(handle)) &&
4928       (GetLastError() != ERROR_ACCESS_DENIED)) {
4929     // from winerror.h
4930     return -1;
4931   }
4932   return 0;
4933 }
4934 
4935 static int nonSeekAvailable(int, long *);
4936 static int stdinAvailable(int, long *);
4937 
4938 // This code is a copy of JDK's sysAvailable
4939 // from src/windows/hpi/src/sys_api_md.c
4940 
4941 int os::available(int fd, jlong *bytes) {
4942   jlong cur, end;
4943   struct _stati64 stbuf64;
4944 
4945   if (::_fstati64(fd, &stbuf64) >= 0) {
4946     int mode = stbuf64.st_mode;
4947     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4948       int ret;
4949       long lpbytes;
4950       if (fd == 0) {
4951         ret = stdinAvailable(fd, &lpbytes);
4952       } else {
4953         ret = nonSeekAvailable(fd, &lpbytes);
4954       }
4955       (*bytes) = (jlong)(lpbytes);
4956       return ret;
4957     }
4958     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4959       return FALSE;
4960     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4961       return FALSE;
4962     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4963       return FALSE;
4964     }
4965     *bytes = end - cur;
4966     return TRUE;
4967   } else {
4968     return FALSE;
4969   }
4970 }
4971 
4972 void os::flockfile(FILE* fp) {
4973   _lock_file(fp);
4974 }
4975 
4976 void os::funlockfile(FILE* fp) {
4977   _unlock_file(fp);
4978 }
4979 
4980 // This code is a copy of JDK's nonSeekAvailable
4981 // from src/windows/hpi/src/sys_api_md.c
4982 
4983 static int nonSeekAvailable(int fd, long *pbytes) {
4984   // This is used for available on non-seekable devices
4985   // (like both named and anonymous pipes, such as pipes
4986   //  connected to an exec'd process).
4987   // Standard Input is a special case.
4988   HANDLE han;
4989 
4990   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4991     return FALSE;
4992   }
4993 
4994   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4995     // PeekNamedPipe fails when at EOF.  In that case we
4996     // simply make *pbytes = 0 which is consistent with the
4997     // behavior we get on Solaris when an fd is at EOF.
4998     // The only alternative is to raise an Exception,
4999     // which isn't really warranted.
5000     //
5001     if (::GetLastError() != ERROR_BROKEN_PIPE) {
5002       return FALSE;
5003     }
5004     *pbytes = 0;
5005   }
5006   return TRUE;
5007 }
5008 
5009 #define MAX_INPUT_EVENTS 2000
5010 
5011 // This code is a copy of JDK's stdinAvailable
5012 // from src/windows/hpi/src/sys_api_md.c
5013 
5014 static int stdinAvailable(int fd, long *pbytes) {
5015   HANDLE han;
5016   DWORD numEventsRead = 0;  // Number of events read from buffer
5017   DWORD numEvents = 0;      // Number of events in buffer
5018   DWORD i = 0;              // Loop index
5019   DWORD curLength = 0;      // Position marker
5020   DWORD actualLength = 0;   // Number of bytes readable
5021   BOOL error = FALSE;       // Error holder
5022   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
5023 
5024   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
5025     return FALSE;
5026   }
5027 
5028   // Construct an array of input records in the console buffer
5029   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
5030   if (error == 0) {
5031     return nonSeekAvailable(fd, pbytes);
5032   }
5033 
5034   // lpBuffer must fit into 64K or else PeekConsoleInput fails
5035   if (numEvents > MAX_INPUT_EVENTS) {
5036     numEvents = MAX_INPUT_EVENTS;
5037   }
5038 
5039   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
5040   if (lpBuffer == NULL) {
5041     return FALSE;
5042   }
5043 
5044   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
5045   if (error == 0) {
5046     os::free(lpBuffer);
5047     return FALSE;
5048   }
5049 
5050   // Examine input records for the number of bytes available
5051   for (i=0; i<numEvents; i++) {
5052     if (lpBuffer[i].EventType == KEY_EVENT) {
5053 
5054       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
5055                                       &(lpBuffer[i].Event);
5056       if (keyRecord->bKeyDown == TRUE) {
5057         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
5058         curLength++;
5059         if (*keyPressed == '\r') {
5060           actualLength = curLength;
5061         }
5062       }
5063     }
5064   }
5065 
5066   if (lpBuffer != NULL) {
5067     os::free(lpBuffer);
5068   }
5069 
5070   *pbytes = (long) actualLength;
5071   return TRUE;
5072 }
5073 
5074 // Map a block of memory.
5075 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
5076                         char *addr, size_t bytes, bool read_only,
5077                         bool allow_exec) {
5078   HANDLE hFile;
5079   char* base;
5080 
5081   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
5082                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
5083   if (hFile == INVALID_HANDLE_VALUE) {
5084     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
5085     return NULL;
5086   }
5087 
5088   if (allow_exec) {
5089     // CreateFileMapping/MapViewOfFileEx can't map executable memory
5090     // unless it comes from a PE image (which the shared archive is not.)
5091     // Even VirtualProtect refuses to give execute access to mapped memory
5092     // that was not previously executable.
5093     //
5094     // Instead, stick the executable region in anonymous memory.  Yuck.
5095     // Penalty is that ~4 pages will not be shareable - in the future
5096     // we might consider DLLizing the shared archive with a proper PE
5097     // header so that mapping executable + sharing is possible.
5098 
5099     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
5100                                 PAGE_READWRITE);
5101     if (base == NULL) {
5102       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
5103       CloseHandle(hFile);
5104       return NULL;
5105     }
5106 
5107     // Record virtual memory allocation
5108     MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
5109 
5110     DWORD bytes_read;
5111     OVERLAPPED overlapped;
5112     overlapped.Offset = (DWORD)file_offset;
5113     overlapped.OffsetHigh = 0;
5114     overlapped.hEvent = NULL;
5115     // ReadFile guarantees that if the return value is true, the requested
5116     // number of bytes were read before returning.
5117     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
5118     if (!res) {
5119       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
5120       release_memory(base, bytes);
5121       CloseHandle(hFile);
5122       return NULL;
5123     }
5124   } else {
5125     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
5126                                     NULL /* file_name */);
5127     if (hMap == NULL) {
5128       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
5129       CloseHandle(hFile);
5130       return NULL;
5131     }
5132 
5133     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
5134     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
5135                                   (DWORD)bytes, addr);
5136     if (base == NULL) {
5137       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
5138       CloseHandle(hMap);
5139       CloseHandle(hFile);
5140       return NULL;
5141     }
5142 
5143     if (CloseHandle(hMap) == 0) {
5144       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
5145       CloseHandle(hFile);
5146       return base;
5147     }
5148   }
5149 
5150   if (allow_exec) {
5151     DWORD old_protect;
5152     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
5153     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
5154 
5155     if (!res) {
5156       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
5157       // Don't consider this a hard error, on IA32 even if the
5158       // VirtualProtect fails, we should still be able to execute
5159       CloseHandle(hFile);
5160       return base;
5161     }
5162   }
5163 
5164   if (CloseHandle(hFile) == 0) {
5165     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
5166     return base;
5167   }
5168 
5169   return base;
5170 }
5171 
5172 
5173 // Remap a block of memory.
5174 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5175                           char *addr, size_t bytes, bool read_only,
5176                           bool allow_exec) {
5177   // This OS does not allow existing memory maps to be remapped so we
5178   // would have to unmap the memory before we remap it.
5179 
5180   // Because there is a small window between unmapping memory and mapping
5181   // it in again with different protections, CDS archives are mapped RW
5182   // on Windows, so this function isn't called.
5183   ShouldNotReachHere();
5184   return NULL;
5185 }
5186 
5187 
5188 // Unmap a block of memory.
5189 // Returns true=success, otherwise false.
5190 
5191 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5192   MEMORY_BASIC_INFORMATION mem_info;
5193   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
5194     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
5195     return false;
5196   }
5197 
5198   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
5199   // Instead, the executable region was allocated using VirtualAlloc(). See
5200   // pd_map_memory() above.
5201   //
5202   // The following flags should match the 'exec_access' flags used for
5203   // VirtualProtect() in pd_map_memory().
5204   if (mem_info.Protect == PAGE_EXECUTE_READ ||
5205       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
5206     return pd_release_memory(addr, bytes);
5207   }
5208 
5209   BOOL result = UnmapViewOfFile(addr);
5210   if (result == 0) {
5211     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
5212     return false;
5213   }
5214   return true;
5215 }
5216 
5217 void os::pause() {
5218   char filename[MAX_PATH];
5219   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5220     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5221   } else {
5222     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5223   }
5224 
5225   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5226   if (fd != -1) {
5227     struct stat buf;
5228     ::close(fd);
5229     while (::stat(filename, &buf) == 0) {
5230       Sleep(100);
5231     }
5232   } else {
5233     jio_fprintf(stderr,
5234                 "Could not open pause file '%s', continuing immediately.\n", filename);
5235   }
5236 }
5237 
5238 Thread* os::ThreadCrashProtection::_protected_thread = NULL;
5239 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
5240 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;
5241 
5242 os::ThreadCrashProtection::ThreadCrashProtection() {
5243 }
5244 
5245 // See the caveats for this class in os_windows.hpp
5246 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back
5247 // into this method and returns false. If no OS EXCEPTION was raised, returns
5248 // true.
5249 // The callback is supposed to provide the method that should be protected.
5250 //
5251 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
5252 
5253   Thread::muxAcquire(&_crash_mux, "CrashProtection");
5254 
5255   _protected_thread = Thread::current_or_null();
5256   assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");
5257 
5258   bool success = true;
5259   __try {
5260     _crash_protection = this;
5261     cb.call();
5262   } __except(EXCEPTION_EXECUTE_HANDLER) {
5263     // only for protection, nothing to do
5264     success = false;
5265   }
5266   _crash_protection = NULL;
5267   _protected_thread = NULL;
5268   Thread::muxRelease(&_crash_mux);
5269   return success;
5270 }
5271 
5272 
5273 class HighResolutionInterval : public CHeapObj<mtThread> {
5274   // The default timer resolution seems to be 10 milliseconds.
5275   // (Where is this written down?)
5276   // If someone wants to sleep for only a fraction of the default,
5277   // then we set the timer resolution down to 1 millisecond for
5278   // the duration of their interval.
5279   // We carefully set the resolution back, since otherwise we
5280   // seem to incur an overhead (3%?) that we don't need.
5281   // CONSIDER: if ms is small, say 3, then we should run with a high resolution timer.
5282   // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
5283   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
5284   // timeBeginPeriod() if the relative error exceeded some threshold.
5285   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
5286   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
5287   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
5288   // resolution timers running.
5289  private:
5290   jlong resolution;
5291  public:
5292   HighResolutionInterval(jlong ms) {
5293     resolution = ms % 10L;
5294     if (resolution != 0) {
5295       MMRESULT result = timeBeginPeriod(1L);
5296     }
5297   }
5298   ~HighResolutionInterval() {
5299     if (resolution != 0) {
5300       MMRESULT result = timeEndPeriod(1L);
5301     }
5302     resolution = 0L;
5303   }
5304 };
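     // Usage sketch (this mirrors PlatformEvent::park(jlong) below): the object is
     // created around a single timed wait so the 1 ms resolution is only in effect
     // for that interval, and the destructor restores the default afterwards.
     //
     //   HighResolutionInterval *phri = NULL;
     //   if (!ForceTimeHighResolution) {
     //     phri = new HighResolutionInterval(millis);  // bumps resolution if millis % 10 != 0
     //   }
     //   DWORD rv = ::WaitForSingleObject(handle, millis);
     //   delete phri;                                  // harmless if NULL; restores resolution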
5305 
5306 // An Event wraps a win32 "CreateEvent" kernel handle.
5307 //
5308 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5309 //
5310 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5311 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5312 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5313 //     In addition, an unpark() operation might fetch the handle field, but the
5314 //     event could recycle between the fetch and the SetEvent() operation.
5315 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5316 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
5317 //     on a stale but recycled handle would be harmless, but in practice this might
5318 //     confuse other non-Sun code, so it's not a viable approach.
5319 //
5320 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5321 //     with the Event.  The event handle is never closed.  This could be construed
5322 //     as handle leakage, but only up to the maximum # of threads that have been extant
5323 //     at any one time.  This shouldn't be an issue, as Windows platforms typically
5324 //     permit a process to have hundreds of thousands of open handles.
5325 //
5326 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5327 //     and release unused handles.
5328 //
5329 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5330 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5331 //
5332 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5333 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5334 //
5335 // We use (2).
5336 //
5337 // TODO-FIXME:
5338 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5339 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5340 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5341 // 3.  Collapse the JSR166 parker event, and the objectmonitor ParkEvent
5342 //     into a single win32 CreateEvent() handle.
5343 //
5344 // Assumption:
5345 //    Only one parker can exist on an event, which is why we allocate
5346 //    them per-thread. Multiple unparkers can coexist.
5347 //
5348 // _Event transitions in park()
5349 //   -1 => -1 : illegal
5350 //    1 =>  0 : pass - return immediately
5351 //    0 => -1 : block; then set _Event to 0 before returning
5352 //
5353 // _Event transitions in unpark()
5354 //    0 => 1 : just return
5355 //    1 => 1 : just return
5356 //   -1 => either 0 or 1; must signal target thread
5357 //         That is, we can safely transition _Event from -1 to either
5358 //         0 or 1.
5359 //
5360 // _Event serves as a restricted-range semaphore.
5361 //   -1 : thread is blocked, i.e. there is a waiter
5362 //    0 : neutral: thread is running or ready,
5363 //        could have been signaled after a wait started
5364 //    1 : signaled - thread is running or ready
5365 //
5366 // Another possible encoding of _Event would be with
5367 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5368 //
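     // Worked example of the encoding above: if unpark() runs first, _Event goes
     // 0 => 1 and the next park() consumes the permit (1 => 0) and returns without
     // blocking; if park() runs first, _Event goes 0 => -1 and the caller blocks on
     // the win32 event until unpark() signals it and publishes 0 or 1.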
5369 
5370 int os::PlatformEvent::park(jlong Millis) {
5371   // Transitions for _Event:
5372   //   -1 => -1 : illegal
5373   //    1 =>  0 : pass - return immediately
5374   //    0 => -1 : block; then set _Event to 0 before returning
5375 
5376   guarantee(_ParkHandle != NULL , "Invariant");
5377   guarantee(Millis > 0          , "Invariant");
5378 
5379   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5380   // the initial park() operation.
5381   // Consider: use atomic decrement instead of CAS-loop
5382 
5383   int v;
5384   for (;;) {
5385     v = _Event;
5386     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5387   }
5388   guarantee((v == 0) || (v == 1), "invariant");
5389   if (v != 0) return OS_OK;
5390 
5391   // Do this the hard way by blocking ...
5392   // TODO: consider a brief spin here, gated on the success of recent
5393   // spin attempts by this thread.
5394   //
5395   // We decompose long timeouts into series of shorter timed waits.
5396   // Evidently large timeout values passed to WaitForSingleObject() are problematic on some
5397   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5398   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5399   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5400   // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
5401   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5402   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5403   // for the already waited time.  This policy does not admit any new outcomes.
5404   // In the future, however, we might want to track the accumulated wait time and
5405   // adjust Millis accordingly if we encounter a spurious wakeup.
5406 
5407   const int MAXTIMEOUT = 0x10000000;
5408   DWORD rv = WAIT_TIMEOUT;
5409   while (_Event < 0 && Millis > 0) {
5410     DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5411     if (Millis > MAXTIMEOUT) {
5412       prd = MAXTIMEOUT;
5413     }
5414     HighResolutionInterval *phri = NULL;
5415     if (!ForceTimeHighResolution) {
5416       phri = new HighResolutionInterval(prd);
5417     }
5418     rv = ::WaitForSingleObject(_ParkHandle, prd);
5419     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5420     if (rv == WAIT_TIMEOUT) {
5421       Millis -= prd;
5422     }
5423     delete phri; // if it is NULL, harmless
5424   }
5425   v = _Event;
5426   _Event = 0;
5427   // see comment at end of os::PlatformEvent::park() below:
5428   OrderAccess::fence();
5429   // If we encounter a nearly simultaneous timeout expiry and unpark()
5430   // we return OS_OK indicating we awoke via unpark().
5431   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5432   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5433 }
5434 
5435 void os::PlatformEvent::park() {
5436   // Transitions for _Event:
5437   //   -1 => -1 : illegal
5438   //    1 =>  0 : pass - return immediately
5439   //    0 => -1 : block; then set _Event to 0 before returning
5440 
5441   guarantee(_ParkHandle != NULL, "Invariant");
5442   // Invariant: Only the thread associated with the Event/PlatformEvent
5443   // may call park().
5444   // Consider: use atomic decrement instead of CAS-loop
5445   int v;
5446   for (;;) {
5447     v = _Event;
5448     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5449   }
5450   guarantee((v == 0) || (v == 1), "invariant");
5451   if (v != 0) return;
5452 
5453   // Do this the hard way by blocking ...
5454   // TODO: consider a brief spin here, gated on the success of recent
5455   // spin attempts by this thread.
5456   while (_Event < 0) {
5457     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5458     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5459   }
5460 
5461   // Usually we'll find _Event == 0 at this point, but as
5462   // an optional optimization we clear it, just in case
5463   // multiple unpark() operations drove _Event up to 1.
5464   _Event = 0;
5465   OrderAccess::fence();
5466   guarantee(_Event >= 0, "invariant");
5467 }
5468 
5469 void os::PlatformEvent::unpark() {
5470   guarantee(_ParkHandle != NULL, "Invariant");
5471 
5472   // Transitions for _Event:
5473   //    0 => 1 : just return
5474   //    1 => 1 : just return
5475   //   -1 => either 0 or 1; must signal target thread
5476   //         That is, we can safely transition _Event from -1 to either
5477   //         0 or 1.
5478   // See also: "Semaphores in Plan 9" by Mullender & Cox
5479   //
5480   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5481   // that it will take two back-to-back park() calls for the owning
5482   // thread to block. This has the benefit of forcing a spurious return
5483   // from the first park() call after an unpark() call which will help
5484   // shake out uses of park() and unpark() without condition variables.
5485 
5486   if (Atomic::xchg(&_Event, 1) >= 0) return;
5487 
5488   ::SetEvent(_ParkHandle);
5489 }
5490 
5491 
5492 // JSR166
5493 // -------------------------------------------------------
5494 
5495 // The Windows implementation of Park is very straightforward: Basic
5496 // operations on Win32 Events turn out to have the right semantics to
5497 // use them directly. We opportunistically reuse the event inherited
5498 // from Monitor.
5499 
5500 void Parker::park(bool isAbsolute, jlong time) {
5501   guarantee(_ParkEvent != NULL, "invariant");
5502   // First, demultiplex/decode time arguments
5503   if (time < 0) { // don't wait
5504     return;
5505   } else if (time == 0 && !isAbsolute) {
5506     time = INFINITE;
5507   } else if (isAbsolute) {
5508     time -= os::javaTimeMillis(); // convert to relative time
5509     if (time <= 0) {  // already elapsed
5510       return;
5511     }
5512   } else { // relative
5513     time /= 1000000;  // Must coarsen from nanos to millis
5514     if (time == 0) {  // Wait for the minimal time unit if zero
5515       time = 1;
5516     }
5517   }
5518 
5519   JavaThread* thread = JavaThread::current();
5520 
5521   // Don't wait if interrupted or already triggered
5522   if (thread->is_interrupted(false) ||
5523       WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5524     ResetEvent(_ParkEvent);
5525     return;
5526   } else {
5527     ThreadBlockInVM tbivm(thread);
5528     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5529     thread->set_suspend_equivalent();
5530 
5531     WaitForSingleObject(_ParkEvent, time);
5532     ResetEvent(_ParkEvent);
5533 
5534     // If externally suspended while waiting, re-suspend
5535     if (thread->handle_special_suspend_equivalent_condition()) {
5536       thread->java_suspend_self();
5537     }
5538   }
5539 }
5540 
5541 void Parker::unpark() {
5542   guarantee(_ParkEvent != NULL, "invariant");
5543   SetEvent(_ParkEvent);
5544 }
5545 
5546 // Platform Monitor implementation
5547 
5548 // Must already be locked
5549 int os::PlatformMonitor::wait(jlong millis) {
5550   assert(millis >= 0, "negative timeout");
5551   int ret = OS_TIMEOUT;
5552   int status = SleepConditionVariableCS(&_cond, &_mutex,
5553                                         millis == 0 ? INFINITE : millis);
5554   if (status != 0) {
5555     ret = OS_OK;
5556   }
5557   #ifndef PRODUCT
5558   else {
5559     DWORD err = GetLastError();
5560     assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
5561   }
5562   #endif
5563   return ret;
5564 }
5565 
5566 // Run the specified command in a separate process. Return its exit value,
5567 // or -1 on failure (e.g. can't create a new process).
5568 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
5569   STARTUPINFO si;
5570   PROCESS_INFORMATION pi;
5571   DWORD exit_code;
5572 
5573   char * cmd_string;
5574   const char * cmd_prefix = "cmd /C ";
5575   size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
5576   cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
5577   if (cmd_string == NULL) {
5578     return -1;
5579   }
5580   cmd_string[0] = '\0';
5581   strcat(cmd_string, cmd_prefix);
5582   strcat(cmd_string, cmd);
5583 
5584   // now replace all '\n' with '&'
5585   char * substring = cmd_string;
5586   while ((substring = strchr(substring, '\n')) != NULL) {
5587     substring[0] = '&';
5588     substring++;
5589   }
5590   memset(&si, 0, sizeof(si));
5591   si.cb = sizeof(si);
5592   memset(&pi, 0, sizeof(pi));
5593   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5594                             cmd_string,    // command line
5595                             NULL,   // process security attribute
5596                             NULL,   // thread security attribute
5597                             TRUE,   // inherits system handles
5598                             0,      // no creation flags
5599                             NULL,   // use parent's environment block
5600                             NULL,   // use parent's starting directory
5601                             &si,    // (in) startup information
5602                             &pi);   // (out) process information
5603 
5604   if (rslt) {
5605     // Wait until child process exits.
5606     WaitForSingleObject(pi.hProcess, INFINITE);
5607 
5608     GetExitCodeProcess(pi.hProcess, &exit_code);
5609 
5610     // Close process and thread handles.
5611     CloseHandle(pi.hProcess);
5612     CloseHandle(pi.hThread);
5613   } else {
5614     exit_code = -1;
5615   }
5616 
5617   FREE_C_HEAP_ARRAY(char, cmd_string);
5618   return (int)exit_code;
5619 }
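     // For example, cmd == "echo a\necho b" is rewritten above to
     // "cmd /C echo a&echo b", so both commands run in a single cmd.exe invocation
     // and the child's exit code is returned.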
5620 
5621 bool os::find(address addr, outputStream* st) {
5622   int offset = -1;
5623   bool result = false;
5624   char buf[256];
5625   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5626     st->print(PTR_FORMAT " ", addr);
5627     if (strlen(buf) < sizeof(buf) - 1) {
5628       char* p = strrchr(buf, '\\');
5629       if (p) {
5630         st->print("%s", p + 1);
5631       } else {
5632         st->print("%s", buf);
5633       }
5634     } else {
5635         // The library name is probably truncated. Let's omit the library name.
5636         // See also JDK-8147512.
5637     }
5638     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5639       st->print("::%s + 0x%x", buf, offset);
5640     }
5641     st->cr();
5642     result = true;
5643   }
5644   return result;
5645 }
5646 
5647 static jint initSock() {
5648   WSADATA wsadata;
5649 
5650   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5651     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5652                 ::GetLastError());
5653     return JNI_ERR;
5654   }
5655   return JNI_OK;
5656 }
5657 
5658 struct hostent* os::get_host_by_name(char* name) {
5659   return (struct hostent*)gethostbyname(name);
5660 }
5661 
5662 int os::socket_close(int fd) {
5663   return ::closesocket(fd);
5664 }
5665 
5666 int os::socket(int domain, int type, int protocol) {
5667   return ::socket(domain, type, protocol);
5668 }
5669 
5670 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5671   return ::connect(fd, him, len);
5672 }
5673 
5674 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5675   return ::recv(fd, buf, (int)nBytes, flags);
5676 }
5677 
5678 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5679   return ::send(fd, buf, (int)nBytes, flags);
5680 }
5681 
5682 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5683   return ::send(fd, buf, (int)nBytes, flags);
5684 }
5685 
5686 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5687 #if defined(IA32)
5688   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5689 #elif defined(AMD64) || defined(_M_ARM64)
5690   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5691 #endif
5692 
5693 // returns true if thread could be suspended,
5694 // false otherwise
5695 static bool do_suspend(HANDLE* h) {
5696   if (h != NULL) {
5697     if (SuspendThread(*h) != ~0) {
5698       return true;
5699     }
5700   }
5701   return false;
5702 }
5703 
5704 // resume the thread
5705 // calling resume on an active thread is a no-op
5706 static void do_resume(HANDLE* h) {
5707   if (h != NULL) {
5708     ResumeThread(*h);
5709   }
5710 }
5711 
5712 // retrieve a suspend/resume context capable handle
5713 // from the tid. Caller validates handle return value.
5714 void get_thread_handle_for_extended_context(HANDLE* h,
5715                                             OSThread::thread_id_t tid) {
5716   if (h != NULL) {
5717     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5718   }
5719 }
5720 
5721 // Thread sampling implementation
5722 //
5723 void os::SuspendedThreadTask::internal_do_task() {
5724   CONTEXT    ctxt;
5725   HANDLE     h = NULL;
5726 
5727   // get context capable handle for thread
5728   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5729 
5730   // sanity
5731   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5732     return;
5733   }
5734 
5735   // suspend the thread
5736   if (do_suspend(&h)) {
5737     ctxt.ContextFlags = sampling_context_flags;
5738     // get thread context
5739     GetThreadContext(h, &ctxt);
5740     SuspendedThreadTaskContext context(_thread, &ctxt);
5741     // pass context to Thread Sampling impl
5742     do_task(context);
5743     // resume thread
5744     do_resume(&h);
5745   }
5746 
5747   // close handle
5748   CloseHandle(h);
5749 }
5750 
5751 bool os::start_debugging(char *buf, int buflen) {
5752   int len = (int)strlen(buf);
5753   char *p = &buf[len];
5754 
5755   jio_snprintf(p, buflen-len,
5756              "\n\n"
5757              "Do you want to debug the problem?\n\n"
5758              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5759              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5760              "Otherwise, select 'No' to abort...",
5761              os::current_process_id(), os::current_thread_id());
5762 
5763   bool yes = os::message_box("Unexpected Error", buf);
5764 
5765   if (yes) {
5766     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5767     // exception. If VM is running inside a debugger, the debugger will
5768     // catch the exception. Otherwise, the breakpoint exception will reach
5769     // the default windows exception handler, which can spawn a debugger and
5770     // automatically attach to the dying VM.
5771     os::breakpoint();
5772     yes = false;
5773   }
5774   return yes;
5775 }
5776 
5777 void* os::get_default_process_handle() {
5778   return (void*)GetModuleHandle(NULL);
5779 }
5780 
5781 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5782 // which is used to find statically linked in agents.
5783 // Additionally for windows, takes into account __stdcall names.
5784 // Parameters:
5785 //            sym_name: Symbol in library we are looking for
5786 //            lib_name: Name of library to look in, NULL for shared libs.
5787 //            is_absolute_path == true if lib_name is absolute path to agent
5788 //                                     such as "C:/a/b/L.dll"
5789 //            == false if only the base name of the library is passed in
5790 //               such as "L"
5791 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5792                                     bool is_absolute_path) {
5793   char *agent_entry_name;
5794   size_t len;
5795   size_t name_len;
5796   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5797   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5798   const char *start;
5799 
5800   if (lib_name != NULL) {
5801     len = name_len = strlen(lib_name);
5802     if (is_absolute_path) {
5803       // Need to strip path, prefix and suffix
5804       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5805         lib_name = ++start;
5806       } else {
5807         // Need to check for drive prefix
5808         if ((start = strchr(lib_name, ':')) != NULL) {
5809           lib_name = ++start;
5810         }
5811       }
5812       if (len <= (prefix_len + suffix_len)) {
5813         return NULL;
5814       }
5815       lib_name += prefix_len;
5816       name_len = strlen(lib_name) - suffix_len;
5817     }
5818   }
5819   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5820   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5821   if (agent_entry_name == NULL) {
5822     return NULL;
5823   }
5824   if (lib_name != NULL) {
5825     const char *p = strrchr(sym_name, '@');
5826     if (p != NULL && p != sym_name) {
5827       // sym_name == _Agent_OnLoad@XX
5828       strncpy(agent_entry_name, sym_name, (p - sym_name));
5829       agent_entry_name[(p-sym_name)] = '\0';
5830       // agent_entry_name == _Agent_OnLoad
5831       strcat(agent_entry_name, "_");
5832       strncat(agent_entry_name, lib_name, name_len);
5833       strcat(agent_entry_name, p);
5834       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5835     } else {
5836       strcpy(agent_entry_name, sym_name);
5837       strcat(agent_entry_name, "_");
5838       strncat(agent_entry_name, lib_name, name_len);
5839     }
5840   } else {
5841     strcpy(agent_entry_name, sym_name);
5842   }
5843   return agent_entry_name;
5844 }
5845 
5846 #ifndef PRODUCT
5847 
// Test the code path in reserve_memory_special() that tries to allocate memory in a single
// contiguous memory block at a particular address.
// The test first tries to find a good approximate address to allocate at by using the same
// method to allocate some memory at any address. The test then tries to allocate memory in
// the vicinity (not directly after it, to avoid a by-chance reuse of that exact location).
// This is, of course, only a heuristic; there is no guarantee that the vicinity of the
// previously allocated memory is actually available for allocation. The only failure that
// is reported is when the test asks for a particular location but gets a different valid
// one. A NULL return value at this point is not considered an error and may be legitimate.
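// For example, with a (hypothetical) 2M large page size the probe below first reserves
// 8M at an OS-chosen address, releases it again, and then requests a single 2M page at
// result + 2M, i.e. one page into the range that was just shown to be reservable.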
5858 void TestReserveMemorySpecial_test() {
5859   if (!UseLargePages) {
5860     return;
5861   }
5862   // save current value of globals
5863   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5864   bool old_use_numa_interleaving = UseNUMAInterleaving;
5865 
5866   // set globals to make sure we hit the correct code path
5867   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5868 
5869   // do an allocation at an address selected by the OS to get a good one.
5870   const size_t large_allocation_size = os::large_page_size() * 4;
5871   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
  if (result == NULL) {
    // Failed to reserve memory at an OS-chosen address; per the comment above,
    // a NULL result here is legitimate and not treated as a test failure.
  } else {
5874     os::release_memory_special(result, large_allocation_size);
5875 
    // allocate another page within the recently allocated memory area; that location
    // seems to be a good candidate, since we managed to reserve it at least once.
5878     const size_t expected_allocation_size = os::large_page_size();
5879     char* expected_location = result + os::large_page_size();
5880     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
    if (actual_location == NULL) {
      // Failed to reserve memory near the requested address; again this is
      // not treated as a test failure (see the comment above the function).
    } else {
5883       // release memory
5884       os::release_memory_special(actual_location, expected_allocation_size);
      // check only now, after the memory has been released, so that nothing is leaked
      // if the assert fires.
5886       assert(actual_location == expected_location,
5887              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5888              expected_location, expected_allocation_size, actual_location);
5889     }
5890   }
5891 
5892   // restore globals
5893   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5894   UseNUMAInterleaving = old_use_numa_interleaving;
5895 }
5896 #endif // PRODUCT
5897 
5898 /*
5899   All the defined signal names for Windows.
5900 
5901   NOTE that not all of these names are accepted by FindSignal!
5902 
5903   For various reasons some of these may be rejected at runtime.
5904 
  Here are the names currently accepted by users of sun.misc.Signal as of
  1.4.1 (ignoring potential interaction with use of signal chaining, etc.):
5907 
5908      (LIST TBD)
5909 
5910 */
5911 int os::get_signal_number(const char* name) {
  static const struct {
    const char* name;
    int         number;
  } siglabels [] = {
    // derived from version 6.0 VC98/include/signal.h
    {"ABRT",  SIGABRT },  // abnormal termination triggered by abort call
    {"FPE",   SIGFPE  },  // floating point exception
    {"SEGV",  SIGSEGV },  // segment violation
    {"INT",   SIGINT  },  // interrupt
    {"TERM",  SIGTERM },  // software term signal from kill
    {"BREAK", SIGBREAK},  // Ctrl-Break sequence
    {"ILL",   SIGILL  }   // illegal instruction
  };
5924   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5925     if (strcmp(name, siglabels[i].name) == 0) {
5926       return siglabels[i].number;
5927     }
5928   }
5929   return -1;
5930 }
5931 
5932 // Fast current thread access
5933 
5934 int os::win32::_thread_ptr_offset = 0;
5935 
5936 static void call_wrapper_dummy() {}
5937 
5938 // We need to call the os_exception_wrapper once so that it sets
5939 // up the offset from FS of the thread pointer.
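// (The recorded offset is presumably what generated code, e.g. the x86 get_thread
// stub on 32-bit Windows, uses to load the current Thread* directly from the
// FS-based thread information block instead of making a runtime call.)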
5940 void os::win32::initialize_thread_ptr_offset() {
5941   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5942                            NULL, methodHandle(), NULL, NULL);
5943 }
5944 
5945 bool os::supports_map_sync() {
5946   return false;
5947 }