1 /*
   2  * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "jvm.h"
  30 #include "classfile/classLoader.hpp"
  31 #include "classfile/systemDictionary.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/disassembler.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "logging/log.hpp"
  39 #include "logging/logStream.hpp"
  40 #include "memory/allocation.inline.hpp"
  41 #include "memory/filemap.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "os_share_windows.hpp"
  44 #include "os_windows.inline.hpp"
  45 #include "prims/jniFastGetField.hpp"
  46 #include "prims/jvm_misc.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/atomic.hpp"
  49 #include "runtime/extendedPC.hpp"
  50 #include "runtime/globals.hpp"
  51 #include "runtime/interfaceSupport.inline.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/objectMonitor.hpp"
  56 #include "runtime/orderAccess.hpp"
  57 #include "runtime/osThread.hpp"
  58 #include "runtime/perfMemory.hpp"
  59 #include "runtime/safepointMechanism.hpp"
  60 #include "runtime/sharedRuntime.hpp"
  61 #include "runtime/statSampler.hpp"
  62 #include "runtime/stubRoutines.hpp"
  63 #include "runtime/thread.inline.hpp"
  64 #include "runtime/threadCritical.hpp"
  65 #include "runtime/timer.hpp"
  66 #include "runtime/vm_version.hpp"
  67 #include "services/attachListener.hpp"
  68 #include "services/memTracker.hpp"
  69 #include "services/runtimeService.hpp"
  70 #include "utilities/align.hpp"
  71 #include "utilities/decoder.hpp"
  72 #include "utilities/defaultStream.hpp"
  73 #include "utilities/events.hpp"
  74 #include "utilities/macros.hpp"
  75 #include "utilities/vmError.hpp"
  76 #include "symbolengine.hpp"
  77 #include "windbghelp.hpp"
  78 
  79 #ifdef _DEBUG
  80 #include <crtdbg.h>
  81 #endif
  82 
  83 #include <windows.h>
  84 #include <sys/types.h>
  85 #include <sys/stat.h>
  86 #include <sys/timeb.h>
  87 #include <objidl.h>
  88 #include <shlobj.h>
  89 
  90 #include <malloc.h>
  91 #include <signal.h>
  92 #include <direct.h>
  93 #include <errno.h>
  94 #include <fcntl.h>
  95 #include <io.h>
  96 #include <process.h>              // For _beginthreadex(), _endthreadex()
  97 #include <imagehlp.h>             // For os::dll_address_to_function_name
  98 // for enumerating dll libraries
  99 #include <vdmdbg.h>
 100 #include <psapi.h>
 101 #include <mmsystem.h>
 102 #include <winsock2.h>
 103 
 104 // for timer info max values which include all bits
 105 #define ALL_64_BITS CONST64(-1)
 106 
 107 // For DLL loading/load error detection
 108 // Constants from the PE/COFF file format
 109 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 110 #define IMAGE_FILE_SIGNATURE_LENGTH 4
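     // In the PE/COFF layout, the DWORD at file offset 0x3c (the DOS header's
     // e_lfanew field) holds the file offset of the 4-byte "PE\0\0" signature;
     // the COFF file header, whose machine field os::dll_load reads, follows it.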
 111 
 112 static HANDLE main_process;
 113 static HANDLE main_thread;
 114 static int    main_thread_id;
 115 
 116 static FILETIME process_creation_time;
 117 static FILETIME process_exit_time;
 118 static FILETIME process_user_time;
 119 static FILETIME process_kernel_time;
 120 
 121 #ifdef _M_AMD64
 122   #define __CPU__ amd64
 123 #else
 124   #define __CPU__ i486
 125 #endif
 126 
 127 #if INCLUDE_AOT
 128 PVOID  topLevelVectoredExceptionHandler = NULL;
 129 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 130 #endif
 131 
 132 // save DLL module handle, used by GetModuleFileName
 133 
 134 HINSTANCE vm_lib_handle;
 135 
 136 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 137   switch (reason) {
 138   case DLL_PROCESS_ATTACH:
 139     vm_lib_handle = hinst;
 140     if (ForceTimeHighResolution) {
 141       timeBeginPeriod(1L);
 142     }
 143     WindowsDbgHelp::pre_initialize();
 144     SymbolEngine::pre_initialize();
 145     break;
 146   case DLL_PROCESS_DETACH:
 147     if (ForceTimeHighResolution) {
 148       timeEndPeriod(1L);
 149     }
 150 #if INCLUDE_AOT
 151     if (topLevelVectoredExceptionHandler != NULL) {
 152       RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
 153       topLevelVectoredExceptionHandler = NULL;
 154     }
 155 #endif
 156     break;
 157   default:
 158     break;
 159   }
 160   return true;
 161 }
 162 
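     // A FILETIME counts 100-nanosecond intervals; the low 32 bits are divided
     // by 10^7 ("split") to get seconds, and the high 32 bits are scaled by
     // roughly 2^32 / 10^7 ("high/split"), yielding the time in seconds as a double.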
 163 static inline double fileTimeAsDouble(FILETIME* time) {
 164   const double high  = (double) ((unsigned int) ~0);
 165   const double split = 10000000.0;
 166   double result = (time->dwLowDateTime / split) +
 167                    time->dwHighDateTime * (high/split);
 168   return result;
 169 }
 170 
 171 // Implementation of os
 172 
 173 bool os::unsetenv(const char* name) {
 174   assert(name != NULL, "Null pointer");
 175   return (SetEnvironmentVariable(name, NULL) == TRUE);
 176 }
 177 
 178 // No setuid programs under Windows.
 179 bool os::have_special_privileges() {
 180   return false;
 181 }
 182 
 183 
 184 // This method is a periodic task to check for misbehaving JNI applications
 185 // under CheckJNI; we can add any periodic checks here.
 186 // On Windows it currently does nothing.
 187 void os::run_periodic_checks() {
 188   return;
 189 }
 190 
 191 // previous UnhandledExceptionFilter, if there is one
 192 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 193 
 194 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 195 
 196 void os::init_system_properties_values() {
 197   // sysclasspath, java_home, dll_dir
 198   {
 199     char *home_path;
 200     char *dll_path;
 201     char *pslash;
 202     const char *bin = "\\bin";
 203     char home_dir[MAX_PATH + 1];
 204     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 205 
 206     if (alt_home_dir != NULL)  {
 207       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 208       home_dir[MAX_PATH] = '\0';
 209     } else {
 210       os::jvm_path(home_dir, sizeof(home_dir));
 211       // Found the full path to jvm.dll.
 212       // Now cut the path to <java_home>/jre if we can.
 213       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 214       pslash = strrchr(home_dir, '\\');
 215       if (pslash != NULL) {
 216         *pslash = '\0';                   // get rid of \{client|server}
 217         pslash = strrchr(home_dir, '\\');
 218         if (pslash != NULL) {
 219           *pslash = '\0';                 // get rid of \bin
 220         }
 221       }
 222     }
 223 
 224     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 225     strcpy(home_path, home_dir);
 226     Arguments::set_java_home(home_path);
 227     FREE_C_HEAP_ARRAY(char, home_path);
 228 
 229     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 230                                 mtInternal);
 231     strcpy(dll_path, home_dir);
 232     strcat(dll_path, bin);
 233     Arguments::set_dll_dir(dll_path);
 234     FREE_C_HEAP_ARRAY(char, dll_path);
 235 
 236     if (!set_boot_path('\\', ';')) {
 237       vm_exit_during_initialization("Failed setting boot class path.", NULL);
 238     }
 239   }
 240 
 241 // library_path
 242 #define EXT_DIR "\\lib\\ext"
 243 #define BIN_DIR "\\bin"
 244 #define PACKAGE_DIR "\\Sun\\Java"
 245   {
 246     // Win32 library search order (See the documentation for LoadLibrary):
 247     //
 248     // 1. The directory from which the application is loaded.
 249     // 2. The system-wide Java Extensions directory (Java only)
 250     // 3. System directory (GetSystemDirectory)
 251     // 4. Windows directory (GetWindowsDirectory)
 252     // 5. The PATH environment variable
 253     // 6. The current directory
 254 
 255     char *library_path;
 256     char tmp[MAX_PATH];
 257     char *path_str = ::getenv("PATH");
 258 
 259     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 260                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 261 
 262     library_path[0] = '\0';
 263 
 264     GetModuleFileName(NULL, tmp, sizeof(tmp));
 265     *(strrchr(tmp, '\\')) = '\0';
 266     strcat(library_path, tmp);
 267 
 268     GetWindowsDirectory(tmp, sizeof(tmp));
 269     strcat(library_path, ";");
 270     strcat(library_path, tmp);
 271     strcat(library_path, PACKAGE_DIR BIN_DIR);
 272 
 273     GetSystemDirectory(tmp, sizeof(tmp));
 274     strcat(library_path, ";");
 275     strcat(library_path, tmp);
 276 
 277     GetWindowsDirectory(tmp, sizeof(tmp));
 278     strcat(library_path, ";");
 279     strcat(library_path, tmp);
 280 
 281     if (path_str) {
 282       strcat(library_path, ";");
 283       strcat(library_path, path_str);
 284     }
 285 
 286     strcat(library_path, ";.");
 287 
 288     Arguments::set_library_path(library_path);
 289     FREE_C_HEAP_ARRAY(char, library_path);
 290   }
 291 
 292   // Default extensions directory
 293   {
 294     char path[MAX_PATH];
 295     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 296     GetWindowsDirectory(path, MAX_PATH);
 297     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 298             path, PACKAGE_DIR, EXT_DIR);
 299     Arguments::set_ext_dirs(buf);
 300   }
 301   #undef EXT_DIR
 302   #undef BIN_DIR
 303   #undef PACKAGE_DIR
 304 
 305 #ifndef _WIN64
 306   // set our UnhandledExceptionFilter and save any previous one
 307   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 308 #endif
 309 
 310   // Done
 311   return;
 312 }
 313 
 314 void os::breakpoint() {
 315   DebugBreak();
 316 }
 317 
 318 // Invoked from the BREAKPOINT Macro
 319 extern "C" void breakpoint() {
 320   os::breakpoint();
 321 }
 322 
 323 // The RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 324 // So far, this method is only used by Native Memory Tracking, which is
 325 // only supported on Windows XP or later.
 326 //
 327 int os::get_native_stack(address* stack, int frames, int toSkip) {
 328   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 329   for (int index = captured; index < frames; index ++) {
 330     stack[index] = NULL;
 331   }
 332   return captured;
 333 }
 334 
 335 
 336 // os::current_stack_base()
 337 //
 338 //   Returns the base of the stack, which is the stack's
 339 //   starting address.  This function must be called
 340 //   while running on the stack of the thread being queried.
 341 
 342 address os::current_stack_base() {
 343   MEMORY_BASIC_INFORMATION minfo;
 344   address stack_bottom;
 345   size_t stack_size;
 346 
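       // Querying the address of a stack-allocated local (&minfo) describes the
       // memory region of the current thread's stack; its AllocationBase is the
       // lowest address of the stack reservation, i.e. the stack bottom.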
 347   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 348   stack_bottom =  (address)minfo.AllocationBase;
 349   stack_size = minfo.RegionSize;
 350 
 351   // Add up the sizes of all the regions with the same
 352   // AllocationBase.
 353   while (1) {
 354     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 355     if (stack_bottom == (address)minfo.AllocationBase) {
 356       stack_size += minfo.RegionSize;
 357     } else {
 358       break;
 359     }
 360   }
 361   return stack_bottom + stack_size;
 362 }
 363 
 364 size_t os::current_stack_size() {
 365   size_t sz;
 366   MEMORY_BASIC_INFORMATION minfo;
 367   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 368   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 369   return sz;
 370 }
 371 
 372 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
 373   MEMORY_BASIC_INFORMATION minfo;
 374   committed_start = NULL;
 375   committed_size = 0;
 376   address top = start + size;
 377   const address start_addr = start;
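       // Walk the range region by region: each VirtualQuery call describes the
       // region containing 'start', and 'start' then advances to that region's end.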
 378   while (start < top) {
 379     VirtualQuery(start, &minfo, sizeof(minfo));
 380     if ((minfo.State & MEM_COMMIT) == 0) {  // not committed
 381       if (committed_start != NULL) {
 382         break;
 383       }
 384     } else {  // committed
 385       if (committed_start == NULL) {
 386         committed_start = start;
 387       }
 388       size_t offset = start - (address)minfo.BaseAddress;
 389       committed_size += minfo.RegionSize - offset;
 390     }
 391     start = (address)minfo.BaseAddress + minfo.RegionSize;
 392   }
 393 
 394   if (committed_start == NULL) {
 395     assert(committed_size == 0, "Sanity");
 396     return false;
 397   } else {
 398     assert(committed_start >= start_addr && committed_start < top, "Out of range");
 399     // current region may go beyond the limit, trim to the limit
 400     committed_size = MIN2(committed_size, size_t(top - committed_start));
 401     return true;
 402   }
 403 }
 404 
 405 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 406   const struct tm* time_struct_ptr = localtime(clock);
 407   if (time_struct_ptr != NULL) {
 408     *res = *time_struct_ptr;
 409     return res;
 410   }
 411   return NULL;
 412 }
 413 
 414 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 415   const struct tm* time_struct_ptr = gmtime(clock);
 416   if (time_struct_ptr != NULL) {
 417     *res = *time_struct_ptr;
 418     return res;
 419   }
 420   return NULL;
 421 }
 422 
 423 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 424 
 425 // Thread start routine for all newly created threads
 426 static unsigned __stdcall thread_native_entry(Thread* thread) {
 427 
 428   thread->record_stack_base_and_size();
 429 
 430   // Try to randomize the cache line index of hot stack frames.
 431   // This helps when threads with the same stack traces evict each other's
 432   // cache lines. The threads can be either from the same JVM instance, or
 433   // from different JVM instances. The benefit is especially noticeable on
 434   // processors with hyperthreading technology.
 435   static int counter = 0;
 436   int pid = os::current_process_id();
 437   _alloca(((pid ^ counter++) & 7) * 128);
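       // _alloca() shifts this thread's stack pointer down by a pseudo-random
       // multiple of 128 bytes (0..896), so identical frames land on different
       // cache lines.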
 438 
 439   thread->initialize_thread_current();
 440 
 441   OSThread* osthr = thread->osthread();
 442   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 443 
 444   if (UseNUMA) {
 445     int lgrp_id = os::numa_get_group_id();
 446     if (lgrp_id != -1) {
 447       thread->set_lgrp_id(lgrp_id);
 448     }
 449   }
 450 
 451   // Diagnostic code to investigate JDK-6573254
 452   int res = 30115;  // non-java thread
 453   if (thread->is_Java_thread()) {
 454     res = 20115;    // java thread
 455   }
 456 
 457   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 458 
 459   // Install a win32 structured exception handler around every thread created
 460   // by the VM, so the VM can generate an error dump when an exception occurs
 461   // in a non-Java thread (e.g. the VM thread).
 462   __try {
 463     thread->call_run();
 464   } __except(topLevelExceptionFilter(
 465                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 466     // Nothing to do.
 467   }
 468 
 469   // Note: at this point the thread object may already have deleted itself.
 470   // Do not dereference it from here on out.
 471 
 472   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 473 
 474   // One less thread is executing.
 475   // When the VMThread gets here, the main thread may have already exited,
 476   // which frees the CodeHeap containing the Atomic::add code.
 477   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 478     Atomic::dec(&os::win32::_os_thread_count);
 479   }
 480 
 481   // Thread must not return from exit_process_or_thread(), but if it does,
 482   // let it proceed to exit normally
 483   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 484 }
 485 
 486 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 487                                   int thread_id) {
 488   // Allocate the OSThread object
 489   OSThread* osthread = new OSThread(NULL, NULL);
 490   if (osthread == NULL) return NULL;
 491 
 492   // Initialize the JDK library's interrupt event.
 493   // This should really be done when OSThread is constructed,
 494   // but there is no way for a constructor to report failure to
 495   // allocate the event.
 496   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 497   if (interrupt_event == NULL) {
 498     delete osthread;
 499     return NULL;
 500   }
 501   osthread->set_interrupt_event(interrupt_event);
 502 
 503   // Store info on the Win32 thread into the OSThread
 504   osthread->set_thread_handle(thread_handle);
 505   osthread->set_thread_id(thread_id);
 506 
 507   if (UseNUMA) {
 508     int lgrp_id = os::numa_get_group_id();
 509     if (lgrp_id != -1) {
 510       thread->set_lgrp_id(lgrp_id);
 511     }
 512   }
 513 
 514   // Initial thread state is INITIALIZED, not SUSPENDED
 515   osthread->set_state(INITIALIZED);
 516 
 517   return osthread;
 518 }
 519 
 520 
 521 bool os::create_attached_thread(JavaThread* thread) {
 522 #ifdef ASSERT
 523   thread->verify_not_published();
 524 #endif
 525   HANDLE thread_h;
 526   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 527                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 528     fatal("DuplicateHandle failed\n");
 529   }
 530   OSThread* osthread = create_os_thread(thread, thread_h,
 531                                         (int)current_thread_id());
 532   if (osthread == NULL) {
 533     return false;
 534   }
 535 
 536   // Initial thread state is RUNNABLE
 537   osthread->set_state(RUNNABLE);
 538 
 539   thread->set_osthread(osthread);
 540 
 541   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 542     os::current_thread_id());
 543 
 544   return true;
 545 }
 546 
 547 bool os::create_main_thread(JavaThread* thread) {
 548 #ifdef ASSERT
 549   thread->verify_not_published();
 550 #endif
 551   if (_starting_thread == NULL) {
 552     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 553     if (_starting_thread == NULL) {
 554       return false;
 555     }
 556   }
 557 
 558   // The primordial thread is runnable from the start
 559   _starting_thread->set_state(RUNNABLE);
 560 
 561   thread->set_osthread(_starting_thread);
 562   return true;
 563 }
 564 
 565 // Helper function to trace _beginthreadex attributes,
 566 //  similar to os::Posix::describe_pthread_attr()
 567 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 568                                                size_t stacksize, unsigned initflag) {
 569   stringStream ss(buf, buflen);
 570   if (stacksize == 0) {
 571     ss.print("stacksize: default, ");
 572   } else {
 573     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 574   }
 575   ss.print("flags: ");
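       // X-macro: ALL(PRINT_FLAG) expands PRINT_FLAG once per flag listed in ALL,
       // printing the name of each flag that is set in 'initflag'.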
 576   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 577   #define ALL(X) \
 578     X(CREATE_SUSPENDED) \
 579     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 580   ALL(PRINT_FLAG)
 581   #undef ALL
 582   #undef PRINT_FLAG
 583   return buf;
 584 }
 585 
 586 // Allocate and initialize a new OSThread
 587 bool os::create_thread(Thread* thread, ThreadType thr_type,
 588                        size_t stack_size) {
 589   unsigned thread_id;
 590 
 591   // Allocate the OSThread object
 592   OSThread* osthread = new OSThread(NULL, NULL);
 593   if (osthread == NULL) {
 594     return false;
 595   }
 596 
 597   // Initialize the JDK library's interrupt event.
 598   // This should really be done when OSThread is constructed,
 599   // but there is no way for a constructor to report failure to
 600   // allocate the event.
 601   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 602   if (interrupt_event == NULL) {
 603     delete osthread;
 604     return false;
 605   }
 606   osthread->set_interrupt_event(interrupt_event);
 607   // We don't call set_interrupted(false) as it will trip the assert in there
 608   // as we are not operating on the current thread. We don't need to call it
 609   // because the initial state is already correct.
 610 
 611   thread->set_osthread(osthread);
 612 
 613   if (stack_size == 0) {
 614     switch (thr_type) {
 615     case os::java_thread:
 616       // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag
 617       if (JavaThread::stack_size_at_create() > 0) {
 618         stack_size = JavaThread::stack_size_at_create();
 619       }
 620       break;
 621     case os::compiler_thread:
 622       if (CompilerThreadStackSize > 0) {
 623         stack_size = (size_t)(CompilerThreadStackSize * K);
 624         break;
 625       } // else fall through:
 626         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 627     case os::vm_thread:
 628     case os::pgc_thread:
 629     case os::cgc_thread:
 630     case os::watcher_thread:
 631       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 632       break;
 633     }
 634   }
 635 
 636   // Create the Win32 thread
 637   //
 638   // Contrary to what the MSDN documentation says, "stack_size" in
 639   // _beginthreadex() does not specify the stack size. Instead, it specifies
 640   // the size of the initially committed space. The stack size is determined
 641   // by the PE header in the executable. If the committed "stack_size" is
 642   // larger than the default value in the PE header, the stack is rounded up
 643   // to the nearest multiple of 1MB. For example, if the launcher has a
 644   // default stack size of 320k, specifying any size less than 320k does not
 645   // affect the actual stack size at all; it only affects the initial
 646   // commitment. On the other hand, specifying a 'stack_size' larger than the
 647   // default value may cause a significant increase in memory usage, because
 648   // not only is the stack space rounded up to a multiple of 1MB, but the
 649   // entire space is also committed upfront.
 650   //
 651   // Finally, Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
 652   // for CreateThread() that makes it treat 'stack_size' as the stack size.
 653   // However, we are not supposed to call CreateThread() directly according
 654   // to the MSDN documentation, because the JVM uses the C runtime library.
 655   // The good news is that the flag appears to work with _beginthreadex() as well.
 656 
 657   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 658   HANDLE thread_handle =
 659     (HANDLE)_beginthreadex(NULL,
 660                            (unsigned)stack_size,
 661                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 662                            thread,
 663                            initflag,
 664                            &thread_id);
 665 
 666   char buf[64];
 667   if (thread_handle != NULL) {
 668     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 669       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 670   } else {
 671     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 672       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 673     // Log some OS information which might explain why creating the thread failed.
 674     log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
 675     LogStream st(Log(os, thread)::info());
 676     os::print_memory_info(&st);
 677   }
 678 
 679   if (thread_handle == NULL) {
 680     // Need to clean up stuff we've allocated so far
 681     thread->set_osthread(NULL);
 682     delete osthread;
 683     return false;
 684   }
 685 
 686   Atomic::inc(&os::win32::_os_thread_count);
 687 
 688   // Store info on the Win32 thread into the OSThread
 689   osthread->set_thread_handle(thread_handle);
 690   osthread->set_thread_id(thread_id);
 691 
 692   // Initial thread state is INITIALIZED, not SUSPENDED
 693   osthread->set_state(INITIALIZED);
 694 
 695   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 696   return true;
 697 }
 698 
 699 
 700 // Free Win32 resources related to the OSThread
 701 void os::free_thread(OSThread* osthread) {
 702   assert(osthread != NULL, "osthread not set");
 703 
 704   // We are told to free resources of the argument thread,
 705   // but we can only really operate on the current thread.
 706   assert(Thread::current()->osthread() == osthread,
 707          "os::free_thread but not current thread");
 708 
 709   CloseHandle(osthread->thread_handle());
 710   delete osthread;
 711 }
 712 
 713 static jlong first_filetime;
 714 static jlong initial_performance_count;
 715 static jlong performance_frequency;
 716 
 717 
 718 jlong as_long(LARGE_INTEGER x) {
 719   jlong result = 0; // initialization to avoid warning
 720   set_high(&result, x.HighPart);
 721   set_low(&result, x.LowPart);
 722   return result;
 723 }
 724 
 725 
 726 jlong os::elapsed_counter() {
 727   LARGE_INTEGER count;
 728   QueryPerformanceCounter(&count);
 729   return as_long(count) - initial_performance_count;
 730 }
 731 
 732 
 733 jlong os::elapsed_frequency() {
 734   return performance_frequency;
 735 }
 736 
 737 
 738 julong os::available_memory() {
 739   return win32::available_memory();
 740 }
 741 
 742 julong os::win32::available_memory() {
 743   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
 744   // incorrect value if total memory is larger than 4GB
 745   MEMORYSTATUSEX ms;
 746   ms.dwLength = sizeof(ms);
 747   GlobalMemoryStatusEx(&ms);
 748 
 749   return (julong)ms.ullAvailPhys;
 750 }
 751 
 752 julong os::physical_memory() {
 753   return win32::physical_memory();
 754 }
 755 
 756 bool os::has_allocatable_memory_limit(julong* limit) {
 757   MEMORYSTATUSEX ms;
 758   ms.dwLength = sizeof(ms);
 759   GlobalMemoryStatusEx(&ms);
 760 #ifdef _LP64
 761   *limit = (julong)ms.ullAvailVirtual;
 762   return true;
 763 #else
 764   // Limit to 1400m because of the 2gb address space wall
 765   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 766   return true;
 767 #endif
 768 }
 769 
 770 int os::active_processor_count() {
 771   // User has overridden the number of active processors
 772   if (ActiveProcessorCount > 0) {
 773     log_trace(os)("active_processor_count: "
 774                   "active processor count set by user : %d",
 775                   ActiveProcessorCount);
 776     return ActiveProcessorCount;
 777   }
 778 
 779   DWORD_PTR lpProcessAffinityMask = 0;
 780   DWORD_PTR lpSystemAffinityMask = 0;
 781   int proc_count = processor_count();
 782   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 783       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
 784     // The number of active processors is the number of set bits in the process affinity mask
 785     int bitcount = 0;
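         // Kernighan's bit count: 'mask & (mask - 1)' clears the lowest set bit,
         // so the loop runs once per set bit in the affinity mask.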
 786     while (lpProcessAffinityMask != 0) {
 787       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 788       bitcount++;
 789     }
 790     return bitcount;
 791   } else {
 792     return proc_count;
 793   }
 794 }
 795 
 796 uint os::processor_id() {
 797   return (uint)GetCurrentProcessorNumber();
 798 }
 799 
 800 void os::set_native_thread_name(const char *name) {
 801 
 802   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 803   //
 804   // Note that unfortunately this only works if the process
 805   // is already attached to a debugger; a debugger must observe
 806   // the exception below to show the correct name.
 807 
 808   // If there is no debugger attached, skip raising the exception
 809   if (!IsDebuggerPresent()) {
 810     return;
 811   }
 812 
 813   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 814   struct {
 815     DWORD dwType;     // must be 0x1000
 816     LPCSTR szName;    // pointer to name (in user addr space)
 817     DWORD dwThreadID; // thread ID (-1=caller thread)
 818     DWORD dwFlags;    // reserved for future use, must be zero
 819   } info;
 820 
 821   info.dwType = 0x1000;
 822   info.szName = name;
 823   info.dwThreadID = -1;
 824   info.dwFlags = 0;
 825 
 826   __try {
 827     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 828   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 829 }
 830 
 831 bool os::bind_to_processor(uint processor_id) {
 832   // Not yet implemented.
 833   return false;
 834 }
 835 
 836 void os::win32::initialize_performance_counter() {
 837   LARGE_INTEGER count;
 838   QueryPerformanceFrequency(&count);
 839   performance_frequency = as_long(count);
 840   QueryPerformanceCounter(&count);
 841   initial_performance_count = as_long(count);
 842 }
 843 
 844 
 845 double os::elapsedTime() {
 846   return (double) elapsed_counter() / (double) elapsed_frequency();
 847 }
 848 
 849 
 850 // Windows format:
 851 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 852 // Java format:
 853 //   Java standards require the number of milliseconds since 1/1/1970
 854 
 855 // Constant offset - calculated using offset()
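     // 1601-01-01 and 1970-01-01 are 11,644,473,600 seconds apart; in FILETIME's
     // 100-ns units that is 11644473600 * 10^7 = 116444736000000000.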
 856 static jlong  _offset   = 116444736000000000;
 857 // Fake time counter for reproducible results when debugging
 858 static jlong  fake_time = 0;
 859 
 860 #ifdef ASSERT
 861 // Just to be safe, recalculate the offset in debug mode
 862 static jlong _calculated_offset = 0;
 863 static int   _has_calculated_offset = 0;
 864 
 865 jlong offset() {
 866   if (_has_calculated_offset) return _calculated_offset;
 867   SYSTEMTIME java_origin;
 868   java_origin.wYear          = 1970;
 869   java_origin.wMonth         = 1;
 870   java_origin.wDayOfWeek     = 0; // ignored
 871   java_origin.wDay           = 1;
 872   java_origin.wHour          = 0;
 873   java_origin.wMinute        = 0;
 874   java_origin.wSecond        = 0;
 875   java_origin.wMilliseconds  = 0;
 876   FILETIME jot;
 877   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 878     fatal("Error = %d\nWindows error", GetLastError());
 879   }
 880   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 881   _has_calculated_offset = 1;
 882   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 883   return _calculated_offset;
 884 }
 885 #else
 886 jlong offset() {
 887   return _offset;
 888 }
 889 #endif
 890 
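     // Convert a FILETIME (100-ns ticks since 1601) to Java milliseconds since the
     // 1970 epoch: subtract the epoch offset, then divide by 10000 (100 ns -> 1 ms).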
 891 jlong windows_to_java_time(FILETIME wt) {
 892   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 893   return (a - offset()) / 10000;
 894 }
 895 
 896 // Returns time ticks in tenths of microseconds (i.e. 100-nanosecond units)
 897 jlong windows_to_time_ticks(FILETIME wt) {
 898   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 899   return (a - offset());
 900 }
 901 
 902 FILETIME java_to_windows_time(jlong l) {
 903   jlong a = (l * 10000) + offset();
 904   FILETIME result;
 905   result.dwHighDateTime = high(a);
 906   result.dwLowDateTime  = low(a);
 907   return result;
 908 }
 909 
 910 bool os::supports_vtime() { return true; }
 911 
 912 double os::elapsedVTime() {
 913   FILETIME created;
 914   FILETIME exited;
 915   FILETIME kernel;
 916   FILETIME user;
 917   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 918     // the resolution of windows_to_java_time() should be sufficient (ms)
 919     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 920   } else {
 921     return elapsedTime();
 922   }
 923 }
 924 
 925 jlong os::javaTimeMillis() {
 926   FILETIME wt;
 927   GetSystemTimeAsFileTime(&wt);
 928   return windows_to_java_time(wt);
 929 }
 930 
 931 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 932   FILETIME wt;
 933   GetSystemTimeAsFileTime(&wt);
 934   jlong ticks = windows_to_time_ticks(wt); // 10th of micros
 935   jlong secs = jlong(ticks / 10000000); // 10000 * 1000
 936   seconds = secs;
 937   nanos = jlong(ticks - (secs*10000000)) * 100;
 938 }
 939 
 940 jlong os::javaTimeNanos() {
 941     LARGE_INTEGER current_count;
 942     QueryPerformanceCounter(&current_count);
 943     double current = as_long(current_count);
 944     double freq = performance_frequency;
 945     jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 946     return time;
 947 }
 948 
 949 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 950   jlong freq = performance_frequency;
 951   if (freq < NANOSECS_PER_SEC) {
 952     // the performance counter is 64 bits and we will
 953     // be multiplying it -- so no wrap in 64 bits
 954     info_ptr->max_value = ALL_64_BITS;
 955   } else if (freq > NANOSECS_PER_SEC) {
 956     // use the max value the counter can reach to
 957     // determine the max value which could be returned
 958     julong max_counter = (julong)ALL_64_BITS;
 959     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 960   } else {
 961     // the performance counter is 64 bits and we will
 962     // be using it directly -- so no wrap in 64 bits
 963     info_ptr->max_value = ALL_64_BITS;
 964   }
 965 
 966   // using a counter, so no skipping
 967   info_ptr->may_skip_backward = false;
 968   info_ptr->may_skip_forward = false;
 969 
 970   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 971 }
 972 
 973 char* os::local_time_string(char *buf, size_t buflen) {
 974   SYSTEMTIME st;
 975   GetLocalTime(&st);
 976   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 977                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 978   return buf;
 979 }
 980 
 981 bool os::getTimesSecs(double* process_real_time,
 982                       double* process_user_time,
 983                       double* process_system_time) {
 984   HANDLE h_process = GetCurrentProcess();
 985   FILETIME create_time, exit_time, kernel_time, user_time;
 986   BOOL result = GetProcessTimes(h_process,
 987                                 &create_time,
 988                                 &exit_time,
 989                                 &kernel_time,
 990                                 &user_time);
 991   if (result != 0) {
 992     FILETIME wt;
 993     GetSystemTimeAsFileTime(&wt);
 994     jlong rtc_millis = windows_to_java_time(wt);
 995     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
 996     *process_user_time =
 997       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
 998     *process_system_time =
 999       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
1000     return true;
1001   } else {
1002     return false;
1003   }
1004 }
1005 
1006 void os::shutdown() {
1007   // allow PerfMemory to attempt cleanup of any persistent resources
1008   perfMemory_exit();
1009 
1010   // flush buffered output, finish log files
1011   ostream_abort();
1012 
1013   // Check for abort hook
1014   abort_hook_t abort_hook = Arguments::abort_hook();
1015   if (abort_hook != NULL) {
1016     abort_hook();
1017   }
1018 }
1019 
1020 
1021 static HANDLE dumpFile = NULL;
1022 
1023 // Check if dump file can be created.
1024 void os::check_dump_limit(char* buffer, size_t buffsz) {
1025   bool status = true;
1026   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1027     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1028     status = false;
1029   }
1030 
1031 #ifndef ASSERT
1032   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1033     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1034     status = false;
1035   }
1036 #endif
1037 
1038   if (status) {
1039     const char* cwd = get_current_directory(NULL, 0);
1040     int pid = current_process_id();
1041     if (cwd != NULL) {
1042       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1043     } else {
1044       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1045     }
1046 
1047     if (dumpFile == NULL &&
1048        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1049                  == INVALID_HANDLE_VALUE) {
1050       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1051       status = false;
1052     }
1053   }
1054   VMError::record_coredump_status(buffer, status);
1055 }
1056 
1057 void os::abort(bool dump_core, void* siginfo, const void* context) {
1058   EXCEPTION_POINTERS ep;
1059   MINIDUMP_EXCEPTION_INFORMATION mei;
1060   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1061 
1062   HANDLE hProcess = GetCurrentProcess();
1063   DWORD processId = GetCurrentProcessId();
1064   MINIDUMP_TYPE dumpType;
1065 
1066   shutdown();
1067   if (!dump_core || dumpFile == NULL) {
1068     if (dumpFile != NULL) {
1069       CloseHandle(dumpFile);
1070     }
1071     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1072   }
1073 
1074   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1075     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1076 
1077   if (siginfo != NULL && context != NULL) {
1078     ep.ContextRecord = (PCONTEXT) context;
1079     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1080 
1081     mei.ThreadId = GetCurrentThreadId();
1082     mei.ExceptionPointers = &ep;
1083     pmei = &mei;
1084   } else {
1085     pmei = NULL;
1086   }
1087 
1088   // Older versions of dbghelp.dll (the one shipped with Win2003, for example) may not support all
1089   // the dump types we really want. If the first call fails, fall back to just MiniDumpWithFullMemory.
1090   if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
1091       !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
1092     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1093   }
1094   CloseHandle(dumpFile);
1095   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1096 }
1097 
1098 // Die immediately, no exit hook, no abort hook, no cleanup.
1099 void os::die() {
1100   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1101 }
1102 
1103 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1104 //  * dirent_md.c       1.15 00/02/02
1105 //
1106 // The declarations for DIR and struct dirent are in jvm_win32.h.
1107 
1108 // Caller must have already run dirname through JVM_NativePath, which removes
1109 // duplicate slashes and converts all instances of '/' into '\\'.
1110 
1111 DIR * os::opendir(const char *dirname) {
1112   assert(dirname != NULL, "just checking");   // hotspot change
1113   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1114   DWORD fattr;                                // hotspot change
1115   char alt_dirname[4] = { 0, 0, 0, 0 };
1116 
1117   if (dirp == 0) {
1118     errno = ENOMEM;
1119     return 0;
1120   }
1121 
1122   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1123   // as a directory in FindFirstFile().  We detect this case here and
1124   // prepend the current drive name.
1125   //
1126   if (dirname[1] == '\0' && dirname[0] == '\\') {
1127     alt_dirname[0] = _getdrive() + 'A' - 1;
1128     alt_dirname[1] = ':';
1129     alt_dirname[2] = '\\';
1130     alt_dirname[3] = '\0';
1131     dirname = alt_dirname;
1132   }
1133 
1134   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1135   if (dirp->path == 0) {
1136     free(dirp);
1137     errno = ENOMEM;
1138     return 0;
1139   }
1140   strcpy(dirp->path, dirname);
1141 
1142   fattr = GetFileAttributes(dirp->path);
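       // 0xffffffff is INVALID_FILE_ATTRIBUTES: the path does not exist or cannot be queried.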
1143   if (fattr == 0xffffffff) {
1144     free(dirp->path);
1145     free(dirp);
1146     errno = ENOENT;
1147     return 0;
1148   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1149     free(dirp->path);
1150     free(dirp);
1151     errno = ENOTDIR;
1152     return 0;
1153   }
1154 
1155   // Append "*.*", or possibly "\\*.*", to path
1156   if (dirp->path[1] == ':' &&
1157       (dirp->path[2] == '\0' ||
1158       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1159     // No '\\' needed for cases like "Z:" or "Z:\"
1160     strcat(dirp->path, "*.*");
1161   } else {
1162     strcat(dirp->path, "\\*.*");
1163   }
1164 
1165   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1166   if (dirp->handle == INVALID_HANDLE_VALUE) {
1167     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1168       free(dirp->path);
1169       free(dirp);
1170       errno = EACCES;
1171       return 0;
1172     }
1173   }
1174   return dirp;
1175 }
1176 
1177 struct dirent * os::readdir(DIR *dirp) {
1178   assert(dirp != NULL, "just checking");      // hotspot change
1179   if (dirp->handle == INVALID_HANDLE_VALUE) {
1180     return NULL;
1181   }
1182 
1183   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1184 
1185   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1186     if (GetLastError() == ERROR_INVALID_HANDLE) {
1187       errno = EBADF;
1188       return NULL;
1189     }
1190     FindClose(dirp->handle);
1191     dirp->handle = INVALID_HANDLE_VALUE;
1192   }
1193 
1194   return &dirp->dirent;
1195 }
1196 
1197 int os::closedir(DIR *dirp) {
1198   assert(dirp != NULL, "just checking");      // hotspot change
1199   if (dirp->handle != INVALID_HANDLE_VALUE) {
1200     if (!FindClose(dirp->handle)) {
1201       errno = EBADF;
1202       return -1;
1203     }
1204     dirp->handle = INVALID_HANDLE_VALUE;
1205   }
1206   free(dirp->path);
1207   free(dirp);
1208   return 0;
1209 }
1210 
1211 // This must be hard coded because it's the system's temporary
1212 // directory, not the java application's temp directory, a la java.io.tmpdir.
1213 const char* os::get_temp_directory() {
1214   static char path_buf[MAX_PATH];
1215   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1216     return path_buf;
1217   } else {
1218     path_buf[0] = '\0';
1219     return path_buf;
1220   }
1221 }
1222 
1223 // Needs to be in the os-specific directory because Windows requires an
1224 // additional header file, <direct.h>
1225 const char* os::get_current_directory(char *buf, size_t buflen) {
1226   int n = static_cast<int>(buflen);
1227   if (buflen > INT_MAX)  n = INT_MAX;
1228   return _getcwd(buf, n);
1229 }
1230 
1231 //-----------------------------------------------------------
1232 // Helper functions for fatal error handler
1233 #ifdef _WIN64
1234 // Helper routine which returns true if the address is
1235 // within the NTDLL address space.
1236 //
1237 static bool _addr_in_ntdll(address addr) {
1238   HMODULE hmod;
1239   MODULEINFO minfo;
1240 
1241   hmod = GetModuleHandle("NTDLL.DLL");
1242   if (hmod == NULL) return false;
1243   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1244                                           &minfo, sizeof(MODULEINFO))) {
1245     return false;
1246   }
1247 
1248   if ((addr >= minfo.lpBaseOfDll) &&
1249       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1250     return true;
1251   } else {
1252     return false;
1253   }
1254 }
1255 #endif
1256 
1257 struct _modinfo {
1258   address addr;
1259   char*   full_path;   // points to a char buffer
1260   int     buflen;      // size of the buffer
1261   address base_addr;
1262 };
1263 
1264 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1265                                   address top_address, void * param) {
1266   struct _modinfo *pmod = (struct _modinfo *)param;
1267   if (!pmod) return -1;
1268 
1269   if (base_addr   <= pmod->addr &&
1270       top_address > pmod->addr) {
1271     // if a buffer is provided, copy path name to the buffer
1272     if (pmod->full_path) {
1273       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1274     }
1275     pmod->base_addr = base_addr;
1276     return 1;
1277   }
1278   return 0;
1279 }
1280 
1281 bool os::dll_address_to_library_name(address addr, char* buf,
1282                                      int buflen, int* offset) {
1283   // buf is not optional, but offset is optional
1284   assert(buf != NULL, "sanity check");
1285 
1286 // NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
1287 //       return the full path to the DLL file; sometimes it returns the path
1288 //       to the corresponding PDB file (debug info), and sometimes it only
1289 //       returns a partial path, which makes life painful.
1290 
1291   struct _modinfo mi;
1292   mi.addr      = addr;
1293   mi.full_path = buf;
1294   mi.buflen    = buflen;
1295   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1296     // buf already contains path name
1297     if (offset) *offset = addr - mi.base_addr;
1298     return true;
1299   }
1300 
1301   buf[0] = '\0';
1302   if (offset) *offset = -1;
1303   return false;
1304 }
1305 
1306 bool os::dll_address_to_function_name(address addr, char *buf,
1307                                       int buflen, int *offset,
1308                                       bool demangle) {
1309   // buf is not optional, but offset is optional
1310   assert(buf != NULL, "sanity check");
1311 
1312   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1313     return true;
1314   }
1315   if (offset != NULL)  *offset  = -1;
1316   buf[0] = '\0';
1317   return false;
1318 }
1319 
1320 // save the start and end address of jvm.dll into param[0] and param[1]
1321 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1322                            address top_address, void * param) {
1323   if (!param) return -1;
1324 
1325   if (base_addr   <= (address)_locate_jvm_dll &&
1326       top_address > (address)_locate_jvm_dll) {
1327     ((address*)param)[0] = base_addr;
1328     ((address*)param)[1] = top_address;
1329     return 1;
1330   }
1331   return 0;
1332 }
1333 
1334 address vm_lib_location[2];    // start and end address of jvm.dll
1335 
1336 // check if addr is inside jvm.dll
1337 bool os::address_is_in_vm(address addr) {
1338   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1339     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1340       assert(false, "Can't find jvm module.");
1341       return false;
1342     }
1343   }
1344 
1345   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1346 }
1347 
1348 // print module info; param is outputStream*
1349 static int _print_module(const char* fname, address base_address,
1350                          address top_address, void* param) {
1351   if (!param) return -1;
1352 
1353   outputStream* st = (outputStream*)param;
1354 
1355   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1356   return 0;
1357 }
1358 
1359 // Loads a .dll/.so and,
1360 // in case of error, checks whether the .dll/.so was built for the
1361 // same architecture that Hotspot is running on.
1362 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1363   log_info(os)("attempting shared library load of %s", name);
1364 
1365   void * result = LoadLibrary(name);
1366   if (result != NULL) {
1367     Events::log(NULL, "Loaded shared library %s", name);
1368     // Recalculate pdb search path if a DLL was loaded successfully.
1369     SymbolEngine::recalc_search_path();
1370     log_info(os)("shared library load of %s was successful", name);
1371     return result;
1372   }
1373   DWORD errcode = GetLastError();
1374   // Read system error message into ebuf
1375   // It may or may not be overwritten below (in the for loop and just above)
1376   lasterror(ebuf, (size_t) ebuflen);
1377   ebuf[ebuflen - 1] = '\0';
1378   Events::log(NULL, "Loading shared library %s failed, error code %lu", name, errcode);
1379   log_info(os)("shared library load of %s failed, error code %lu", name, errcode);
1380 
1381   if (errcode == ERROR_MOD_NOT_FOUND) {
1382     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1383     ebuf[ebuflen - 1] = '\0';
1384     return NULL;
1385   }
1386 
1387   // Parsing the dll below.
1388   // If we can read the dll info and find that the dll was built
1389   // for an architecture other than the one Hotspot is running on,
1390   // then print "DLL was built for a different architecture" to the buffer;
1391   // else call os::lasterror to obtain the system error message.
1392   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1393   if (fd < 0) {
1394     return NULL;
1395   }
1396 
1397   uint32_t signature_offset;
1398   uint16_t lib_arch = 0;
1399   bool failed_to_get_lib_arch =
1400     ( // Go to position 3c in the dll
1401      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1402      ||
1403      // Read location of signature
1404      (sizeof(signature_offset) !=
1405      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1406      ||
1407      // Go to COFF File Header in dll
1408      // that is located after "signature" (4 bytes long)
1409      (os::seek_to_file_offset(fd,
1410      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1411      ||
1412      // Read field that contains code of architecture
1413      // that dll was built for
1414      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1415     );
1416 
1417   ::close(fd);
1418   if (failed_to_get_lib_arch) {
1419     // file i/o error - report os::lasterror(...) msg
1420     return NULL;
1421   }
1422 
1423   typedef struct {
1424     uint16_t arch_code;
1425     char* arch_name;
1426   } arch_t;
1427 
1428   static const arch_t arch_array[] = {
1429     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1430     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"}
1431   };
1432 #if (defined _M_AMD64)
1433   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1434 #elif (defined _M_IX86)
1435   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1436 #else
1437   #error Method os::dll_load requires that one of following \
1438          is defined :_M_AMD64 or _M_IX86
1439 #endif
1440 
1441 
1442   // Obtain strings for the printf operation:
1443   // lib_arch_str shall name the platform this .dll was built for,
1444   // running_arch_str shall name the platform Hotspot was built for.
1445   char *running_arch_str = NULL, *lib_arch_str = NULL;
1446   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1447     if (lib_arch == arch_array[i].arch_code) {
1448       lib_arch_str = arch_array[i].arch_name;
1449     }
1450     if (running_arch == arch_array[i].arch_code) {
1451       running_arch_str = arch_array[i].arch_name;
1452     }
1453   }
1454 
1455   assert(running_arch_str,
1456          "Didn't find running architecture code in arch_array");
1457 
1458   // If the architecture is right
1459   // but some other error took place - report os::lasterror(...) msg
1460   if (lib_arch == running_arch) {
1461     return NULL;
1462   }
1463 
1464   if (lib_arch_str != NULL) {
1465     ::_snprintf(ebuf, ebuflen - 1,
1466                 "Can't load %s-bit .dll on a %s-bit platform",
1467                 lib_arch_str, running_arch_str);
1468   } else {
1469     // don't know what architecture this dll was built for
1470     ::_snprintf(ebuf, ebuflen - 1,
1471                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1472                 lib_arch, running_arch_str);
1473   }
1474 
1475   return NULL;
1476 }
1477 
1478 void os::print_dll_info(outputStream *st) {
1479   st->print_cr("Dynamic libraries:");
1480   get_loaded_modules_info(_print_module, (void *)st);
1481 }
1482 
1483 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1484   HANDLE   hProcess;
1485 
1486 # define MAX_NUM_MODULES 128
1487   HMODULE     modules[MAX_NUM_MODULES];
1488   static char filename[MAX_PATH];
1489   int         result = 0;
1490 
1491   int pid = os::current_process_id();
1492   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1493                          FALSE, pid);
1494   if (hProcess == NULL) return 0;
1495 
1496   DWORD size_needed;
1497   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1498     CloseHandle(hProcess);
1499     return 0;
1500   }
1501 
1502   // number of modules that are currently loaded
1503   int num_modules = size_needed / sizeof(HMODULE);
1504 
1505   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1506     // Get Full pathname:
1507     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1508       filename[0] = '\0';
1509     }
1510 
1511     MODULEINFO modinfo;
1512     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1513       modinfo.lpBaseOfDll = NULL;
1514       modinfo.SizeOfImage = 0;
1515     }
1516 
1517     // Invoke callback function
1518     result = callback(filename, (address)modinfo.lpBaseOfDll,
1519                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1520     if (result) break;
1521   }
1522 
1523   CloseHandle(hProcess);
1524   return result;
1525 }
1526 
1527 bool os::get_host_name(char* buf, size_t buflen) {
1528   DWORD size = (DWORD)buflen;
1529   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1530 }
1531 
1532 void os::get_summary_os_info(char* buf, size_t buflen) {
1533   stringStream sst(buf, buflen);
1534   os::win32::print_windows_version(&sst);
1535   // chop off newline character
1536   char* nl = strchr(buf, '\n');
1537   if (nl != NULL) *nl = '\0';
1538 }
1539 
1540 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1541 #if _MSC_VER >= 1900
1542   // Starting with Visual Studio 2015, vsnprintf is C99 compliant.
1543   int result = ::vsnprintf(buf, len, fmt, args);
1544   // If an encoding error occurred (result < 0) then it's not clear
1545   // whether the buffer is NUL terminated, so ensure it is.
1546   if ((result < 0) && (len > 0)) {
1547     buf[len - 1] = '\0';
1548   }
1549   return result;
1550 #else
1551   // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
1552   // _vsnprintf, whose behavior seems to be *mostly* consistent across
1553   // versions.  However, when len == 0, avoid _vsnprintf too, and just
1554   // go straight to _vscprintf.  The output is going to be truncated in
1555   // that case, except in the unusual case of empty output.  More
1556   // importantly, the documentation for various versions of Visual Studio
1557   // are inconsistent about the behavior of _vsnprintf when len == 0,
1558   // including it possibly being an error.
1559   int result = -1;
1560   if (len > 0) {
1561     result = _vsnprintf(buf, len, fmt, args);
1562     // If output (including NUL terminator) is truncated, the buffer
1563     // won't be NUL terminated.  Add the trailing NUL specified by C99.
1564     if ((result < 0) || ((size_t)result >= len)) {
1565       buf[len - 1] = '\0';
1566     }
1567   }
1568   if (result < 0) {
1569     result = _vscprintf(fmt, args);
1570   }
1571   return result;
1572 #endif // _MSC_VER dispatch
1573 }
1574 
1575 static inline time_t get_mtime(const char* filename) {
1576   struct stat st;
1577   int ret = os::stat(filename, &st);
1578   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
1579   return st.st_mtime;
1580 }
1581 
1582 int os::compare_file_modified_times(const char* file1, const char* file2) {
1583   time_t t1 = get_mtime(file1);
1584   time_t t2 = get_mtime(file2);
1585   return t1 - t2;
1586 }
1587 
1588 void os::print_os_info_brief(outputStream* st) {
1589   os::print_os_info(st);
1590 }
1591 
1592 void os::win32::print_uptime_info(outputStream* st) {
1593   unsigned long long ticks = GetTickCount64();
1594   os::print_dhm(st, "OS uptime:", ticks/1000);
1595 }
1596 
1597 void os::print_os_info(outputStream* st) {
1598 #ifdef ASSERT
1599   char buffer[1024];
1600   st->print("HostName: ");
1601   if (get_host_name(buffer, sizeof(buffer))) {
1602     st->print("%s ", buffer);
1603   } else {
1604     st->print("N/A ");
1605   }
1606 #endif
1607   st->print_cr("OS:");
1608   os::win32::print_windows_version(st);
1609 
1610   os::win32::print_uptime_info(st);
1611 
1612 #ifdef _LP64
1613   VM_Version::print_platform_virtualization_info(st);
1614 #endif
1615 }
1616 
1617 void os::win32::print_windows_version(outputStream* st) {
1618   OSVERSIONINFOEX osvi;
1619   VS_FIXEDFILEINFO *file_info;
1620   TCHAR kernel32_path[MAX_PATH];
1621   UINT len, ret;
1622 
1623   // Use the GetVersionEx information to see if we're on a server or
1624   // workstation edition of Windows. Starting with Windows 8.1 we can't
1625   // trust the OS version information returned by this API.
1626   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1627   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1628   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1629     st->print_cr("Call to GetVersionEx failed");
1630     return;
1631   }
1632   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1633 
1634   // Get the full path to \Windows\System32\kernel32.dll and use that for
1635   // determining what version of Windows we're running on.
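       // Leave room in kernel32_path to append "\kernel32.dll" and a terminating NUL.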
1636   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1637   ret = GetSystemDirectory(kernel32_path, len);
1638   if (ret == 0 || ret > len) {
1639     st->print_cr("Call to GetSystemDirectory failed");
1640     return;
1641   }
1642   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1643 
1644   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1645   if (version_size == 0) {
1646     st->print_cr("Call to GetFileVersionInfoSize failed");
1647     return;
1648   }
1649 
1650   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1651   if (version_info == NULL) {
1652     st->print_cr("Failed to allocate version_info");
1653     return;
1654   }
1655 
1656   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1657     os::free(version_info);
1658     st->print_cr("Call to GetFileVersionInfo failed");
1659     return;
1660   }
1661 
1662   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1663     os::free(version_info);
1664     st->print_cr("Call to VerQueryValue failed");
1665     return;
1666   }
1667 
1668   int major_version = HIWORD(file_info->dwProductVersionMS);
1669   int minor_version = LOWORD(file_info->dwProductVersionMS);
1670   int build_number = HIWORD(file_info->dwProductVersionLS);
1671   int build_minor = LOWORD(file_info->dwProductVersionLS);
1672   int os_vers = major_version * 1000 + minor_version;
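       // os_vers encodes major.minor as major * 1000 + minor, e.g. Windows 7 (6.1)
       // maps to 6001 and Windows 10 (10.0) maps to 10000 in the switch below.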
1673   os::free(version_info);
1674 
1675   st->print(" Windows ");
1676   switch (os_vers) {
1677 
1678   case 6000:
1679     if (is_workstation) {
1680       st->print("Vista");
1681     } else {
1682       st->print("Server 2008");
1683     }
1684     break;
1685 
1686   case 6001:
1687     if (is_workstation) {
1688       st->print("7");
1689     } else {
1690       st->print("Server 2008 R2");
1691     }
1692     break;
1693 
1694   case 6002:
1695     if (is_workstation) {
1696       st->print("8");
1697     } else {
1698       st->print("Server 2012");
1699     }
1700     break;
1701 
1702   case 6003:
1703     if (is_workstation) {
1704       st->print("8.1");
1705     } else {
1706       st->print("Server 2012 R2");
1707     }
1708     break;
1709 
1710   case 10000:
1711     if (is_workstation) {
1712       st->print("10");
1713     } else {
1714       // Distinguish Windows Server 2016 from 2019 by build number.
1715       // The Windows Server 2019 GA (10/2018) build number is 17763.
1716       if (build_number > 17762) {
1717         st->print("Server 2019");
1718       } else {
1719         st->print("Server 2016");
1720       }
1721     }
1722     break;
1723 
1724   default:
1725     // Unrecognized Windows version; print its major and minor versions.
1726     st->print("%d.%d", major_version, minor_version);
1727     break;
1728   }
1729 
1730   // Retrieve SYSTEM_INFO via GetNativeSystemInfo so that we can find out
1731   // whether we are running on a 64-bit processor.
1732   SYSTEM_INFO si;
1733   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1734   GetNativeSystemInfo(&si);
1735   if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1736     st->print(" , 64 bit");
1737   }
1738 
1739   st->print(" Build %d", build_number);
1740   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1741   st->cr();
1742 }
1743 
1744 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1745   // Nothing to do for now.
1746 }
1747 
1748 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1749   HKEY key;
1750   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1751                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1752   if (status == ERROR_SUCCESS) {
1753     DWORD size = (DWORD)buflen;
1754     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1755     if (status != ERROR_SUCCESS) {
1756         strncpy(buf, "## __CPU__", buflen);
1757     }
1758     RegCloseKey(key);
1759   } else {
1760     // Fall back to a generic CPU description.
1761     strncpy(buf, "## __CPU__", buflen);
1762   }
1763 }
1764 
1765 void os::print_memory_info(outputStream* st) {
1766   st->print("Memory:");
1767   st->print(" %dk page", os::vm_page_size()>>10);
1768 
1769   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
1770   // incorrect value if total memory is larger than 4GB.
1771   MEMORYSTATUSEX ms;
1772   ms.dwLength = sizeof(ms);
1773   int r1 = GlobalMemoryStatusEx(&ms);
1774 
1775   if (r1 != 0) {
1776     st->print(", system-wide physical " INT64_FORMAT "M ",
1777              (int64_t) ms.ullTotalPhys >> 20);
1778     st->print("(" INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20);
1779 
1780     st->print("TotalPageFile size " INT64_FORMAT "M ",
1781              (int64_t) ms.ullTotalPageFile >> 20);
1782     st->print("(AvailPageFile size " INT64_FORMAT "M)",
1783              (int64_t) ms.ullAvailPageFile >> 20);
1784 
1785     // On 32-bit, Total/AvailVirtual are interesting (they show how close we get to the 2-4 GB per-process limit).
1786 #if defined(_M_IX86)
1787     st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ",
1788              (int64_t) ms.ullTotalVirtual >> 20);
1789     st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
1790 #endif
1791   } else {
1792     st->print(", GlobalMemoryStatusEx did not succeed so we miss some memory values.");
1793   }
1794 
1795   // extended memory statistics for a process
1796   PROCESS_MEMORY_COUNTERS_EX pmex;
1797   ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
1798   pmex.cb = sizeof(pmex);
1799   int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));
1800 
1801   if (r2 != 0) {
1802     st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ",
1803              (int64_t) pmex.WorkingSetSize >> 20);
1804     st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);
1805 
1806     st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ",
1807              (int64_t) pmex.PrivateUsage >> 20);
1808     st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);
1809   } else {
1810     st->print("\nGetProcessMemoryInfo did not succeed so we miss some memory values.");
1811   }
1812 
1813   st->cr();
1814 }
1815 
1816 bool os::signal_sent_by_kill(const void* siginfo) {
1817   // TODO: Is this possible?
1818   return false;
1819 }
1820 
1821 void os::print_siginfo(outputStream *st, const void* siginfo) {
1822   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1823   st->print("siginfo:");
1824 
1825   char tmp[64];
1826   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1827     strcpy(tmp, "EXCEPTION_??");
1828   }
1829   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1830 
1831   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1832        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1833        er->NumberParameters >= 2) {
1834     switch (er->ExceptionInformation[0]) {
1835     case 0: st->print(", reading address"); break;
1836     case 1: st->print(", writing address"); break;
1837     case 8: st->print(", data execution prevention violation at address"); break;
1838     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1839                        er->ExceptionInformation[0]);
1840     }
1841     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1842   } else {
1843     int num = er->NumberParameters;
1844     if (num > 0) {
1845       st->print(", ExceptionInformation=");
1846       for (int i = 0; i < num; i++) {
1847         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1848       }
1849     }
1850   }
1851   st->cr();
1852 }
1853 
1854 bool os::signal_thread(Thread* thread, int sig, const char* reason) {
1855   // TODO: Can we kill thread?
1856   return false;
1857 }
1858 
1859 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1860   // do nothing
1861 }
1862 
1863 static char saved_jvm_path[MAX_PATH] = {0};
1864 
1865 // Find the full path to the current module, jvm.dll
1866 void os::jvm_path(char *buf, jint buflen) {
1867   // Error checking.
1868   if (buflen < MAX_PATH) {
1869     assert(false, "must use a large-enough buffer");
1870     buf[0] = '\0';
1871     return;
1872   }
1873   // Lazily resolve the path to the current module.
1874   if (saved_jvm_path[0] != 0) {
1875     strcpy(buf, saved_jvm_path);
1876     return;
1877   }
1878 
1879   buf[0] = '\0';
1880   if (Arguments::sun_java_launcher_is_altjvm()) {
1881     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1882     // for a JAVA_HOME environment variable and fix up the path so it
1883     // looks like jvm.dll is installed there (append a fake suffix
1884     // hotspot/jvm.dll).
1885     char* java_home_var = ::getenv("JAVA_HOME");
1886     if (java_home_var != NULL && java_home_var[0] != 0 &&
1887         strlen(java_home_var) < (size_t)buflen) {
1888       strncpy(buf, java_home_var, buflen);
1889 
1890       // determine if this is a legacy image or modules image
1891       // modules image doesn't have "jre" subdirectory
1892       size_t len = strlen(buf);
1893       char* jrebin_p = buf + len;
1894       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1895       if (0 != _access(buf, 0)) {
1896         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1897       }
1898       len = strlen(buf);
1899       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1900     }
1901   }
1902 
1903   if (buf[0] == '\0') {
1904     GetModuleFileName(vm_lib_handle, buf, buflen);
1905   }
1906   strncpy(saved_jvm_path, buf, MAX_PATH);
1907   saved_jvm_path[MAX_PATH - 1] = '\0';
1908 }
1909 
1910 
1911 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1912 #ifndef _WIN64
1913   st->print("_");
1914 #endif
1915 }
1916 
1917 
1918 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1919 #ifndef _WIN64
1920   st->print("@%d", args_size  * sizeof(int));
1921 #endif
1922 }
1923 
1924 // This method is a copy of JDK's sysGetLastErrorString
1925 // from src/windows/hpi/src/system_md.c
1926 
1927 size_t os::lasterror(char* buf, size_t len) {
1928   DWORD errval;
1929 
1930   if ((errval = GetLastError()) != 0) {
1931     // DOS error
1932     size_t n = (size_t)FormatMessage(
1933                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1934                                      NULL,
1935                                      errval,
1936                                      0,
1937                                      buf,
1938                                      (DWORD)len,
1939                                      NULL);
1940     if (n > 3) {
1941       // Drop final '.', CR, LF
1942       if (buf[n - 1] == '\n') n--;
1943       if (buf[n - 1] == '\r') n--;
1944       if (buf[n - 1] == '.') n--;
1945       buf[n] = '\0';
1946     }
1947     return n;
1948   }
1949 
1950   if (errno != 0) {
1951     // C runtime error that has no corresponding DOS error code
1952     const char* s = os::strerror(errno);
1953     size_t n = strlen(s);
1954     if (n >= len) n = len - 1;
1955     strncpy(buf, s, n);
1956     buf[n] = '\0';
1957     return n;
1958   }
1959 
1960   return 0;
1961 }
1962 
1963 int os::get_last_error() {
1964   DWORD error = GetLastError();
1965   if (error == 0) {
1966     error = errno;
1967   }
1968   return (int)error;
1969 }
1970 
1971 // sun.misc.Signal
1972 // NOTE that this is a workaround for an apparent kernel bug where if
1973 // a signal handler for SIGBREAK is installed then that signal handler
1974 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1975 // See bug 4416763.
1976 static void (*sigbreakHandler)(int) = NULL;
1977 
1978 static void UserHandler(int sig, void *siginfo, void *context) {
1979   os::signal_notify(sig);
1980   // We need to reinstate the signal handler each time...
1981   os::signal(sig, (void*)UserHandler);
1982 }
1983 
1984 void* os::user_handler() {
1985   return (void*) UserHandler;
1986 }
1987 
1988 void* os::signal(int signal_number, void* handler) {
1989   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1990     void (*oldHandler)(int) = sigbreakHandler;
1991     sigbreakHandler = (void (*)(int)) handler;
1992     return (void*) oldHandler;
1993   } else {
1994     return (void*)::signal(signal_number, (void (*)(int))handler);
1995   }
1996 }
1997 
1998 void os::signal_raise(int signal_number) {
1999   raise(signal_number);
2000 }
2001 
2002 // The Win32 C runtime library maps all console control events other than ^C
2003 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2004 // logoff, and shutdown events.  We therefore install our own console handler
2005 // that raises SIGTERM for the latter cases.
2006 //
2007 static BOOL WINAPI consoleHandler(DWORD event) {
2008   switch (event) {
2009   case CTRL_C_EVENT:
2010     if (VMError::is_error_reported()) {
2011       // Ctrl-C is pressed during error reporting, likely because the error
2012       // handler fails to abort. Let VM die immediately.
2013       os::die();
2014     }
2015 
2016     os::signal_raise(SIGINT);
2017     return TRUE;
2018     break;
2019   case CTRL_BREAK_EVENT:
2020     if (sigbreakHandler != NULL) {
2021       (*sigbreakHandler)(SIGBREAK);
2022     }
2023     return TRUE;
2024     break;
2025   case CTRL_LOGOFF_EVENT: {
2026     // Don't terminate JVM if it is running in a non-interactive session,
2027     // such as a service process.
2028     USEROBJECTFLAGS flags;
2029     HANDLE handle = GetProcessWindowStation();
2030     if (handle != NULL &&
2031         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2032         sizeof(USEROBJECTFLAGS), NULL)) {
2033       // If it is a non-interactive session, let the next handler deal
2034       // with it.
2035       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2036         return FALSE;
2037       }
2038     }
2039   }
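       // Fall through: an interactive logoff is handled like close/shutdown below.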
2040   case CTRL_CLOSE_EVENT:
2041   case CTRL_SHUTDOWN_EVENT:
2042     os::signal_raise(SIGTERM);
2043     return TRUE;
2044     break;
2045   default:
2046     break;
2047   }
2048   return FALSE;
2049 }
2050 
2051 // The following code was moved from os.cpp because it is
2052 // platform specific by its very nature.
2053 
2054 // Return maximum OS signal used + 1 for internal use only
2055 // Used as exit signal for signal_thread
2056 int os::sigexitnum_pd() {
2057   return NSIG;
2058 }
2059 
2060 // a counter for each possible signal value, including signal_thread exit signal
2061 static volatile jint pending_signals[NSIG+1] = { 0 };
2062 static Semaphore* sig_sem = NULL;
2063 
2064 static void jdk_misc_signal_init() {
2065   // Initialize signal structures
2066   memset((void*)pending_signals, 0, sizeof(pending_signals));
2067 
2068   // Initialize signal semaphore
2069   sig_sem = new Semaphore();
2070 
2071   // Programs embedding the VM do not want it to attempt to receive
2072   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2073   // shutdown hooks mechanism introduced in 1.3.  For example, when
2074   // the VM is run as part of a Windows NT service (e.g., a servlet
2075   // engine in a web server), the correct behavior is for any console
2076   // control handler to return FALSE, not TRUE, because the OS's
2077   // "final" handler for such events allows the process to continue if
2078   // it is a service (while terminating it if it is not a service).
2079   // To make this behavior uniform and the mechanism simpler, we
2080   // completely disable the VM's usage of these console events if -Xrs
2081   // (=ReduceSignalUsage) is specified.  This means, for example, that
2082   // the CTRL-BREAK thread dump mechanism is also disabled in this
2083   // case.  See bugs 4323062, 4345157, and related bugs.
2084 
2085   // Add a CTRL-C handler
2086   SetConsoleCtrlHandler(consoleHandler, TRUE);
2087 }
2088 
2089 void os::signal_notify(int sig) {
2090   if (sig_sem != NULL) {
2091     Atomic::inc(&pending_signals[sig]);
2092     sig_sem->signal();
2093   } else {
2094     // With ReduceSignalUsage, the signal thread is not created and
2095     // jdk_misc_signal_init() initialization isn't called.
2096     assert(ReduceSignalUsage, "signal semaphore should be created");
2097   }
2098 }
2099 
2100 static int check_pending_signals() {
2101   while (true) {
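         // Atomically claim one pending occurrence of a signal, if any, and return
         // its number; otherwise block on sig_sem below until notified.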
2102     for (int i = 0; i < NSIG + 1; i++) {
2103       jint n = pending_signals[i];
2104       if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
2105         return i;
2106       }
2107     }
2108     JavaThread *thread = JavaThread::current();
2109 
2110     ThreadBlockInVM tbivm(thread);
2111 
2112     bool threadIsSuspended;
2113     do {
2114       thread->set_suspend_equivalent();
2115       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2116       sig_sem->wait();
2117 
2118       // were we externally suspended while we were waiting?
2119       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2120       if (threadIsSuspended) {
2121         // The semaphore has been incremented, but while we were waiting
2122         // another thread suspended us. We don't want to continue running
2123         // while suspended because that would surprise the thread that
2124         // suspended us.
2125         sig_sem->signal();
2126 
2127         thread->java_suspend_self();
2128       }
2129     } while (threadIsSuspended);
2130   }
2131 }
2132 
2133 int os::signal_wait() {
2134   return check_pending_signals();
2135 }
2136 
2137 // Implicit OS exception handling
2138 
2139 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2140                       address handler) {
2141   JavaThread* thread = (JavaThread*) Thread::current_or_null();
2142   // Save pc in thread
2143 #ifdef _M_AMD64
2144   // Do not blow up if no thread info available.
2145   if (thread) {
2146     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2147   }
2148   // Set pc to handler
2149   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2150 #else
2151   // Do not blow up if no thread info available.
2152   if (thread) {
2153     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2154   }
2155   // Set pc to handler
2156   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2157 #endif
2158 
2159   // Continue the execution
2160   return EXCEPTION_CONTINUE_EXECUTION;
2161 }
2162 
2163 
2164 // Used for PostMortemDump
2165 extern "C" void safepoints();
2166 extern "C" void find(int x);
2167 extern "C" void events();
2168 
2169 // According to Windows API documentation, an illegal instruction sequence should generate
2170 // the 0xC000001C exception code. However, real world experience shows that occasionally
2171 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2172 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2173 
2174 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2175 
2176 // From "Execution Protection in the Windows Operating System" draft 0.35
2177 // Once a system header becomes available, the "real" define should be
2178 // included or copied here.
2179 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2180 
2181 // Windows Vista/2008 heap corruption check
2182 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2183 
2184 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2185 // C++ compiler contain this error code. Because this is a compiler-generated
2186 // error, the code is not listed in the Win32 API header files.
2187 // The code is actually a cryptic mnemonic device, with the initial "E"
2188 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2189 // ASCII values of "msc".
2190 
2191 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2192 
2193 #define def_excpt(val) { #val, (val) }
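     // For example, def_excpt(EXCEPTION_ACCESS_VIOLATION) expands to
     //   { "EXCEPTION_ACCESS_VIOLATION", (EXCEPTION_ACCESS_VIOLATION) }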
2194 
2195 static const struct { const char* name; uint number; } exceptlabels[] = {
2196     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2197     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2198     def_excpt(EXCEPTION_BREAKPOINT),
2199     def_excpt(EXCEPTION_SINGLE_STEP),
2200     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2201     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2202     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2203     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2204     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2205     def_excpt(EXCEPTION_FLT_OVERFLOW),
2206     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2207     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2208     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2209     def_excpt(EXCEPTION_INT_OVERFLOW),
2210     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2211     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2212     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2213     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2214     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2215     def_excpt(EXCEPTION_STACK_OVERFLOW),
2216     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2217     def_excpt(EXCEPTION_GUARD_PAGE),
2218     def_excpt(EXCEPTION_INVALID_HANDLE),
2219     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2220     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2221 };
2222 
2223 #undef def_excpt
2224 
2225 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2226   uint code = static_cast<uint>(exception_code);
2227   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2228     if (exceptlabels[i].number == code) {
2229       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2230       return buf;
2231     }
2232   }
2233 
2234   return NULL;
2235 }
2236 
2237 //-----------------------------------------------------------------------------
2238 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2239   // handle exception caused by idiv; should only happen for -MinInt/-1
2240   // (division by zero is handled explicitly)
2241 #ifdef  _M_AMD64
2242   PCONTEXT ctx = exceptionInfo->ContextRecord;
2243   address pc = (address)ctx->Rip;
2244   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2245   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2246   if (pc[0] == 0xF7) {
2247     // set correct result values and continue after idiv instruction
2248     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2249   } else {
2250     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2251   }
2252   // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
2253   // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
2254   // idiv opcode (0xF7).
2255   ctx->Rdx = (DWORD)0;             // remainder
2256   // Continue the execution
2257 #else
2258   PCONTEXT ctx = exceptionInfo->ContextRecord;
2259   address pc = (address)ctx->Eip;
2260   assert(pc[0] == 0xF7, "not an idiv opcode");
2261   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2262   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2263   // set correct result values and continue after idiv instruction
2264   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2265   ctx->Eax = (DWORD)min_jint;      // result
2266   ctx->Edx = (DWORD)0;             // remainder
2267   // Continue the execution
2268 #endif
2269   return EXCEPTION_CONTINUE_EXECUTION;
2270 }
2271 
2272 //-----------------------------------------------------------------------------
2273 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2274   PCONTEXT ctx = exceptionInfo->ContextRecord;
2275 #ifndef  _WIN64
2276   // handle exception caused by native method modifying control word
2277   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2278 
2279   switch (exception_code) {
2280   case EXCEPTION_FLT_DENORMAL_OPERAND:
2281   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2282   case EXCEPTION_FLT_INEXACT_RESULT:
2283   case EXCEPTION_FLT_INVALID_OPERATION:
2284   case EXCEPTION_FLT_OVERFLOW:
2285   case EXCEPTION_FLT_STACK_CHECK:
2286   case EXCEPTION_FLT_UNDERFLOW:
2287     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2288     if (fp_control_word != ctx->FloatSave.ControlWord) {
2289       // Restore FPCW and mask out FLT exceptions
2290       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2291       // Mask out pending FLT exceptions
2292       ctx->FloatSave.StatusWord &=  0xffffff00;
2293       return EXCEPTION_CONTINUE_EXECUTION;
2294     }
2295   }
2296 
2297   if (prev_uef_handler != NULL) {
2298     // We didn't handle this exception so pass it to the previous
2299     // UnhandledExceptionFilter.
2300     return (prev_uef_handler)(exceptionInfo);
2301   }
2302 #else // !_WIN64
2303   // On Windows, the mxcsr control bits are non-volatile across calls
2304   // See also CR 6192333
2305   //
2306   jint MxCsr = INITIAL_MXCSR;
2307   // we can't use StubRoutines::addr_mxcsr_std()
2308   // because in Win64 mxcsr is not saved there
2309   if (MxCsr != ctx->MxCsr) {
2310     ctx->MxCsr = MxCsr;
2311     return EXCEPTION_CONTINUE_EXECUTION;
2312   }
2313 #endif // !_WIN64
2314 
2315   return EXCEPTION_CONTINUE_SEARCH;
2316 }
2317 
2318 static inline void report_error(Thread* t, DWORD exception_code,
2319                                 address addr, void* siginfo, void* context) {
2320   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2321 
2322   // If UseOsErrorReporting, this will return here and save the error file
2323   // somewhere where we can find it in the minidump.
2324 }
2325 
2326 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2327         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2328   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2329   address addr = (address) exceptionRecord->ExceptionInformation[1];
2330   if (Interpreter::contains(pc)) {
2331     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2332     if (!fr->is_first_java_frame()) {
2333       // get_frame_at_stack_banging_point() is only called when we
2334       // have well defined stacks so java_sender() calls do not need
2335       // to assert safe_for_sender() first.
2336       *fr = fr->java_sender();
2337     }
2338   } else {
2339     // The more complex case: compiled code.
2340     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2341     CodeBlob* cb = CodeCache::find_blob(pc);
2342     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2343       // Not sure where the pc points to, fallback to default
2344       // stack overflow handling
2345       return false;
2346     } else {
2347       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2348       // in compiled code, the stack banging is performed just after the return pc
2349       // has been pushed on the stack
2350       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2351       if (!fr->is_java_frame()) {
2352         // See java_sender() comment above.
2353         *fr = fr->java_sender();
2354       }
2355     }
2356   }
2357   assert(fr->is_java_frame(), "Safety check");
2358   return true;
2359 }
2360 
2361 #if INCLUDE_AOT
2362 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2363   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2364   address addr = (address) exceptionRecord->ExceptionInformation[1];
2365   address pc = (address) exceptionInfo->ContextRecord->Rip;
2366 
2367   // Handle the case where we get an implicit exception in AOT generated
2368   // code.  AOT DLL's loaded are not registered for structured exceptions.
2369   // If the exception occurred in the codeCache or AOT code, pass control
2370   // to our normal exception handler.
2371   CodeBlob* cb = CodeCache::find_blob(pc);
2372   if (cb != NULL) {
2373     return topLevelExceptionFilter(exceptionInfo);
2374   }
2375 
2376   return EXCEPTION_CONTINUE_SEARCH;
2377 }
2378 #endif
2379 
2380 //-----------------------------------------------------------------------------
2381 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2382   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2383   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2384 #ifdef _M_AMD64
2385   address pc = (address) exceptionInfo->ContextRecord->Rip;
2386 #else
2387   address pc = (address) exceptionInfo->ContextRecord->Eip;
2388 #endif
2389   Thread* t = Thread::current_or_null_safe();
2390 
2391   // Handle SafeFetch32 and SafeFetchN exceptions.
2392   if (StubRoutines::is_safefetch_fault(pc)) {
2393     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2394   }
2395 
2396 #ifndef _WIN64
2397   // Execution protection violation - win32 running on AMD64 only
2398   // Handled first to avoid misdiagnosis as a "normal" access violation;
2399   // This is safe to do because we have a new/unique ExceptionInformation
2400   // code for this condition.
2401   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2402     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2403     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2404     address addr = (address) exceptionRecord->ExceptionInformation[1];
2405 
2406     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2407       int page_size = os::vm_page_size();
2408 
2409       // Make sure the pc and the faulting address are sane.
2410       //
2411       // If an instruction spans a page boundary, and the page containing
2412       // the beginning of the instruction is executable but the following
2413       // page is not, the pc and the faulting address might be slightly
2414       // different - we still want to unguard the 2nd page in this case.
2415       //
2416       // 15 bytes seems to be a (very) safe value for max instruction size.
2417       bool pc_is_near_addr =
2418         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2419       bool instr_spans_page_boundary =
2420         (align_down((intptr_t) pc ^ (intptr_t) addr,
2421                          (intptr_t) page_size) > 0);
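           // The masked XOR is nonzero exactly when pc and addr differ in some bit
           // above the page-offset bits, i.e. when they lie on different pages.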
2422 
2423       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2424         static volatile address last_addr =
2425           (address) os::non_memory_address_word();
2426 
2427         // In conservative mode, don't unguard unless the address is in the VM
2428         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2429             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2430 
2431           // Set memory to RWX and retry
2432           address page_start = align_down(addr, page_size);
2433           bool res = os::protect_memory((char*) page_start, page_size,
2434                                         os::MEM_PROT_RWX);
2435 
2436           log_debug(os)("Execution protection violation "
2437                         "at " INTPTR_FORMAT
2438                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2439                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2440 
2441           // Set last_addr so if we fault again at the same address, we don't
2442           // end up in an endless loop.
2443           //
2444           // There are two potential complications here.  Two threads trapping
2445           // at the same address at the same time could cause one of the
2446           // threads to think it already unguarded, and abort the VM.  Likely
2447           // very rare.
2448           //
2449           // The other race involves two threads alternately trapping at
2450           // different addresses and failing to unguard the page, resulting in
2451           // an endless loop.  This condition is probably even more unlikely
2452           // than the first.
2453           //
2454           // Although both cases could be avoided by using locks or thread
2455           // local last_addr, these solutions are an unnecessary complication:
2456           // this handler is a best-effort safety net, not a complete solution.
2457           // It is disabled by default and should only be used as a workaround
2458           // in case we missed any no-execute-unsafe VM code.
2459 
2460           last_addr = addr;
2461 
2462           return EXCEPTION_CONTINUE_EXECUTION;
2463         }
2464       }
2465 
2466       // Last unguard failed or not unguarding
2467       tty->print_raw_cr("Execution protection violation");
2468       report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2469                    exceptionInfo->ContextRecord);
2470       return EXCEPTION_CONTINUE_SEARCH;
2471     }
2472   }
2473 #endif // _WIN64
2474 
2475   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2476       VM_Version::is_cpuinfo_segv_addr(pc)) {
2477     // Verify that the OS saves/restores AVX registers.
2478     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2479   }
2480 
2481   if (t != NULL && t->is_Java_thread()) {
2482     JavaThread* thread = (JavaThread*) t;
2483     bool in_java = thread->thread_state() == _thread_in_Java;
2484 
2485     // Handle potential stack overflows up front.
2486     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2487       if (thread->stack_guards_enabled()) {
2488         if (in_java) {
2489           frame fr;
2490           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2491           address addr = (address) exceptionRecord->ExceptionInformation[1];
2492           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2493             assert(fr.is_java_frame(), "Must be a Java frame");
2494             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2495           }
2496         }
2497         // Yellow zone violation.  The o/s has unprotected the first yellow
2498         // zone page for us.  Note:  must call disable_stack_yellow_reserved_zone to
2499         // update the enabled status, even if the zone contains only one page.
2500         assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
2501         thread->disable_stack_yellow_reserved_zone();
2502         // If not in java code, return and hope for the best.
2503         return in_java
2504             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2505             :  EXCEPTION_CONTINUE_EXECUTION;
2506       } else {
2507         // Fatal red zone violation.
2508         thread->disable_stack_red_zone();
2509         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2510         report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2511                       exceptionInfo->ContextRecord);
2512         return EXCEPTION_CONTINUE_SEARCH;
2513       }
2514     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2515       // Either stack overflow or null pointer exception.
2516       if (in_java) {
2517         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2518         address addr = (address) exceptionRecord->ExceptionInformation[1];
2519         address stack_end = thread->stack_end();
2520         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2521           // Stack overflow.
2522           assert(!os::uses_stack_guard_pages(),
2523                  "should be caught by red zone code above.");
2524           return Handle_Exception(exceptionInfo,
2525                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2526         }
2527         // Check for safepoint polling and implicit null pointer exceptions.
2528         // We only expect null pointers in the stubs (vtable);
2529         // the rest are checked explicitly now.
2530         CodeBlob* cb = CodeCache::find_blob(pc);
2531         if (cb != NULL) {
2532           if (SafepointMechanism::is_poll_address(addr)) {
2533             address stub = SharedRuntime::get_poll_stub(pc);
2534             return Handle_Exception(exceptionInfo, stub);
2535           }
2536         }
2537         {
2538 #ifdef _WIN64
2539           // If it's a legal stack address map the entire region in
2540           //
2541           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2542           address addr = (address) exceptionRecord->ExceptionInformation[1];
2543           if (thread->is_in_usable_stack(addr)) {
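                // Round the fault address down to its page and commit everything from
                // there up to the stack base.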
2544             addr = (address)((uintptr_t)addr &
2545                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2546             os::commit_memory((char *)addr, thread->stack_base() - addr,
2547                               !ExecMem);
2548             return EXCEPTION_CONTINUE_EXECUTION;
2549           } else
2550 #endif
2551           {
2552             // Null pointer exception.
2553             if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
2554               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2555               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2556             }
2557             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2558                          exceptionInfo->ContextRecord);
2559             return EXCEPTION_CONTINUE_SEARCH;
2560           }
2561         }
2562       }
2563 
2564 #ifdef _WIN64
2565       // Special care for fast JNI field accessors.
2566       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2567       // in and the heap gets shrunk before the field access.
2568       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2569         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2570         if (addr != (address)-1) {
2571           return Handle_Exception(exceptionInfo, addr);
2572         }
2573       }
2574 #endif
2575 
2576       // Stack overflow or null pointer exception in native code.
2577       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2578                    exceptionInfo->ContextRecord);
2579       return EXCEPTION_CONTINUE_SEARCH;
2580     } // /EXCEPTION_ACCESS_VIOLATION
2581     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2582 
2583     if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
2584       CompiledMethod* nm = NULL;
2585       JavaThread* thread = (JavaThread*)t;
2586       if (in_java) {
2587         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2588         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
2589       }
2590 
2591       bool is_unsafe_arraycopy = (thread->thread_state() == _thread_in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
2592       if (((thread->thread_state() == _thread_in_vm ||
2593            thread->thread_state() == _thread_in_native ||
2594            is_unsafe_arraycopy) &&
2595           thread->doing_unsafe_access()) ||
2596           (nm != NULL && nm->has_unsafe_access())) {
2597         address next_pc =  Assembler::locate_next_instruction(pc);
2598         if (is_unsafe_arraycopy) {
2599           next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
2600         }
2601         return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
2602       }
2603     }
2604 
2605     if (in_java) {
2606       switch (exception_code) {
2607       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2608         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2609 
2610       case EXCEPTION_INT_OVERFLOW:
2611         return Handle_IDiv_Exception(exceptionInfo);
2612 
2613       } // switch
2614     }
2615     if (((thread->thread_state() == _thread_in_Java) ||
2616          (thread->thread_state() == _thread_in_native)) &&
2617          exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2618       LONG result=Handle_FLT_Exception(exceptionInfo);
2619       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2620     }
2621   }
2622 
2623   if (exception_code != EXCEPTION_BREAKPOINT) {
2624     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2625                  exceptionInfo->ContextRecord);
2626   }
2627   return EXCEPTION_CONTINUE_SEARCH;
2628 }
2629 
2630 #ifndef _WIN64
2631 // Special care for fast JNI accessors.
2632 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2633 // the heap gets shrunk before the field access.
2634 // Need to install our own structured exception handler since native code may
2635 // install its own.
2636 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2637   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2638   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2639     address pc = (address) exceptionInfo->ContextRecord->Eip;
2640     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2641     if (addr != (address)-1) {
2642       return Handle_Exception(exceptionInfo, addr);
2643     }
2644   }
2645   return EXCEPTION_CONTINUE_SEARCH;
2646 }
2647 
2648 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2649   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2650                                                      jobject obj,           \
2651                                                      jfieldID fieldID) {    \
2652     __try {                                                                 \
2653       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2654                                                                  obj,       \
2655                                                                  fieldID);  \
2656     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2657                                               _exception_info())) {         \
2658     }                                                                       \
2659     return 0;                                                               \
2660   }
2661 
2662 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2663 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2664 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2665 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2666 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2667 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2668 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2669 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2670 
2671 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2672   switch (type) {
2673   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2674   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2675   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2676   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2677   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2678   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2679   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2680   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2681   default:        ShouldNotReachHere();
2682   }
2683   return (address)-1;
2684 }
2685 #endif
2686 
2687 // Virtual Memory
2688 
2689 int os::vm_page_size() { return os::win32::vm_page_size(); }
2690 int os::vm_allocation_granularity() {
2691   return os::win32::vm_allocation_granularity();
2692 }
2693 
2694 // Windows large page support is available on Windows 2003. In order to use
2695 // large page memory, the administrator must first assign additional privilege
2696 // to the user:
2697 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2698 //   + select Local Policies -> User Rights Assignment
2699 //   + double click "Lock pages in memory", add users and/or groups
2700 //   + reboot
2701 // Note the above steps are needed for administrator as well, as administrators
2702 // by default do not have the privilege to lock pages in memory.
2703 //
2704 // Note about Windows 2003: although the API supports committing large page
2705 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2706 // scenario, I found through experiment that it only uses large pages if the entire
2707 // memory region is reserved and committed in a single VirtualAlloc() call.
2708 // This makes Windows large page support more or less like Solaris ISM, in
2709 // that the entire heap must be committed upfront. This will probably change
2710 // in the future; if so, the code below needs to be revisited.
2711 
2712 #ifndef MEM_LARGE_PAGES
2713   #define MEM_LARGE_PAGES 0x20000000
2714 #endif
2715 
2716 #define VirtualFreeChecked(mem, size, type)                       \
2717   do {                                                            \
2718     bool ret = VirtualFree(mem, size, type);                      \
2719     assert(ret, "Failed to free memory: " PTR_FORMAT, p2i(mem));  \
2720   } while (false)
2721 
2722 // The number of bytes is set up to match 1 pixel and 32 bits per pixel.
2723 static const int gdi_tiny_bitmap_width_bytes = 4;
2724 
2725 static HBITMAP gdi_create_tiny_bitmap(void* mem) {
2726   // The documentation for CreateBitmap states a word-alignment requirement.
2727   STATIC_ASSERT(is_aligned_(gdi_tiny_bitmap_width_bytes, sizeof(WORD)));
2728 
2729   // Some callers use this function to test if memory crossing separate memory
2730   // reservations can be used. Create a height of 2 to make sure that one pixel
2731   // ends up in the first reservation and the other in the second.
2732   int nHeight = 2;
2733 
2734   assert(is_aligned(mem, gdi_tiny_bitmap_width_bytes), "Incorrect alignment");
2735 
2736   // Width is one pixel and correlates with gdi_tiny_bitmap_width_bytes.
2737   int nWidth = 1;
2738 
2739   // Calculate bit count - will be 32.
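       // (gdi_tiny_bitmap_width_bytes / nWidth * BitsPerByte = 4 / 1 * 8 = 32.)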
2740   UINT nBitCount = gdi_tiny_bitmap_width_bytes / nWidth * BitsPerByte;
2741 
2742   return CreateBitmap(
2743       nWidth,
2744       nHeight,
2745       1,         // nPlanes
2746       nBitCount,
2747       mem);      // lpBits
2748 }
2749 
2750 // It has been found that some of the GDI functions fail under these two situations:
2751 //  1) When used with large pages
2752 //  2) When mem crosses the boundary between two separate memory reservations.
2753 //
2754 // This is a small test used to see if the current GDI implementation is
2755 // susceptible to any of these problems.
2756 static bool gdi_can_use_memory(void* mem) {
2757   HBITMAP bitmap = gdi_create_tiny_bitmap(mem);
2758   if (bitmap != NULL) {
2759     DeleteObject(bitmap);
2760     return true;
2761   }
2762 
2763   // Verify that the bitmap could be created with a normal page.
2764   // If this fails, the testing method above isn't reliable.
2765 #ifdef ASSERT
2766   void* verify_mem = ::malloc(4 * 1024);
2767   HBITMAP verify_bitmap = gdi_create_tiny_bitmap(verify_mem);
2768   if (verify_bitmap == NULL) {
2769     fatal("Couldn't create test bitmap with malloced memory");
2770   } else {
2771     DeleteObject(verify_bitmap);
2772   }
2773   ::free(verify_mem);
2774 #endif
2775 
2776   return false;
2777 }
2778 
2779 // Test if GDI functions work when memory spans
2780 // two adjacent memory reservations.
2781 static bool gdi_can_use_split_reservation_memory(bool use_large_pages, size_t granule) {
2782   DWORD mem_large_pages = use_large_pages ? MEM_LARGE_PAGES : 0;
2783 
2784   // Find virtual memory range. Two granules for regions and one for alignment.
2785   void* reserved = VirtualAlloc(NULL,
2786                                 granule * 3,
2787                                 MEM_RESERVE,
2788                                 PAGE_NOACCESS);
2789   if (reserved == NULL) {
2790     // Can't proceed with test - pessimistically report false
2791     return false;
2792   }
2793   VirtualFreeChecked(reserved, 0, MEM_RELEASE);
2794 
2795   // Ensure proper alignment
2796   void* res0 = align_up(reserved, granule);
2797   void* res1 = (char*)res0 + granule;
2798 
2799   // Reserve and commit the first part
2800   void* mem0 = VirtualAlloc(res0,
2801                             granule,
2802                             MEM_RESERVE|MEM_COMMIT|mem_large_pages,
2803                             PAGE_READWRITE);
2804   if (mem0 != res0) {
2805     // Can't proceed with test - pessimistically report false
2806     return false;
2807   }
2808 
2809   // Reserve and commit the second part
2810   void* mem1 = VirtualAlloc(res1,
2811                             granule,
2812                             MEM_RESERVE|MEM_COMMIT|mem_large_pages,
2813                             PAGE_READWRITE);
2814   if (mem1 != res1) {
2815     VirtualFreeChecked(mem0, 0, MEM_RELEASE);
2816     // Can't proceed with test - pessimistically report false
2817     return false;
2818   }
2819 
2820   // Point the bitmap's bits one bitmap-width of bytes before mem1, so that
2821   // the bitmap extends across the reservation boundary.
2822   void* bitmapBits = (char*)mem1 - gdi_tiny_bitmap_width_bytes;
2823 
2824   bool success = gdi_can_use_memory(bitmapBits);
2825 
2826   VirtualFreeChecked(mem1, 0, MEM_RELEASE);
2827   VirtualFreeChecked(mem0, 0, MEM_RELEASE);
2828 
2829   return success;
2830 }
2831 
2832 // Container for NUMA node list info
2833 class NUMANodeListHolder {
2834  private:
2835   int *_numa_used_node_list;  // allocated below
2836   int _numa_used_node_count;
2837 
2838   void free_node_list() {
2839     FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2840   }
2841 
2842  public:
2843   NUMANodeListHolder() {
2844     _numa_used_node_count = 0;
2845     _numa_used_node_list = NULL;
2846     // do rest of initialization in build routine (after function pointers are set up)
2847   }
2848 
2849   ~NUMANodeListHolder() {
2850     free_node_list();
2851   }
2852 
2853   bool build() {
2854     DWORD_PTR proc_aff_mask;
2855     DWORD_PTR sys_aff_mask;
2856     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2857     ULONG highest_node_number;
2858     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2859     free_node_list();
2860     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2861     for (unsigned int i = 0; i <= highest_node_number; i++) {
2862       ULONGLONG proc_mask_numa_node;
2863       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2864       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2865         _numa_used_node_list[_numa_used_node_count++] = i;
2866       }
2867     }
2868     return (_numa_used_node_count > 1);
2869   }
2870 
2871   int get_count() { return _numa_used_node_count; }
2872   int get_node_list_entry(int n) {
2873     // for indexes out of range, returns -1
2874     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2875   }
2876 
2877 } numa_node_list_holder;
2878 
2879 static size_t _large_page_size = 0;
2880 
2881 static bool request_lock_memory_privilege() {
2882   HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2883                                 os::current_process_id());
2884 
2885   bool success = false;
2886   HANDLE hToken = NULL;
2887   LUID luid;
2888   if (hProcess != NULL &&
2889       OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &hToken) &&
2890       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2891 
2892     TOKEN_PRIVILEGES tp;
2893     tp.PrivilegeCount = 1;
2894     tp.Privileges[0].Luid = luid;
2895     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2896 
2897     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2898     // privilege. Check GetLastError() too. See the MSDN documentation.
2899     if (AdjustTokenPrivileges(hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2900         (GetLastError() == ERROR_SUCCESS)) {
2901       success = true;
2902     }
2903   }
2904 
2905   // Cleanup
2906   if (hProcess != NULL) {
2907     CloseHandle(hProcess);
2908   }
2909   if (hToken != NULL) {
2910     CloseHandle(hToken);
2911   }
2912 
2913   return success;
2914 }
2915 
2916 static bool numa_interleaving_init() {
2917   bool success = false;
2918 
2919   // Print a warning if the UseNUMAInterleaving flag is specified on the command line.
2920   bool warn_on_failure = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2921 
2922 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2923 
2924   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2925   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2926   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2927 
2928   if (!numa_node_list_holder.build()) {
2929     WARN("Process does not cover multiple NUMA nodes.");
2930     WARN("...Ignoring UseNUMAInterleaving flag.");
2931     return false;
2932   }
2933 
2934   if (!gdi_can_use_split_reservation_memory(UseLargePages, min_interleave_granularity)) {
2935     WARN("Windows GDI cannot handle split reservations.");
2936     WARN("...Ignoring UseNUMAInterleaving flag.");
2937     return false;
2938   }
2939 
2940   if (log_is_enabled(Debug, os, cpu)) {
2941     Log(os, cpu) log;
2942     log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2943     for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2944       log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2945     }
2946   }
2947 
2948 #undef WARN
2949 
2950   return true;
2951 }
2952 
2953 // This routine is used whenever we need to reserve a contiguous VA range
2954 // but must make separate VirtualAlloc calls for each piece of the range.
2955 // Reasons for doing this:
2956 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003, but it may be set otherwise)
2957 //  * UseNUMAInterleaving requires a separate node for each piece
2958 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2959                                          DWORD prot,
2960                                          bool should_inject_error = false) {
2961   char * p_buf;
2962   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2963   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2964   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2965 
2966   // First reserve enough address space in advance, since we want to be
2967   // able to break a single contiguous virtual address range into multiple
2968   // large-page commits, but WS2003 does not allow reserving large-page space.
2969   // So we just use 4K pages for the reserve; this gives us a legal contiguous
2970   // address space. Then we deallocate that reservation and re-allocate it
2971   // using large pages.
2972   const size_t size_of_reserve = bytes + chunk_size;
2973   if (bytes > size_of_reserve) {
2974     // Overflowed.
2975     return NULL;
2976   }
2977   p_buf = (char *) VirtualAlloc(addr,
2978                                 size_of_reserve,  // size of Reserve
2979                                 MEM_RESERVE,
2980                                 PAGE_READWRITE);
2981   // If reservation failed, return NULL
2982   if (p_buf == NULL) return NULL;
2983   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
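       // Release the probe reservation right away; the chunks below are then
       // allocated piecemeal at addresses within the just-freed range.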
2984   os::release_memory(p_buf, bytes + chunk_size);
2985 
2986   // we still need to round up to a page boundary (in case we are using large pages)
2987   // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
2988   // instead we handle this in the bytes_to_rq computation below
2989   p_buf = align_up(p_buf, page_size);
2990 
2991   // now go through and allocate one chunk at a time until all bytes are
2992   // allocated
2993   size_t  bytes_remaining = bytes;
2994   // An overflow of align_up() would have been caught above
2995   // in the calculation of size_of_reserve.
2996   char * next_alloc_addr = p_buf;
2997   HANDLE hProc = GetCurrentProcess();
2998 
2999 #ifdef ASSERT
3000   // Variable for the failure injection
3001   int ran_num = os::random();
3002   size_t fail_after = ran_num % bytes;
3003 #endif
3004 
3005   int count=0;
3006   while (bytes_remaining) {
3007     // select bytes_to_rq to get to the next chunk_size boundary
3008 
3009     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
3010     // Note allocate and commit
3011     char * p_new;
3012 
3013 #ifdef ASSERT
3014     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
3015 #else
3016     const bool inject_error_now = false;
3017 #endif
3018 
3019     if (inject_error_now) {
3020       p_new = NULL;
3021     } else {
3022       if (!UseNUMAInterleaving) {
3023         p_new = (char *) VirtualAlloc(next_alloc_addr,
3024                                       bytes_to_rq,
3025                                       flags,
3026                                       prot);
3027       } else {
3028         // get the next node to use from the used_node_list
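             // 'count' advances once per chunk, so chunks are spread round-robin
             // across the nodes in the list.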
3029         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
3030         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
3031         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
3032       }
3033     }
3034 
3035     if (p_new == NULL) {
3036       // Free any allocated pages
3037       if (next_alloc_addr > p_buf) {
3038         // Some memory was committed so release it.
3039         size_t bytes_to_release = bytes - bytes_remaining;
3040         // NMT has yet to record any individual blocks, so it
3041         // needs to create a dummy 'reserve' record to match
3042         // the release.
3043         MemTracker::record_virtual_memory_reserve((address)p_buf,
3044                                                   bytes_to_release, CALLER_PC);
3045         os::release_memory(p_buf, bytes_to_release);
3046       }
3047 #ifdef ASSERT
3048       if (should_inject_error) {
3049         log_develop_debug(pagesize)("Reserving pages individually failed.");
3050       }
3051 #endif
3052       return NULL;
3053     }
3054 
3055     bytes_remaining -= bytes_to_rq;
3056     next_alloc_addr += bytes_to_rq;
3057     count++;
3058   }
3059   // Although the memory is allocated individually, it is returned as one.
3060   // NMT records it as one block.
3061   if ((flags & MEM_COMMIT) != 0) {
3062     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
3063   } else {
3064     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
3065   }
3066 
3067   // made it this far, success
3068   return p_buf;
3069 }
3070 
3071 static size_t large_page_init_decide_size() {
3072   // print a warning if any large page related flag is specified on command line
3073   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3074                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3075 
3076 #define WARN(msg) if (warn_on_failure) { warning(msg); }
3077 
3078   if (!request_lock_memory_privilege()) {
3079     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3080     return 0;
3081   }
3082 
3083   size_t size = GetLargePageMinimum();
3084   if (size == 0) {
3085     WARN("Large page is not supported by the processor.");
3086     return 0;
3087   }
3088 
3089 #if defined(IA32) || defined(AMD64)
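   // Note: x86 hardware large pages are typically 2MB or 4MB, so anything
   // larger here is most likely a misconfiguration and is rejected.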
3090   if (size > 4*M || LargePageSizeInBytes > 4*M) {
3091     WARN("JVM cannot use large pages bigger than 4MB.");
3092     return 0;
3093   }
3094 #endif
3095 
3096   if (LargePageSizeInBytes > 0 && LargePageSizeInBytes % size == 0) {
3097     size = LargePageSizeInBytes;
3098   }
3099 
3100   // Now test allocating a page
3101   void* large_page = VirtualAlloc(NULL,
3102                                   size,
3103                                   MEM_RESERVE|MEM_COMMIT|MEM_LARGE_PAGES,
3104                                   PAGE_READWRITE);
3105   if (large_page == NULL) {
3106     WARN("JVM cannot allocate a single large page.");
3107     return 0;
3108   }
3109 
3110   // Detect if GDI can use memory backed by large pages
3111   if (!gdi_can_use_memory(large_page)) {
3112     WARN("JVM cannot use large pages because of bug in Windows GDI.");
3113     return 0;
3114   }
3115 
3116   // Release test page
3117   VirtualFreeChecked(large_page, 0, MEM_RELEASE);
3118 
3119 #undef WARN
3120 
3121   return size;
3122 }
3123 
3124 void os::large_page_init() {
3125   if (!UseLargePages) {
3126     return;
3127   }
3128 
3129   _large_page_size = large_page_init_decide_size();
3130 
3131   const size_t default_page_size = (size_t) vm_page_size();
3132   if (_large_page_size > default_page_size) {
3133     _page_sizes[0] = _large_page_size;
3134     _page_sizes[1] = default_page_size;
3135     _page_sizes[2] = 0;
3136   }
3137 
3138   UseLargePages = _large_page_size != 0;
3139 
3140   if (UseLargePages && UseLargePagesIndividualAllocation) {
3141     if (!gdi_can_use_split_reservation_memory(true /* use_large_pages */, _large_page_size)) {
3142       if (FLAG_IS_CMDLINE(UseLargePagesIndividualAllocation)) {
3143         warning("Windows GDI cannot handle split reservations.");
3144         warning("...Ignoring UseLargePagesIndividualAllocation flag.");
3145       }
3146       UseLargePagesIndividualAllocation = false;
3147     }
3148   }
3149 }
3150 
3151 int os::create_file_for_heap(const char* dir) {
3152 
3153   const char name_template[] = "/jvmheap.XXXXXX";
3154 
3155   size_t fullname_len = strlen(dir) + strlen(name_template);
3156   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
3157   if (fullname == NULL) {
3158     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
3159     return -1;
3160   }
3161   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
3162   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
3163 
3164   os::native_path(fullname);
3165 
3166   char *path = _mktemp(fullname);
3167   if (path == NULL) {
3168     warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
3169     os::free(fullname);
3170     return -1;
3171   }
3172 
3173   int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
3174 
3175   os::free(fullname);
3176   if (fd < 0) {
3177     warning("Problem opening file for heap (%s)", os::strerror(errno));
3178     return -1;
3179   }
3180   return fd;
3181 }
3182 
3183 // If 'base' is not NULL, the function returns NULL if it cannot map the memory at 'base'.
3184 char* os::map_memory_to_file(char* base, size_t size, int fd) {
3185   assert(fd != -1, "File descriptor is not valid");
3186 
3187   HANDLE fh = (HANDLE)_get_osfhandle(fd);
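       // CreateFileMapping takes the 64-bit maximum size as two DWORDs
       // (dwMaximumSizeHigh, dwMaximumSizeLow).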
3188 #ifdef _LP64
3189   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3190     (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3191 #else
3192   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3193     0, (DWORD)size, NULL);
3194 #endif
3195   if (fileMapping == NULL) {
3196     if (GetLastError() == ERROR_DISK_FULL) {
3197       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
3198     }
3199     else {
3200       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3201     }
3202 
3203     return NULL;
3204   }
3205 
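       // Map the view at 'base' if one was requested; MapViewOfFileEx returns NULL
       // if that address range is not available (a NULL 'base' lets the OS choose).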
3206   LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
3207 
3208   CloseHandle(fileMapping);
3209 
3210   return (char*)addr;
3211 }
3212 
3213 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
3214   assert(fd != -1, "File descriptor is not valid");
3215   assert(base != NULL, "Base address cannot be NULL");
3216 
3217   release_memory(base, size);
3218   return map_memory_to_file(base, size, fd);
3219 }
3220 
3221 // On win32, one cannot release just a part of reserved memory, it's an
3222 // all or nothing deal.  When we split a reservation, we must break the
3223 // reservation into two reservations.
3224 void os::split_reserved_memory(char *base, size_t size, size_t split) {
3225 
3226   char* const split_address = base + split;
3227   assert(size > 0, "Sanity");
3228   assert(size > split, "Sanity");
3229   assert(split > 0, "Sanity");
3230   assert(is_aligned(base, os::vm_allocation_granularity()), "Sanity");
3231   assert(is_aligned(split_address, os::vm_allocation_granularity()), "Sanity");
3232 
3233   release_memory(base, size);
3234   reserve_memory(split, base);
3235   reserve_memory(size - split, split_address);
3236 
3237   // NMT: nothing to do here. Since Windows implements the split by
3238   //  releasing and re-reserving memory, the parts are already registered
3239   //  as individual mappings with NMT.
3240 
3241 }
3242 
3243 // Multiple threads can race in this code, but it is not possible to unmap small sections of
3244 // virtual space to get the requested alignment, as POSIX-like OSes can.
3245 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3246 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3247   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3248          "Alignment must be a multiple of allocation granularity (page size)");
3249   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3250 
3251   size_t extra_size = size + alignment;
3252   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3253 
3254   char* aligned_base = NULL;
3255 
3256   do {
3257     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3258     if (extra_base == NULL) {
3259       return NULL;
3260     }
3261     // Do manual alignment
3262     aligned_base = align_up(extra_base, alignment);
3263 
3264     if (file_desc != -1) {
3265       os::unmap_memory(extra_base, extra_size);
3266     } else {
3267       os::release_memory(extra_base, extra_size);
3268     }
3269 
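         // Try to reserve exactly at the aligned address. Another thread may have
         // grabbed that range after the release above, in which case this returns
         // NULL and the loop retries.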
3270     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3271 
3272   } while (aligned_base == NULL);
3273 
3274   return aligned_base;
3275 }
3276 
3277 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3278   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3279          "reserve alignment");
3280   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3281   char* res;
3282   // note that if UseLargePages is on, all the areas that require interleaving
3283   // will go thru reserve_memory_special rather than thru here.
3284   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3285   if (!use_individual) {
3286     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3287   } else {
3288     elapsedTimer reserveTimer;
3289     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3290     // in numa interleaving, we have to allocate pages individually
3291     // (well really chunks of NUMAInterleaveGranularity size)
3292     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3293     if (res == NULL) {
3294       warning("NUMA page allocation failed");
3295     }
3296     if (Verbose && PrintMiscellaneous) {
3297       reserveTimer.stop();
3298       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3299                     reserveTimer.milliseconds(), reserveTimer.ticks());
3300     }
3301   }
3302   assert(res == NULL || addr == NULL || addr == res,
3303          "Unexpected address from reserve.");
3304 
3305   return res;
3306 }
3307 
3308 // Reserve memory at an arbitrary address, only if that area is
3309 // available (and not reserved for something else).
3310 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3311   // Windows os::reserve_memory() fails if the requested address range is
3312   // not available.
3313   return reserve_memory(bytes, requested_addr);
3314 }
3315 
3316 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
3317   assert(file_desc >= 0, "file_desc is not valid");
3318   return map_memory_to_file(requested_addr, bytes, file_desc);
3319 }
3320 
3321 size_t os::large_page_size() {
3322   return _large_page_size;
3323 }
3324 
3325 bool os::can_commit_large_page_memory() {
3326   // Windows only uses large page memory when the entire region is reserved
3327   // and committed in a single VirtualAlloc() call. This may change in the
3328   // future, but with Windows 2003 it's not possible to commit on demand.
3329   return false;
3330 }
3331 
3332 bool os::can_execute_large_page_memory() {
3333   return true;
3334 }
3335 
3336 char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3337                                     bool exec) {
3338   assert(UseLargePages, "only for large pages");
3339 
3340   if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3341     return NULL; // Fallback to small pages.
3342   }
3343 
3344   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3345   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3346 
3347   // with large pages, there are two cases where we need to use Individual Allocation
3348   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3349   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3350   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3351     log_debug(pagesize)("Reserving large pages individually.");
3352 
3353     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3354     if (p_buf == NULL) {
3355       // give an appropriate warning message
3356       if (UseNUMAInterleaving) {
3357         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3358       }
3359       if (UseLargePagesIndividualAllocation) {
3360         warning("Individually allocated large pages failed, "
3361                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3362       }
3363       return NULL;
3364     }
3365 
3366     return p_buf;
3367 
3368   } else {
3369     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3370 
3371     // normal policy just allocate it all at once
3372     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3373     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3374 
3375     return res;
3376   }
3377 }
3378 
3379 bool os::pd_release_memory_special(char* base, size_t bytes) {
3380   assert(base != NULL, "Sanity check");
3381   return pd_release_memory(base, bytes);
3382 }
3383 
3384 void os::print_statistics() {
3385 }
3386 
3387 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3388   int err = os::get_last_error();
3389   char buf[256];
3390   size_t buf_len = os::lasterror(buf, sizeof(buf));
3391   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3392           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3393           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3394 }
3395 
3396 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3397   if (bytes == 0) {
3398     // Don't bother the OS with noops.
3399     return true;
3400   }
3401   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3402   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3403   // Don't attempt to print anything if the OS call fails. We're
3404   // probably low on resources, so the print itself may cause crashes.
3405 
3406   // Unless we have NUMAInterleaving enabled, the range of a commit
3407   // is always within a reserve covered by a single VirtualAlloc;
3408   // in that case we can just do a single commit for the requested size.
3409   if (!UseNUMAInterleaving) {
3410     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3411       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3412       return false;
3413     }
3414     if (exec) {
3415       DWORD oldprot;
3416       // Windows doc says to use VirtualProtect to get execute permissions
3417       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3418         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3419         return false;
3420       }
3421     }
3422     return true;
3423   } else {
3424 
3425     // when NUMAInterleaving is enabled, the commit might cover a range that
3426     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3427     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3428     // returns represents the number of bytes that can be committed in one step.
3429     size_t bytes_remaining = bytes;
3430     char * next_alloc_addr = addr;
3431     while (bytes_remaining > 0) {
3432       MEMORY_BASIC_INFORMATION alloc_info;
3433       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3434       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3435       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3436                        PAGE_READWRITE) == NULL) {
3437         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3438                                             exec);)
3439         return false;
3440       }
3441       if (exec) {
3442         DWORD oldprot;
3443         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3444                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3445           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3446                                               exec);)
3447           return false;
3448         }
3449       }
3450       bytes_remaining -= bytes_to_rq;
3451       next_alloc_addr += bytes_to_rq;
3452     }
3453   }
3454   // if we made it this far, return true
3455   return true;
3456 }
3457 
3458 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3459                           bool exec) {
3460   // alignment_hint is ignored on this OS
3461   return pd_commit_memory(addr, size, exec);
3462 }
3463 
3464 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3465                                   const char* mesg) {
3466   assert(mesg != NULL, "mesg must be specified");
3467   if (!pd_commit_memory(addr, size, exec)) {
3468     warn_fail_commit_memory(addr, size, exec);
3469     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3470   }
3471 }
3472 
3473 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3474                                   size_t alignment_hint, bool exec,
3475                                   const char* mesg) {
3476   // alignment_hint is ignored on this OS
3477   pd_commit_memory_or_exit(addr, size, exec, mesg);
3478 }
3479 
3480 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3481   if (bytes == 0) {
3482     // Don't bother the OS with noops.
3483     return true;
3484   }
3485   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3486   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3487   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3488 }
3489 
3490 bool os::pd_release_memory(char* addr, size_t bytes) {
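       // With MEM_RELEASE the size must be 0 and VirtualFree frees the entire
       // reservation made by the original VirtualAlloc, so 'bytes' is not passed down.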
3491   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3492 }
3493 
3494 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3495   return os::commit_memory(addr, size, !ExecMem);
3496 }
3497 
3498 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3499   return os::uncommit_memory(addr, size);
3500 }
3501 
3502 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3503   uint count = 0;
3504   bool ret = false;
3505   size_t bytes_remaining = bytes;
3506   char * next_protect_addr = addr;
3507 
3508   // Use VirtualQuery() to get the chunk size.
3509   while (bytes_remaining) {
3510     MEMORY_BASIC_INFORMATION alloc_info;
3511     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3512       return false;
3513     }
3514 
3515     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3516     // We used a different API in allocate_pages_individually() depending on UseNUMAInterleaving,
3517     // but we don't distinguish here, as both cases are protected by the same API.
3518     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3519     if (!ret) {
3520       warning("Failed protecting pages individually for chunk #%u", count);
3521       return false;
3522     }
3523 
3524     bytes_remaining -= bytes_to_protect;
3525     next_protect_addr += bytes_to_protect;
3526     count++;
3527   }
3528   return ret;
3529 }
3530 
3531 // Set protections specified
3532 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3533                         bool is_committed) {
3534   unsigned int p = 0;
3535   switch (prot) {
3536   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3537   case MEM_PROT_READ: p = PAGE_READONLY; break;
3538   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3539   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3540   default:
3541     ShouldNotReachHere();
3542   }
3543 
3544   DWORD old_status;
3545 
3546   // Strangely enough, on Win32 one can change protection only for committed
3547   // memory; not a big deal anyway, as bytes is less than or equal to 64K here.
3548   if (!is_committed) {
3549     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3550                           "cannot commit protection page");
3551   }
3552   // One cannot use os::guard_memory() here, as on Win32 guard pages
3553   // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3554   //
3555   // Pages in the region become guard pages. Any attempt to access a guard page
3556   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3557   // the guard page status. Guard pages thus act as a one-time access alarm.
3558   bool ret;
3559   if (UseNUMAInterleaving) {
3560     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3561     // so we must protect the chunks individually.
3562     ret = protect_pages_individually(addr, bytes, p, &old_status);
3563   } else {
3564     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3565   }
3566 #ifdef ASSERT
3567   if (!ret) {
3568     int err = os::get_last_error();
3569     char buf[256];
3570     size_t buf_len = os::lasterror(buf, sizeof(buf));
3571     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3572           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3573           buf_len != 0 ? buf : "<no_error_string>", err);
3574   }
3575 #endif
3576   return ret;
3577 }
3578 
3579 bool os::guard_memory(char* addr, size_t bytes) {
3580   DWORD old_status;
3581   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3582 }
3583 
3584 bool os::unguard_memory(char* addr, size_t bytes) {
3585   DWORD old_status;
3586   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3587 }
3588 
3589 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3590 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3591 void os::numa_make_global(char *addr, size_t bytes)    { }
3592 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3593 bool os::numa_topology_changed()                       { return false; }
3594 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3595 int os::numa_get_group_id()                            { return 0; }
3596 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3597   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3598     // Provide an answer for UMA systems
3599     ids[0] = 0;
3600     return 1;
3601   } else {
3602     // check for size bigger than actual groups_num
3603     size = MIN2(size, numa_get_groups_num());
3604     for (int i = 0; i < (int)size; i++) {
3605       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3606     }
3607     return size;
3608   }
3609 }
3610 
3611 int os::numa_get_group_id_for_address(const void* address) {
3612   return 0;
3613 }
3614 
3615 bool os::get_page_info(char *start, page_info* info) {
3616   return false;
3617 }
3618 
3619 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3620                      page_info* page_found) {
3621   return end;
3622 }
3623 
3624 char* os::non_memory_address_word() {
3625   // Must never look like an address returned by reserve_memory,
3626   // even in its subfields (as defined by the CPU immediate fields,
3627   // if the CPU splits constants across multiple instructions).
3628   return (char*)-1;
3629 }
3630 
3631 #define MAX_ERROR_COUNT 100
3632 #define SYS_THREAD_ERROR 0xffffffffUL
3633 
3634 void os::pd_start_thread(Thread* thread) {
3635   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3636   // Returns previous suspend state:
3637   // 0:  Thread was not suspended
3638   // 1:  Thread is running now
3639   // >1: Thread is still suspended.
3640   assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3641 }
3642 
3643 
3644 // Short sleep, direct OS call.
3645 //
3646 // ms = 0, means allow others (if any) to run.
3647 //
3648 void os::naked_short_sleep(jlong ms) {
3649   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3650   Sleep(ms);
3651 }
3652 
3653 // Windows does not provide sleep functionality with nanosecond resolution, so we
3654 // try to approximate this with spinning combined with yielding if another thread
3655 // is ready to run on the current processor.
3656 void os::naked_short_nanosleep(jlong ns) {
3657   assert(ns > -1 && ns < NANOUNITS, "Un-interruptible sleep, short time use only");
3658 
3659   int64_t start = os::javaTimeNanos();
3660   do {
3661     if (SwitchToThread() == 0) {
3662       // Nothing else is ready to run on this cpu, spin a little
3663       SpinPause();
3664     }
3665   } while (os::javaTimeNanos() - start < ns);
3666 }
3667 
3668 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3669 void os::infinite_sleep() {
3670   while (true) {    // sleep forever ...
3671     Sleep(100000);  // ... 100 seconds at a time
3672   }
3673 }
3674 
3675 typedef BOOL (WINAPI * STTSignature)(void);
3676 
3677 void os::naked_yield() {
3678   // Consider passing back the return value from SwitchToThread().
3679   SwitchToThread();
3680 }
3681 
3682 // Win32 only gives you access to seven real priorities at a time,
3683 // so we compress Java's ten down to seven.  It would be better
3684 // if we dynamically adjusted relative priorities.
3685 
3686 int os::java_to_os_priority[CriticalPriority + 1] = {
3687   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3688   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3689   THREAD_PRIORITY_LOWEST,                       // 2
3690   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3691   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3692   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3693   THREAD_PRIORITY_NORMAL,                       // 6
3694   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3695   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3696   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3697   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3698   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3699 };
3700 
3701 int prio_policy1[CriticalPriority + 1] = {
3702   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3703   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3704   THREAD_PRIORITY_LOWEST,                       // 2
3705   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3706   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3707   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3708   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3709   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3710   THREAD_PRIORITY_HIGHEST,                      // 8
3711   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3712   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3713   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3714 };
3715 
3716 static int prio_init() {
3717   // If ThreadPriorityPolicy is 1, switch tables
3718   if (ThreadPriorityPolicy == 1) {
3719     int i;
3720     for (i = 0; i < CriticalPriority + 1; i++) {
3721       os::java_to_os_priority[i] = prio_policy1[i];
3722     }
3723   }
3724   if (UseCriticalJavaThreadPriority) {
3725     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3726   }
3727   return 0;
3728 }
3729 
3730 OSReturn os::set_native_priority(Thread* thread, int priority) {
3731   if (!UseThreadPriorities) return OS_OK;
3732   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3733   return ret ? OS_OK : OS_ERR;
3734 }
3735 
3736 OSReturn os::get_native_priority(const Thread* const thread,
3737                                  int* priority_ptr) {
3738   if (!UseThreadPriorities) {
3739     *priority_ptr = java_to_os_priority[NormPriority];
3740     return OS_OK;
3741   }
3742   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3743   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3744     assert(false, "GetThreadPriority failed");
3745     return OS_ERR;
3746   }
3747   *priority_ptr = os_prio;
3748   return OS_OK;
3749 }
3750 
3751 // GetCurrentThreadId() returns DWORD
3752 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3753 
3754 static int _initial_pid = 0;
3755 
3756 int os::current_process_id() {
3757   return (_initial_pid ? _initial_pid : _getpid());
3758 }
3759 
3760 int    os::win32::_vm_page_size              = 0;
3761 int    os::win32::_vm_allocation_granularity = 0;
3762 int    os::win32::_processor_type            = 0;
3763 // Processor level is not available on non-NT systems, use vm_version instead
3764 int    os::win32::_processor_level           = 0;
3765 julong os::win32::_physical_memory           = 0;
3766 size_t os::win32::_default_stack_size        = 0;
3767 
3768 intx          os::win32::_os_thread_limit    = 0;
3769 volatile intx os::win32::_os_thread_count    = 0;
3770 
3771 bool   os::win32::_is_windows_server         = false;
3772 
3773 // 6573254
3774 // Currently, the bug is observed across all the supported Windows releases,
3775 // including the latest one (as of this writing - Windows Server 2012 R2)
3776 bool   os::win32::_has_exit_bug              = true;
3777 
3778 void os::win32::initialize_system_info() {
3779   SYSTEM_INFO si;
3780   GetSystemInfo(&si);
3781   _vm_page_size    = si.dwPageSize;
3782   _vm_allocation_granularity = si.dwAllocationGranularity;
3783   _processor_type  = si.dwProcessorType;
3784   _processor_level = si.wProcessorLevel;
3785   set_processor_count(si.dwNumberOfProcessors);
3786 
3787   MEMORYSTATUSEX ms;
3788   ms.dwLength = sizeof(ms);
3789 
3790   // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
3791   // dwMemoryLoad (% of memory in use)
3792   GlobalMemoryStatusEx(&ms);
3793   _physical_memory = ms.ullTotalPhys;
3794 
3795   if (FLAG_IS_DEFAULT(MaxRAM)) {
3796     // Adjust MaxRAM according to the maximum virtual address space available.
3797     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3798   }
3799 
3800   OSVERSIONINFOEX oi;
3801   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3802   GetVersionEx((OSVERSIONINFO*)&oi);
3803   switch (oi.dwPlatformId) {
3804   case VER_PLATFORM_WIN32_NT:
3805     {
3806       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3807       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3808           oi.wProductType == VER_NT_SERVER) {
3809         _is_windows_server = true;
3810       }
3811     }
3812     break;
3813   default: fatal("Unknown platform");
3814   }
3815 
3816   _default_stack_size = os::current_stack_size();
3817   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3818   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3819          "stack size not a multiple of page size");
3820 
3821   initialize_performance_counter();
3822 }
3823 
3824 
3825 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3826                                       int ebuflen) {
3827   char path[MAX_PATH];
3828   DWORD size;
3829   DWORD pathLen = (DWORD)sizeof(path);
3830   HINSTANCE result = NULL;
3831 
3832   // only allow library name without path component
3833   assert(strchr(name, '\\') == NULL, "path not allowed");
3834   assert(strchr(name, ':') == NULL, "path not allowed");
3835   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3836     jio_snprintf(ebuf, ebuflen,
3837                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3838     return NULL;
3839   }
3840 
3841   // search system directory
3842   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3843     if (size >= pathLen) {
3844       return NULL; // truncated
3845     }
3846     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3847       return NULL; // truncated
3848     }
3849     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3850       return result;
3851     }
3852   }
3853 
3854   // try Windows directory
3855   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3856     if (size >= pathLen) {
3857       return NULL; // truncated
3858     }
3859     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3860       return NULL; // truncated
3861     }
3862     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3863       return result;
3864     }
3865   }
3866 
3867   jio_snprintf(ebuf, ebuflen,
3868                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3869   return NULL;
3870 }
3871 
3872 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3873 #define EXIT_TIMEOUT 300000 /* 5 minutes */
3874 
3875 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3876   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3877   return TRUE;
3878 }
3879 
3880 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3881   // Basic approach:
3882   //  - Each exiting thread registers its intent to exit and then does so.
3883   //  - A thread trying to terminate the process must wait for all
3884   //    threads currently exiting to complete their exit.
3885 
3886   if (os::win32::has_exit_bug()) {
3887     // The array holds handles of the threads that have started exiting by calling
3888     // _endthreadex().
3889     // Should be large enough to avoid blocking the exiting thread due to lack of
3890     // a free slot.
3891     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3892     static int handle_count = 0;
3893 
3894     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3895     static CRITICAL_SECTION crit_sect;
3896     static volatile DWORD process_exiting = 0;
3897     int i, j;
3898     DWORD res;
3899     HANDLE hproc, hthr;
3900 
3901     // We only attempt to register threads until a process exiting
3902     // thread manages to set the process_exiting flag. Any threads
3903     // that come through here after the process_exiting flag is set
3904     // are unregistered and will be caught in the SuspendThread()
3905     // infinite loop below.
3906     bool registered = false;
3907 
3908     // The first thread that reached this point, initializes the critical section.
3909     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3910       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3911     } else if (Atomic::load_acquire(&process_exiting) == 0) {
3912       if (what != EPT_THREAD) {
3913         // Atomically set process_exiting before the critical section
3914         // to increase the visibility between racing threads.
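             // Only the first exiting thread's id is stored; the CAS leaves the
             // flag unchanged for everyone else.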
3915         Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
3916       }
3917       EnterCriticalSection(&crit_sect);
3918 
3919       if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
3920         // Remove from the array those handles of the threads that have completed exiting.
3921         for (i = 0, j = 0; i < handle_count; ++i) {
3922           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3923           if (res == WAIT_TIMEOUT) {
3924             handles[j++] = handles[i];
3925           } else {
3926             if (res == WAIT_FAILED) {
3927               warning("WaitForSingleObject failed (%u) in %s: %d\n",
3928                       GetLastError(), __FILE__, __LINE__);
3929             }
3930             // Don't keep the handle, if we failed waiting for it.
3931             CloseHandle(handles[i]);
3932           }
3933         }
3934 
3935         // If there's no free slot in the array of the kept handles, we'll have to
3936         // wait until at least one thread completes exiting.
3937         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3938           // Raise the priority of the oldest exiting thread to increase its chances
3939           // to complete sooner.
3940           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3941           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3942           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3943             i = (res - WAIT_OBJECT_0);
3944             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3945             for (; i < handle_count; ++i) {
3946               handles[i] = handles[i + 1];
3947             }
3948           } else {
3949             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3950                     (res == WAIT_FAILED ? "failed" : "timed out"),
3951                     GetLastError(), __FILE__, __LINE__);
3952             // Don't keep handles, if we failed waiting for them.
3953             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3954               CloseHandle(handles[i]);
3955             }
3956             handle_count = 0;
3957           }
3958         }
3959 
3960         // Store a duplicate of the current thread handle in the array of handles.
3961         hproc = GetCurrentProcess();
3962         hthr = GetCurrentThread();
3963         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3964                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
3965           warning("DuplicateHandle failed (%u) in %s: %d\n",
3966                   GetLastError(), __FILE__, __LINE__);
3967 
3968           // We can't register this thread (no more handles) so this thread
3969           // may be racing with a thread that is calling exit(). If the thread
3970           // that is calling exit() has managed to set the process_exiting
3971           // flag, then this thread will be caught in the SuspendThread()
3972           // infinite loop below which closes that race. A small timing
3973           // window remains before the process_exiting flag is set, but it
3974           // is only exposed when we are out of handles.
3975         } else {
3976           ++handle_count;
3977           registered = true;
3978 
3979           // The current exiting thread has stored its handle in the array, and now
3980           // should leave the critical section before calling _endthreadex().
3981         }
3982 
3983       } else if (what != EPT_THREAD && handle_count > 0) {
3984         jlong start_time, finish_time, timeout_left;
3985         // Before ending the process, make sure all the threads that had called
3986         // _endthreadex() completed.
3987 
3988         // Set the priority level of the current thread to the same value as
3989         // the priority level of exiting threads.
3990         // This is to ensure it will be given a fair chance to execute if
3991         // the timeout expires.
3992         hthr = GetCurrentThread();
3993         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
3994         start_time = os::javaTimeNanos();
3995         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
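             // WaitForMultipleObjects can wait on at most MAXIMUM_WAIT_OBJECTS (64)
             // handles at once, so the registered handles are processed in batches.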
3996         for (i = 0; ; ) {
3997           int portion_count = handle_count - i;
3998           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
3999             portion_count = MAXIMUM_WAIT_OBJECTS;
4000           }
4001           for (j = 0; j < portion_count; ++j) {
4002             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
4003           }
4004           timeout_left = (finish_time - start_time) / 1000000L;
4005           if (timeout_left < 0) {
4006             timeout_left = 0;
4007           }
4008           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
4009           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
4010             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
4011                     (res == WAIT_FAILED ? "failed" : "timed out"),
4012                     GetLastError(), __FILE__, __LINE__);
4013             // Reset portion_count so we close the remaining
4014             // handles due to this error.
4015             portion_count = handle_count - i;
4016           }
4017           for (j = 0; j < portion_count; ++j) {
4018             CloseHandle(handles[i + j]);
4019           }
4020           if ((i += portion_count) >= handle_count) {
4021             break;
4022           }
4023           start_time = os::javaTimeNanos();
4024         }
4025         handle_count = 0;
4026       }
4027 
4028       LeaveCriticalSection(&crit_sect);
4029     }
4030 
4031     if (!registered &&
4032         Atomic::load_acquire(&process_exiting) != 0 &&
4033         process_exiting != GetCurrentThreadId()) {
4034       // Some other thread is about to call exit(), so we don't let
4035       // the current unregistered thread proceed to exit() or _endthreadex()
4036       while (true) {
4037         SuspendThread(GetCurrentThread());
4038         // Avoid busy-wait loop, if SuspendThread() failed.
4039         Sleep(EXIT_TIMEOUT);
4040       }
4041     }
4042   }
4043 
4044   // We are here if either
4045   // - there's no 'race at exit' bug on this OS release;
4046   // - initialization of the critical section failed (unlikely);
4047   // - the current thread has registered itself and left the critical section;
4048   // - the process-exiting thread has raised the flag and left the critical section.
4049   if (what == EPT_THREAD) {
4050     _endthreadex((unsigned)exit_code);
4051   } else if (what == EPT_PROCESS) {
4052     ::exit(exit_code);
4053   } else {
4054     _exit(exit_code);
4055   }
4056 
4057   // Should not reach here
4058   return exit_code;
4059 }
4060 
4061 #undef EXIT_TIMEOUT
4062 
4063 void os::win32::setmode_streams() {
4064   _setmode(_fileno(stdin), _O_BINARY);
4065   _setmode(_fileno(stdout), _O_BINARY);
4066   _setmode(_fileno(stderr), _O_BINARY);
4067 }
4068 
4069 void os::wait_for_keypress_at_exit(void) {
4070   if (PauseAtExit) {
4071     fprintf(stderr, "Press any key to continue...\n");
4072     fgetc(stdin);
4073   }
4074 }
4075 
4076 
4077 bool os::message_box(const char* title, const char* message) {
4078   int result = MessageBox(NULL, message, title,
4079                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4080   return result == IDYES;
4081 }
4082 
4083 #ifndef PRODUCT
4084 #ifndef _WIN64
4085 // Helpers to check whether NX protection is enabled
4086 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
4087   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
4088       pex->ExceptionRecord->NumberParameters > 0 &&
4089       pex->ExceptionRecord->ExceptionInformation[0] ==
4090       EXCEPTION_INFO_EXEC_VIOLATION) {
4091     return EXCEPTION_EXECUTE_HANDLER;
4092   }
4093   return EXCEPTION_CONTINUE_SEARCH;
4094 }
4095 
4096 void nx_check_protection() {
4097   // If NX is enabled we'll get an exception calling into code on the stack
4098   char code[] = { (char)0xC3 }; // ret
4099   void *code_ptr = (void *)code;
4100   __try {
4101     __asm call code_ptr
4102   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
4103     tty->print_raw_cr("NX protection detected.");
4104   }
4105 }
4106 #endif // _WIN64
4107 #endif // PRODUCT
4108 
4109 // This is called _before_ the global arguments have been parsed
4110 void os::init(void) {
4111   _initial_pid = _getpid();
4112 
4113   init_random(1234567);
4114 
4115   win32::initialize_system_info();
4116   win32::setmode_streams();
4117   init_page_sizes((size_t) win32::vm_page_size());
4118 
4119   // This may be overridden later when argument processing is done.
4120   FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);
4121 
4122   // Initialize main_process and main_thread
4123   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4124   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4125                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4126     fatal("DuplicateHandle failed\n");
4127   }
4128   main_thread_id = (int) GetCurrentThreadId();
4129 
4130   // initialize fast thread access - only used for 32-bit
4131   win32::initialize_thread_ptr_offset();
4132 }
4133 
4134 // To install functions for atexit processing
4135 extern "C" {
4136   static void perfMemory_exit_helper() {
4137     perfMemory_exit();
4138   }
4139 }
4140 
4141 static jint initSock();
4142 
4143 // this is called _after_ the global arguments have been parsed
4144 jint os::init_2(void) {
4145 
4146   // This could be set any time but all platforms
4147   // have to set it the same so we have to mirror Solaris.
4148   DEBUG_ONLY(os::set_mutex_init_done();)
4149 
4150   // Setup Windows Exceptions
4151 
4152 #if INCLUDE_AOT
4153   // If AOT is enabled we need to install a vectored exception handler
4154   // in order to forward implicit exceptions from code in AOT
4155   // generated DLLs.  This is necessary since these DLLs are not
4156   // registered for structured exceptions like codecache methods are.
4157   if (AOTLibrary != NULL && (UseAOT || FLAG_IS_DEFAULT(UseAOT))) {
4158     topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelVectoredExceptionFilter);
4159   }
4160 #endif
4161 
4162   // for debugging float code generation bugs
4163   if (ForceFloatExceptions) {
4164 #ifndef  _WIN64
4165     static long fp_control_word = 0;
4166     __asm { fstcw fp_control_word }
4167     // see Intel PPro Manual, Vol. 2, p 7-16
4168     const long precision = 0x20;
4169     const long underflow = 0x10;
4170     const long overflow  = 0x08;
4171     const long zero_div  = 0x04;
4172     const long denorm    = 0x02;
4173     const long invalid   = 0x01;
4174     fp_control_word |= invalid;
4175     __asm { fldcw fp_control_word }
4176 #endif
4177   }
4178 
4179   // If stack_commit_size is 0, Windows will reserve the default size,
4180   // but only commit a small portion of it.
4181   size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
4182   size_t default_reserve_size = os::win32::default_stack_size();
4183   size_t actual_reserve_size = stack_commit_size;
4184   if (stack_commit_size < default_reserve_size) {
4185     // If stack_commit_size == 0, we want this too
4186     actual_reserve_size = default_reserve_size;
4187   }
4188 
4189   // Check minimum allowable stack size for thread creation and to initialize
4190   // the java system classes, including StackOverflowError - depends on page
4191   // size.  Add two 4K pages for compiler2 recursion in main thread.
4192   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4193   // class initialization depending on 32 or 64 bit VM.
4194   size_t min_stack_allowed =
4195             (size_t)(JavaThread::stack_guard_zone_size() +
4196                      JavaThread::stack_shadow_zone_size() +
4197                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4198 
4199   min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
4200 
4201   if (actual_reserve_size < min_stack_allowed) {
4202     tty->print_cr("\nThe Java thread stack size specified is too small. "
4203                   "Specify at least %dk",
4204                   min_stack_allowed / K);
4205     return JNI_ERR;
4206   }
4207 
4208   JavaThread::set_stack_size_at_create(stack_commit_size);
4209 
4210   // Calculate theoretical max. size of Threads to guard against artificial
4211   // out-of-memory situations, where all available address-space has been
4212   // reserved by thread stacks.
4213   assert(actual_reserve_size != 0, "Must have a stack");
4214 
4215   // Calculate the thread limit when we should start doing Virtual Memory
4216   // banging. Currently when the threads will have used all but 200Mb of space.
4217   //
4218   // TODO: consider performing a similar calculation for commit size instead
4219   // as reserve size, since on a 64-bit platform we'll run into that more
4220   // often than running out of virtual memory space.  We can use the
4221   // lower value of the two calculations as the os_thread_limit.
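     // Half of the full address range (e.g. the 2GB of user space on 32-bit
     // Windows by default), minus 200MB of headroom.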
4222   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4223   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4224 
4225   // at exit methods are called in the reverse order of their registration.
4226   // there is no limit to the number of functions registered. atexit does
4227   // not set errno.
4228 
4229   if (PerfAllowAtExitRegistration) {
4230     // only register atexit functions if PerfAllowAtExitRegistration is set.
4231     // atexit functions can be delayed until process exit time, which
4232     // can be problematic for embedded VM situations. Embedded VMs should
4233     // call DestroyJavaVM() to assure that VM resources are released.
4234 
4235     // note: perfMemory_exit_helper atexit function may be removed in
4236     // the future if the appropriate cleanup code can be added to the
4237     // VM_Exit VMOperation's doit method.
4238     if (atexit(perfMemory_exit_helper) != 0) {
4239       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4240     }
4241   }
4242 
4243 #ifndef _WIN64
4244   // Print something if NX is enabled (win32 on AMD64)
4245   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4246 #endif
4247 
4248   // initialize thread priority policy
4249   prio_init();
4250 
4251   if (UseNUMA && !ForceNUMA) {
4252     UseNUMA = false; // We don't fully support this yet
4253   }
4254 
4255   if (UseNUMAInterleaving || (UseNUMA && FLAG_IS_DEFAULT(UseNUMAInterleaving))) {
4256     if (!numa_interleaving_init()) {
4257       FLAG_SET_ERGO(UseNUMAInterleaving, false);
4258     } else if (!UseNUMAInterleaving) {
4259       // When NUMA is requested, non-NUMA-aware allocations default to interleaving.
4260       FLAG_SET_ERGO(UseNUMAInterleaving, true);
4261     }
4262   }
4263 
4264   if (initSock() != JNI_OK) {
4265     return JNI_ERR;
4266   }
4267 
4268   SymbolEngine::recalc_search_path();
4269 
4270   // Initialize data for jdk.internal.misc.Signal
4271   if (!ReduceSignalUsage) {
4272     jdk_misc_signal_init();
4273   }
4274 
4275   return JNI_OK;
4276 }
4277 
4278 // combine the high and low DWORD into a ULONGLONG
4279 static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
4280   ULONGLONG value = high_word;
4281   value <<= sizeof(high_word) * 8;
4282   value |= low_word;
4283   return value;
4284 }
4285 
4286 // Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
4287 static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
4288   ::memset((void*)sbuf, 0, sizeof(struct stat));
4289   sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
4290   sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
4291                                   file_data.ftLastWriteTime.dwLowDateTime);
4292   sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
4293                                   file_data.ftCreationTime.dwLowDateTime);
4294   sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
4295                                   file_data.ftLastAccessTime.dwLowDateTime);
4296   if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
4297     sbuf->st_mode |= S_IFDIR;
4298   } else {
4299     sbuf->st_mode |= S_IFREG;
4300   }
4301 }
4302 
4303 static errno_t convert_to_unicode(char const* char_path, LPWSTR* unicode_path) {
4304   // Get required buffer size to convert to Unicode
4305   int unicode_path_len = MultiByteToWideChar(CP_ACP,
4306                                              MB_ERR_INVALID_CHARS,
4307                                              char_path, -1,
4308                                              NULL, 0);
4309   if (unicode_path_len == 0) {
4310     return EINVAL;
4311   }
4312 
4313   *unicode_path = NEW_C_HEAP_ARRAY(WCHAR, unicode_path_len, mtInternal);
4314 
4315   int result = MultiByteToWideChar(CP_ACP,
4316                                    MB_ERR_INVALID_CHARS,
4317                                    char_path, -1,
4318                                    *unicode_path, unicode_path_len);
4319   assert(result == unicode_path_len, "length already checked above");
4320 
4321   return ERROR_SUCCESS;
4322 }
4323 
4324 static errno_t get_full_path(LPCWSTR unicode_path, LPWSTR* full_path) {
4325   // Get required buffer size to convert to full path. The return
4326   // value INCLUDES the terminating null character.
4327   DWORD full_path_len = GetFullPathNameW(unicode_path, 0, NULL, NULL);
4328   if (full_path_len == 0) {
4329     return EINVAL;
4330   }
4331 
4332   *full_path = NEW_C_HEAP_ARRAY(WCHAR, full_path_len, mtInternal);
4333 
4334   // When the buffer has sufficient size, the return value EXCLUDES the
4335   // terminating null character
4336   DWORD result = GetFullPathNameW(unicode_path, full_path_len, *full_path, NULL);
4337   assert(result <= full_path_len, "length already checked above");
4338 
4339   return ERROR_SUCCESS;
4340 }
4341 
4342 static void set_path_prefix(char* buf, LPWSTR* prefix, int* prefix_off, bool* needs_fullpath) {
4343   *prefix_off = 0;
4344   *needs_fullpath = true;
4345 
4346   if (::isalpha(buf[0]) && !::IsDBCSLeadByte(buf[0]) && buf[1] == ':' && buf[2] == '\\') {
4347     *prefix = L"\\\\?\\";
4348   } else if (buf[0] == '\\' && buf[1] == '\\') {
4349     if (buf[2] == '?' && buf[3] == '\\') {
4350       *prefix = L"";
4351       *needs_fullpath = false;
4352     } else {
4353       *prefix = L"\\\\?\\UNC";
4354       *prefix_off = 1; // Overwrite the first char with the prefix, so \\share\path becomes \\?\UNC\share\path
4355     }
4356   } else {
4357     *prefix = L"\\\\?\\";
4358   }
4359 }
4360 
4361 // Returns the given path as an absolute wide path in unc format. The returned path is NULL
4362 // on error (with err being set accordingly) and should be freed via os::free() otherwise.
4363 // additional_space is the size of space, in wchar_t, the function will additionally add to
4364 // the allocation of return buffer (such that the size of the returned buffer is at least
4365 // wcslen(buf) + 1 + additional_space).
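// A sketch of the typical transformations performed here (assuming well-formed
// input paths):
//   "C:\dir\file"        becomes  "\\?\C:\dir\file"
//   "\\server\share\f"   becomes  "\\?\UNC\server\share\f"
//   "\\?\C:\dir\file"    stays as-is (already in long-path form)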
4366 static wchar_t* wide_abs_unc_path(char const* path, errno_t & err, int additional_space = 0) {
4367   if ((path == NULL) || (path[0] == '\0')) {
4368     err = ENOENT;
4369     return NULL;
4370   }
4371 
  // Need to allocate at least room for 3 characters, since os::native_path transforms "C:" into "C:.".
4373   size_t buf_len = 1 + MAX2((size_t)3, strlen(path));
4374   char* buf = NEW_C_HEAP_ARRAY(char, buf_len, mtInternal);
4375   strncpy(buf, path, buf_len);
4376   os::native_path(buf);
4377 
4378   LPWSTR prefix = NULL;
4379   int prefix_off = 0;
4380   bool needs_fullpath = true;
4381   set_path_prefix(buf, &prefix, &prefix_off, &needs_fullpath);
4382 
4383   LPWSTR unicode_path = NULL;
4384   err = convert_to_unicode(buf, &unicode_path);
4385   FREE_C_HEAP_ARRAY(char, buf);
4386   if (err != ERROR_SUCCESS) {
4387     return NULL;
4388   }
4389 
4390   LPWSTR converted_path = NULL;
4391   if (needs_fullpath) {
4392     err = get_full_path(unicode_path, &converted_path);
4393   } else {
4394     converted_path = unicode_path;
4395   }
4396 
4397   LPWSTR result = NULL;
4398   if (converted_path != NULL) {
4399     size_t prefix_len = wcslen(prefix);
4400     size_t result_len = prefix_len - prefix_off + wcslen(converted_path) + additional_space + 1;
4401     result = NEW_C_HEAP_ARRAY(WCHAR, result_len, mtInternal);
4402     _snwprintf(result, result_len, L"%s%s", prefix, &converted_path[prefix_off]);
4403 
4404     // Remove trailing pathsep (not for \\?\<DRIVE>:\, since it would make it relative)
4405     result_len = wcslen(result);
4406     if ((result[result_len - 1] == L'\\') &&
4407         !(::iswalpha(result[4]) && result[5] == L':' && result_len == 7)) {
4408       result[result_len - 1] = L'\0';
4409     }
4410   }
4411 
4412   if (converted_path != unicode_path) {
4413     FREE_C_HEAP_ARRAY(WCHAR, converted_path);
4414   }
4415   FREE_C_HEAP_ARRAY(WCHAR, unicode_path);
4416 
  return static_cast<wchar_t*>(result); // LPWSTR and wchar_t* are the same type on Windows.
4418 }
4419 
4420 int os::stat(const char *path, struct stat *sbuf) {
4421   errno_t err;
4422   wchar_t* wide_path = wide_abs_unc_path(path, err);
4423 
4424   if (wide_path == NULL) {
4425     errno = err;
4426     return -1;
4427   }
4428 
  WIN32_FILE_ATTRIBUTE_DATA file_data;
4430   BOOL bret = ::GetFileAttributesExW(wide_path, GetFileExInfoStandard, &file_data);
4431   os::free(wide_path);
4432 
4433   if (!bret) {
4434     errno = ::GetLastError();
4435     return -1;
4436   }
4437 
4438   file_attribute_data_to_stat(sbuf, file_data);
4439   return 0;
4440 }
4441 
4442 static HANDLE create_read_only_file_handle(const char* file) {
4443   errno_t err;
4444   wchar_t* wide_path = wide_abs_unc_path(file, err);
4445 
4446   if (wide_path == NULL) {
4447     errno = err;
4448     return INVALID_HANDLE_VALUE;
4449   }
4450 
4451   HANDLE handle = ::CreateFileW(wide_path, 0, FILE_SHARE_READ,
4452                                 NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4453   os::free(wide_path);
4454 
4455   return handle;
4456 }
4457 
4458 bool os::same_files(const char* file1, const char* file2) {
4459 
4460   if (file1 == NULL && file2 == NULL) {
4461     return true;
4462   }
4463 
4464   if (file1 == NULL || file2 == NULL) {
4465     return false;
4466   }
4467 
4468   if (strcmp(file1, file2) == 0) {
4469     return true;
4470   }
4471 
4472   HANDLE handle1 = create_read_only_file_handle(file1);
4473   HANDLE handle2 = create_read_only_file_handle(file2);
4474   bool result = false;
4475 
4476   // if we could open both paths...
4477   if (handle1 != INVALID_HANDLE_VALUE && handle2 != INVALID_HANDLE_VALUE) {
4478     BY_HANDLE_FILE_INFORMATION fileInfo1;
4479     BY_HANDLE_FILE_INFORMATION fileInfo2;
4480     if (::GetFileInformationByHandle(handle1, &fileInfo1) &&
4481       ::GetFileInformationByHandle(handle2, &fileInfo2)) {
4482       // the paths are the same if they refer to the same file (fileindex) on the same volume (volume serial number)
4483       if (fileInfo1.dwVolumeSerialNumber == fileInfo2.dwVolumeSerialNumber &&
4484         fileInfo1.nFileIndexHigh == fileInfo2.nFileIndexHigh &&
4485         fileInfo1.nFileIndexLow == fileInfo2.nFileIndexLow) {
4486         result = true;
4487       }
4488     }
4489   }
4490 
  // free the handles
4492   if (handle1 != INVALID_HANDLE_VALUE) {
4493     ::CloseHandle(handle1);
4494   }
4495 
4496   if (handle2 != INVALID_HANDLE_VALUE) {
4497     ::CloseHandle(handle2);
4498   }
4499 
4500   return result;
4501 }
4502 
4503 #define FT2INT64(ft) \
4504   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
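
// A FILETIME counts 100-nanosecond intervals, so FT2INT64(ft) * 100 (as used in
// os::thread_cpu_time below) converts the combined value to nanoseconds.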
4505 
4506 
4507 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4508 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4509 // of a thread.
4510 //
// current_thread_cpu_time() and thread_cpu_time(Thread*) return
4512 // the fast estimate available on the platform.
4513 
4514 // current_thread_cpu_time() is not optimized for Windows yet
4515 jlong os::current_thread_cpu_time() {
4516   // return user + sys since the cost is the same
4517   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4518 }
4519 
4520 jlong os::thread_cpu_time(Thread* thread) {
4521   // consistent with what current_thread_cpu_time() returns.
4522   return os::thread_cpu_time(thread, true /* user+sys */);
4523 }
4524 
4525 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4526   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4527 }
4528 
4529 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
  // This code is a copy of the classic VM's hpi::sysThreadCPUTime.
4531   // If this function changes, os::is_thread_cpu_time_supported() should too
4532   FILETIME CreationTime;
4533   FILETIME ExitTime;
4534   FILETIME KernelTime;
4535   FILETIME UserTime;
4536 
4537   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4538                       &ExitTime, &KernelTime, &UserTime) == 0) {
4539     return -1;
4540   } else if (user_sys_cpu_time) {
4541     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4542   } else {
4543     return FT2INT64(UserTime) * 100;
4544   }
4545 }
4546 
4547 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4548   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4549   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4550   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4551   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4552 }
4553 
4554 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4555   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4556   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4557   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4558   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4559 }
4560 
4561 bool os::is_thread_cpu_time_supported() {
4562   // see os::thread_cpu_time
4563   FILETIME CreationTime;
4564   FILETIME ExitTime;
4565   FILETIME KernelTime;
4566   FILETIME UserTime;
4567 
4568   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4569                       &KernelTime, &UserTime) == 0) {
4570     return false;
4571   } else {
4572     return true;
4573   }
4574 }
4575 
// Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4577 // It does have primitives (PDH API) to get CPU usage and run queue length.
4578 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4579 // If we wanted to implement loadavg on Windows, we have a few options:
4580 //
4581 // a) Query CPU usage and run queue length and "fake" an answer by
4582 //    returning the CPU usage if it's under 100%, and the run queue
4583 //    length otherwise.  It turns out that querying is pretty slow
4584 //    on Windows, on the order of 200 microseconds on a fast machine.
//    Note that on Windows the CPU usage value is the % usage
4586 //    since the last time the API was called (and the first call
4587 //    returns 100%), so we'd have to deal with that as well.
4588 //
4589 // b) Sample the "fake" answer using a sampling thread and store
4590 //    the answer in a global variable.  The call to loadavg would
4591 //    just return the value of the global, avoiding the slow query.
4592 //
4593 // c) Sample a better answer using exponential decay to smooth the
4594 //    value.  This is basically the algorithm used by UNIX kernels.
4595 //
4596 // Note that sampling thread starvation could affect both (b) and (c).
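//
// A rough, commented-out sketch of option (a), kept here for reference. It
// assumes the PDH counter path quoted above and omits all error handling
// (sample_interval_ms is a placeholder); it is not what the stub below does.
//
//   #include <pdh.h>                 // link against pdh.lib
//   PDH_HQUERY   query;
//   PDH_HCOUNTER cpu;
//   PdhOpenQuery(NULL, 0, &query);
//   PdhAddCounter(query, "\\Processor(_Total)\\% Processor Time", 0, &cpu);
//   PdhCollectQueryData(query);      // first sample has no baseline
//   Sleep(sample_interval_ms);
//   PdhCollectQueryData(query);
//   PDH_FMT_COUNTERVALUE value;
//   PdhGetFormattedCounterValue(cpu, PDH_FMT_DOUBLE, NULL, &value);
//   // value.doubleValue is the CPU usage (%) since the previous sample.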
4597 int os::loadavg(double loadavg[], int nelem) {
4598   return -1;
4599 }
4600 
4601 
4602 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4603 bool os::dont_yield() {
4604   return DontYieldALot;
4605 }
4606 
4607 int os::open(const char *path, int oflag, int mode) {
4608   errno_t err;
4609   wchar_t* wide_path = wide_abs_unc_path(path, err);
4610 
4611   if (wide_path == NULL) {
4612     errno = err;
4613     return -1;
4614   }
4615   int fd = ::_wopen(wide_path, oflag | O_BINARY | O_NOINHERIT, mode);
4616   os::free(wide_path);
4617 
4618   if (fd == -1) {
4619     errno = ::GetLastError();
4620   }
4621 
4622   return fd;
4623 }
4624 
4625 FILE* os::open(int fd, const char* mode) {
4626   return ::_fdopen(fd, mode);
4627 }
4628 
4629 // Is a (classpath) directory empty?
4630 bool os::dir_is_empty(const char* path) {
4631   errno_t err;
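  // Pass additional_space == 2 so there is room to append "\*" (or "*") below.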
4632   wchar_t* wide_path = wide_abs_unc_path(path, err, 2);
4633 
4634   if (wide_path == NULL) {
4635     errno = err;
4636     return false;
4637   }
4638 
4639   // Make sure we end with "\\*"
4640   if (wide_path[wcslen(wide_path) - 1] == L'\\') {
4641     wcscat(wide_path, L"*");
4642   } else {
4643     wcscat(wide_path, L"\\*");
4644   }
4645 
4646   WIN32_FIND_DATAW fd;
4647   HANDLE f = ::FindFirstFileW(wide_path, &fd);
4648   os::free(wide_path);
4649   bool is_empty = true;
4650 
4651   if (f != INVALID_HANDLE_VALUE) {
4652     while (is_empty && ::FindNextFileW(f, &fd)) {
      // An empty directory contains only the "." and ".." entries.
4655       if ((wcscmp(fd.cFileName, L".") != 0) &&
4656           (wcscmp(fd.cFileName, L"..") != 0)) {
4657         is_empty = false;
4658       }
4659     }
4660     FindClose(f);
4661   } else {
4662     errno = ::GetLastError();
4663   }
4664 
4665   return is_empty;
4666 }
4667 
4668 // create binary file, rewriting existing file if required
4669 int os::create_binary_file(const char* path, bool rewrite_existing) {
4670   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4671   if (!rewrite_existing) {
4672     oflags |= _O_EXCL;
4673   }
4674   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4675 }
4676 
4677 // return current position of file pointer
4678 jlong os::current_file_offset(int fd) {
4679   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4680 }
4681 
4682 // move file pointer to the specified offset
4683 jlong os::seek_to_file_offset(int fd, jlong offset) {
4684   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4685 }
4686 
4687 
4688 jlong os::lseek(int fd, jlong offset, int whence) {
4689   return (jlong) ::_lseeki64(fd, offset, whence);
4690 }
4691 
4692 ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4693   OVERLAPPED ov;
4694   DWORD nread;
4695   BOOL result;
4696 
4697   ZeroMemory(&ov, sizeof(ov));
4698   ov.Offset = (DWORD)offset;
4699   ov.OffsetHigh = (DWORD)(offset >> 32);
4700 
4701   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4702 
4703   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4704 
4705   return result ? nread : 0;
4706 }
4707 
4708 
4709 // This method is a slightly reworked copy of JDK's sysNativePath
4710 // from src/windows/hpi/src/path_md.c
4711 
4712 // Convert a pathname to native format.  On win32, this involves forcing all
4713 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4714 // sometimes rejects '/') and removing redundant separators.  The input path is
4715 // assumed to have been converted into the character encoding used by the local
4716 // system.  Because this might be a double-byte encoding, care is taken to
4717 // treat double-byte lead characters correctly.
4718 //
4719 // This procedure modifies the given path in place, as the result is never
4720 // longer than the original.  There is no error return; this operation always
4721 // succeeds.
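//
// For example (following the rules above):
//   "a//b/"          becomes  "a\b"
//   "/c:/path"       becomes  "c:\path"
//   "//host//share/" becomes  "\\host\share"
//   "z:"             becomes  "z:." (see the C runtime workaround below)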
4722 char * os::native_path(char *path) {
4723   char *src = path, *dst = path, *end = path;
4724   char *colon = NULL;  // If a drive specifier is found, this will
4725                        // point to the colon following the drive letter
4726 
4727   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4728   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4729           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4730 
4731   // Check for leading separators
4732 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4733   while (isfilesep(*src)) {
4734     src++;
4735   }
4736 
4737   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4738     // Remove leading separators if followed by drive specifier.  This
4739     // hack is necessary to support file URLs containing drive
4740     // specifiers (e.g., "file://c:/path").  As a side effect,
4741     // "/c:/path" can be used as an alternative to "c:/path".
4742     *dst++ = *src++;
4743     colon = dst;
4744     *dst++ = ':';
4745     src++;
4746   } else {
4747     src = path;
4748     if (isfilesep(src[0]) && isfilesep(src[1])) {
4749       // UNC pathname: Retain first separator; leave src pointed at
4750       // second separator so that further separators will be collapsed
4751       // into the second separator.  The result will be a pathname
4752       // beginning with "\\\\" followed (most likely) by a host name.
4753       src = dst = path + 1;
4754       path[0] = '\\';     // Force first separator to '\\'
4755     }
4756   }
4757 
4758   end = dst;
4759 
4760   // Remove redundant separators from remainder of path, forcing all
4761   // separators to be '\\' rather than '/'. Also, single byte space
4762   // characters are removed from the end of the path because those
4763   // are not legal ending characters on this operating system.
4764   //
4765   while (*src != '\0') {
4766     if (isfilesep(*src)) {
4767       *dst++ = '\\'; src++;
4768       while (isfilesep(*src)) src++;
4769       if (*src == '\0') {
4770         // Check for trailing separator
4771         end = dst;
4772         if (colon == dst - 2) break;  // "z:\\"
4773         if (dst == path + 1) break;   // "\\"
4774         if (dst == path + 2 && isfilesep(path[0])) {
4775           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4776           // beginning of a UNC pathname.  Even though it is not, by
4777           // itself, a valid UNC pathname, we leave it as is in order
4778           // to be consistent with the path canonicalizer as well
4779           // as the win32 APIs, which treat this case as an invalid
4780           // UNC pathname rather than as an alias for the root
4781           // directory of the current drive.
4782           break;
4783         }
4784         end = --dst;  // Path does not denote a root directory, so
4785                       // remove trailing separator
4786         break;
4787       }
4788       end = dst;
4789     } else {
4790       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4791         *dst++ = *src++;
4792         if (*src) *dst++ = *src++;
4793         end = dst;
4794       } else {  // Copy a single-byte character
4795         char c = *src++;
4796         *dst++ = c;
4797         // Space is not a legal ending character
4798         if (c != ' ') end = dst;
4799       }
4800     }
4801   }
4802 
4803   *end = '\0';
4804 
4805   // For "z:", add "." to work around a bug in the C runtime library
4806   if (colon == dst - 1) {
4807     path[2] = '.';
4808     path[3] = '\0';
4809   }
4810 
4811   return path;
4812 }
4813 
4814 // This code is a copy of JDK's sysSetLength
4815 // from src/windows/hpi/src/sys_api_md.c
4816 
4817 int os::ftruncate(int fd, jlong length) {
4818   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4819   long high = (long)(length >> 32);
4820   DWORD ret;
4821 
4822   if (h == (HANDLE)(-1)) {
4823     return -1;
4824   }
4825 
4826   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4827   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4828     return -1;
4829   }
4830 
4831   if (::SetEndOfFile(h) == FALSE) {
4832     return -1;
4833   }
4834 
4835   return 0;
4836 }
4837 
4838 int os::get_fileno(FILE* fp) {
4839   return _fileno(fp);
4840 }
4841 
4842 // This code is a copy of JDK's sysSync
4843 // from src/windows/hpi/src/sys_api_md.c
4844 // except for the legacy workaround for a bug in Win 98
4845 
4846 int os::fsync(int fd) {
4847   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4848 
4849   if ((!::FlushFileBuffers(handle)) &&
4850       (GetLastError() != ERROR_ACCESS_DENIED)) {
4851     // from winerror.h
4852     return -1;
4853   }
4854   return 0;
4855 }
4856 
4857 static int nonSeekAvailable(int, long *);
4858 static int stdinAvailable(int, long *);
4859 
4860 // This code is a copy of JDK's sysAvailable
4861 // from src/windows/hpi/src/sys_api_md.c
4862 
4863 int os::available(int fd, jlong *bytes) {
4864   jlong cur, end;
4865   struct _stati64 stbuf64;
4866 
4867   if (::_fstati64(fd, &stbuf64) >= 0) {
4868     int mode = stbuf64.st_mode;
4869     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4870       int ret;
4871       long lpbytes;
4872       if (fd == 0) {
4873         ret = stdinAvailable(fd, &lpbytes);
4874       } else {
4875         ret = nonSeekAvailable(fd, &lpbytes);
4876       }
4877       (*bytes) = (jlong)(lpbytes);
4878       return ret;
4879     }
4880     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4881       return FALSE;
4882     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4883       return FALSE;
4884     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4885       return FALSE;
4886     }
4887     *bytes = end - cur;
4888     return TRUE;
4889   } else {
4890     return FALSE;
4891   }
4892 }
4893 
4894 void os::flockfile(FILE* fp) {
4895   _lock_file(fp);
4896 }
4897 
4898 void os::funlockfile(FILE* fp) {
4899   _unlock_file(fp);
4900 }
4901 
4902 // This code is a copy of JDK's nonSeekAvailable
4903 // from src/windows/hpi/src/sys_api_md.c
4904 
4905 static int nonSeekAvailable(int fd, long *pbytes) {
4906   // This is used for available on non-seekable devices
4907   // (like both named and anonymous pipes, such as pipes
4908   //  connected to an exec'd process).
4909   // Standard Input is a special case.
4910   HANDLE han;
4911 
4912   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4913     return FALSE;
4914   }
4915 
4916   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4917     // PeekNamedPipe fails when at EOF.  In that case we
4918     // simply make *pbytes = 0 which is consistent with the
4919     // behavior we get on Solaris when an fd is at EOF.
4920     // The only alternative is to raise an Exception,
4921     // which isn't really warranted.
4922     //
4923     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4924       return FALSE;
4925     }
4926     *pbytes = 0;
4927   }
4928   return TRUE;
4929 }
4930 
4931 #define MAX_INPUT_EVENTS 2000
4932 
4933 // This code is a copy of JDK's stdinAvailable
4934 // from src/windows/hpi/src/sys_api_md.c
4935 
4936 static int stdinAvailable(int fd, long *pbytes) {
4937   HANDLE han;
4938   DWORD numEventsRead = 0;  // Number of events read from buffer
4939   DWORD numEvents = 0;      // Number of events in buffer
4940   DWORD i = 0;              // Loop index
4941   DWORD curLength = 0;      // Position marker
4942   DWORD actualLength = 0;   // Number of bytes readable
4943   BOOL error = FALSE;       // Error holder
4944   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4945 
4946   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4947     return FALSE;
4948   }
4949 
4950   // Construct an array of input records in the console buffer
4951   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4952   if (error == 0) {
4953     return nonSeekAvailable(fd, pbytes);
4954   }
4955 
4956   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4957   if (numEvents > MAX_INPUT_EVENTS) {
4958     numEvents = MAX_INPUT_EVENTS;
4959   }
4960 
4961   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4962   if (lpBuffer == NULL) {
4963     return FALSE;
4964   }
4965 
4966   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4967   if (error == 0) {
4968     os::free(lpBuffer);
4969     return FALSE;
4970   }
4971 
4972   // Examine input records for the number of bytes available
4973   for (i=0; i<numEvents; i++) {
4974     if (lpBuffer[i].EventType == KEY_EVENT) {
4975 
4976       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4977                                       &(lpBuffer[i].Event);
4978       if (keyRecord->bKeyDown == TRUE) {
4979         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4980         curLength++;
4981         if (*keyPressed == '\r') {
4982           actualLength = curLength;
4983         }
4984       }
4985     }
4986   }
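  // Note that actualLength only advances when a carriage return is seen, so
  // only complete (Enter-terminated) input lines are counted as available.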
4987 
4988   if (lpBuffer != NULL) {
4989     os::free(lpBuffer);
4990   }
4991 
4992   *pbytes = (long) actualLength;
4993   return TRUE;
4994 }
4995 
4996 // Map a block of memory.
4997 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4998                         char *addr, size_t bytes, bool read_only,
4999                         bool allow_exec) {
5000   HANDLE hFile;
5001   char* base;
5002 
5003   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
5004                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
  if (hFile == INVALID_HANDLE_VALUE) {
5006     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
5007     return NULL;
5008   }
5009 
5010   if (allow_exec) {
5011     // CreateFileMapping/MapViewOfFileEx can't map executable memory
5012     // unless it comes from a PE image (which the shared archive is not.)
5013     // Even VirtualProtect refuses to give execute access to mapped memory
5014     // that was not previously executable.
5015     //
5016     // Instead, stick the executable region in anonymous memory.  Yuck.
5017     // Penalty is that ~4 pages will not be shareable - in the future
5018     // we might consider DLLizing the shared archive with a proper PE
5019     // header so that mapping executable + sharing is possible.
5020 
5021     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
5022                                 PAGE_READWRITE);
5023     if (base == NULL) {
5024       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
5025       CloseHandle(hFile);
5026       return NULL;
5027     }
5028 
5029     // Record virtual memory allocation
5030     MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
5031 
5032     DWORD bytes_read;
5033     OVERLAPPED overlapped;
5034     overlapped.Offset = (DWORD)file_offset;
5035     overlapped.OffsetHigh = 0;
5036     overlapped.hEvent = NULL;
5037     // ReadFile guarantees that if the return value is true, the requested
5038     // number of bytes were read before returning.
5039     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
5040     if (!res) {
5041       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
5042       release_memory(base, bytes);
5043       CloseHandle(hFile);
5044       return NULL;
5045     }
5046   } else {
5047     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
5048                                     NULL /* file_name */);
5049     if (hMap == NULL) {
5050       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
5051       CloseHandle(hFile);
5052       return NULL;
5053     }
5054 
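    // FILE_MAP_READ gives a read-only view; FILE_MAP_COPY gives a private
    // copy-on-write view, so writes by the VM do not reach the underlying file.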
5055     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
5056     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
5057                                   (DWORD)bytes, addr);
5058     if (base == NULL) {
5059       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
5060       CloseHandle(hMap);
5061       CloseHandle(hFile);
5062       return NULL;
5063     }
5064 
5065     if (CloseHandle(hMap) == 0) {
5066       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
5067       CloseHandle(hFile);
5068       return base;
5069     }
5070   }
5071 
5072   if (allow_exec) {
5073     DWORD old_protect;
5074     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
5075     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
5076 
5077     if (!res) {
5078       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
5079       // Don't consider this a hard error, on IA32 even if the
5080       // VirtualProtect fails, we should still be able to execute
5081       CloseHandle(hFile);
5082       return base;
5083     }
5084   }
5085 
5086   if (CloseHandle(hFile) == 0) {
5087     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
5088     return base;
5089   }
5090 
5091   return base;
5092 }
5093 
5094 
5095 // Remap a block of memory.
5096 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5097                           char *addr, size_t bytes, bool read_only,
5098                           bool allow_exec) {
5099   // This OS does not allow existing memory maps to be remapped so we
5100   // would have to unmap the memory before we remap it.
5101 
5102   // Because there is a small window between unmapping memory and mapping
5103   // it in again with different protections, CDS archives are mapped RW
  // on Windows, so this function isn't called.
5105   ShouldNotReachHere();
5106   return NULL;
5107 }
5108 
5109 
5110 // Unmap a block of memory.
5111 // Returns true=success, otherwise false.
5112 
5113 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5114   MEMORY_BASIC_INFORMATION mem_info;
5115   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
5116     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
5117     return false;
5118   }
5119 
5120   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
5121   // Instead, executable region was allocated using VirtualAlloc(). See
5122   // pd_map_memory() above.
5123   //
  // The following flags should match the 'exec_access' flags used for
5125   // VirtualProtect() in pd_map_memory().
5126   if (mem_info.Protect == PAGE_EXECUTE_READ ||
5127       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
5128     return pd_release_memory(addr, bytes);
5129   }
5130 
5131   BOOL result = UnmapViewOfFile(addr);
5132   if (result == 0) {
5133     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
5134     return false;
5135   }
5136   return true;
5137 }
5138 
5139 void os::pause() {
5140   char filename[MAX_PATH];
5141   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5142     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5143   } else {
5144     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5145   }
5146 
5147   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5148   if (fd != -1) {
5149     struct stat buf;
5150     ::close(fd);
5151     while (::stat(filename, &buf) == 0) {
5152       Sleep(100);
5153     }
5154   } else {
5155     jio_fprintf(stderr,
5156                 "Could not open pause file '%s', continuing immediately.\n", filename);
5157   }
5158 }
5159 
5160 Thread* os::ThreadCrashProtection::_protected_thread = NULL;
5161 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
5162 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;
5163 
5164 os::ThreadCrashProtection::ThreadCrashProtection() {
5165 }
5166 
5167 // See the caveats for this class in os_windows.hpp
5168 // Protects the callback call so that raised OS EXCEPTIONS causes a jump back
5169 // into this method and returns false. If no OS EXCEPTION was raised, returns
5170 // true.
5171 // The callback is supposed to provide the method that should be protected.
5172 //
5173 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
5174 
5175   Thread::muxAcquire(&_crash_mux, "CrashProtection");
5176 
5177   _protected_thread = Thread::current_or_null();
5178   assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");
5179 
5180   bool success = true;
5181   __try {
5182     _crash_protection = this;
5183     cb.call();
5184   } __except(EXCEPTION_EXECUTE_HANDLER) {
5185     // only for protection, nothing to do
5186     success = false;
5187   }
5188   _crash_protection = NULL;
5189   _protected_thread = NULL;
5190   Thread::muxRelease(&_crash_mux);
5191   return success;
5192 }
5193 
5194 
5195 class HighResolutionInterval : public CHeapObj<mtThread> {
5196   // The default timer resolution seems to be 10 milliseconds.
5197   // (Where is this written down?)
5198   // If someone wants to sleep for only a fraction of the default,
5199   // then we set the timer resolution down to 1 millisecond for
5200   // the duration of their interval.
5201   // We carefully set the resolution back, since otherwise we
5202   // seem to incur an overhead (3%?) that we don't need.
5203   // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
5205   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
5206   // timeBeginPeriod() if the relative error exceeded some threshold.
5207   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
5208   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
5209   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
5210   // resolution timers running.
5211  private:
5212   jlong resolution;
5213  public:
5214   HighResolutionInterval(jlong ms) {
5215     resolution = ms % 10L;
5216     if (resolution != 0) {
5217       MMRESULT result = timeBeginPeriod(1L);
5218     }
5219   }
5220   ~HighResolutionInterval() {
5221     if (resolution != 0) {
5222       MMRESULT result = timeEndPeriod(1L);
5223     }
5224     resolution = 0L;
5225   }
5226 };
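
// HighResolutionInterval is used by os::PlatformEvent::park(jlong) below: unless
// ForceTimeHighResolution is set, each timed WaitForSingleObject() call is
// bracketed by an instance, so timeouts that are not a multiple of the default
// ~10 ms resolution run with 1 ms timer resolution for the duration of the wait.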
5227 
5228 // An Event wraps a win32 "CreateEvent" kernel handle.
5229 //
5230 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5231 //
5232 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5233 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5234 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5235 //     In addition, an unpark() operation might fetch the handle field, but the
5236 //     event could recycle between the fetch and the SetEvent() operation.
5237 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5238 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
//     on a stale but recycled handle would be harmless, but in practice this might
5240 //     confuse other non-Sun code, so it's not a viable approach.
5241 //
5242 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5243 //     with the Event.  The event handle is never closed.  This could be construed
5244 //     as handle leakage, but only up to the maximum # of threads that have been extant
5245 //     at any one time.  This shouldn't be an issue, as windows platforms typically
5246 //     permit a process to have hundreds of thousands of open handles.
5247 //
5248 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5249 //     and release unused handles.
5250 //
5251 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5252 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5253 //
5254 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5255 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5256 //
5257 // We use (2).
5258 //
5259 // TODO-FIXME:
5260 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5261 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5262 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5263 // 3.  Collapse the JSR166 parker event, and the objectmonitor ParkEvent
5264 //     into a single win32 CreateEvent() handle.
5265 //
5266 // Assumption:
5267 //    Only one parker can exist on an event, which is why we allocate
5268 //    them per-thread. Multiple unparkers can coexist.
5269 //
5270 // _Event transitions in park()
5271 //   -1 => -1 : illegal
5272 //    1 =>  0 : pass - return immediately
5273 //    0 => -1 : block; then set _Event to 0 before returning
5274 //
5275 // _Event transitions in unpark()
5276 //    0 => 1 : just return
5277 //    1 => 1 : just return
5278 //   -1 => either 0 or 1; must signal target thread
5279 //         That is, we can safely transition _Event from -1 to either
5280 //         0 or 1.
5281 //
5282 // _Event serves as a restricted-range semaphore.
5283 //   -1 : thread is blocked, i.e. there is a waiter
5284 //    0 : neutral: thread is running or ready,
5285 //        could have been signaled after a wait started
5286 //    1 : signaled - thread is running or ready
5287 //
5288 // Another possible encoding of _Event would be with
5289 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5290 //
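// For example, an unpark() that arrives before the corresponding park() moves
// _Event from 0 to 1; the later park() then observes 1, swaps it back to 0 and
// returns immediately without touching the kernel event.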
5291 
5292 int os::PlatformEvent::park(jlong Millis) {
5293   // Transitions for _Event:
5294   //   -1 => -1 : illegal
5295   //    1 =>  0 : pass - return immediately
5296   //    0 => -1 : block; then set _Event to 0 before returning
5297 
5298   guarantee(_ParkHandle != NULL , "Invariant");
5299   guarantee(Millis > 0          , "Invariant");
5300 
5301   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5302   // the initial park() operation.
5303   // Consider: use atomic decrement instead of CAS-loop
5304 
5305   int v;
5306   for (;;) {
5307     v = _Event;
5308     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5309   }
5310   guarantee((v == 0) || (v == 1), "invariant");
5311   if (v != 0) return OS_OK;
5312 
5313   // Do this the hard way by blocking ...
5314   // TODO: consider a brief spin here, gated on the success of recent
5315   // spin attempts by this thread.
5316   //
5317   // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timeout values passed in WaitForSingleObject() are problematic on some
5319   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5320   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5321   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5322   // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
5323   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5324   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5325   // for the already waited time.  This policy does not admit any new outcomes.
5326   // In the future, however, we might want to track the accumulated wait time and
5327   // adjust Millis accordingly if we encounter a spurious wakeup.
5328 
5329   const int MAXTIMEOUT = 0x10000000;
5330   DWORD rv = WAIT_TIMEOUT;
5331   while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5333     if (Millis > MAXTIMEOUT) {
5334       prd = MAXTIMEOUT;
5335     }
5336     HighResolutionInterval *phri = NULL;
5337     if (!ForceTimeHighResolution) {
5338       phri = new HighResolutionInterval(prd);
5339     }
5340     rv = ::WaitForSingleObject(_ParkHandle, prd);
5341     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5342     if (rv == WAIT_TIMEOUT) {
5343       Millis -= prd;
5344     }
5345     delete phri; // if it is NULL, harmless
5346   }
5347   v = _Event;
5348   _Event = 0;
5349   // see comment at end of os::PlatformEvent::park() below:
5350   OrderAccess::fence();
  // If we encounter a nearly simultaneous timeout expiry and unpark()
5352   // we return OS_OK indicating we awoke via unpark().
5353   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5354   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5355 }
5356 
5357 void os::PlatformEvent::park() {
5358   // Transitions for _Event:
5359   //   -1 => -1 : illegal
5360   //    1 =>  0 : pass - return immediately
5361   //    0 => -1 : block; then set _Event to 0 before returning
5362 
5363   guarantee(_ParkHandle != NULL, "Invariant");
5364   // Invariant: Only the thread associated with the Event/PlatformEvent
5365   // may call park().
5366   // Consider: use atomic decrement instead of CAS-loop
5367   int v;
5368   for (;;) {
5369     v = _Event;
5370     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5371   }
5372   guarantee((v == 0) || (v == 1), "invariant");
5373   if (v != 0) return;
5374 
5375   // Do this the hard way by blocking ...
5376   // TODO: consider a brief spin here, gated on the success of recent
5377   // spin attempts by this thread.
5378   while (_Event < 0) {
5379     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5380     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5381   }
5382 
  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case
  // multiple unpark() operations drove _Event up to 1.
5386   _Event = 0;
5387   OrderAccess::fence();
5388   guarantee(_Event >= 0, "invariant");
5389 }
5390 
5391 void os::PlatformEvent::unpark() {
5392   guarantee(_ParkHandle != NULL, "Invariant");
5393 
5394   // Transitions for _Event:
5395   //    0 => 1 : just return
5396   //    1 => 1 : just return
5397   //   -1 => either 0 or 1; must signal target thread
5398   //         That is, we can safely transition _Event from -1 to either
5399   //         0 or 1.
5400   // See also: "Semaphores in Plan 9" by Mullender & Cox
5401   //
5402   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5403   // that it will take two back-to-back park() calls for the owning
5404   // thread to block. This has the benefit of forcing a spurious return
5405   // from the first park() call after an unpark() call which will help
5406   // shake out uses of park() and unpark() without condition variables.
5407 
5408   if (Atomic::xchg(&_Event, 1) >= 0) return;
5409 
5410   ::SetEvent(_ParkHandle);
5411 }
5412 
5413 
5414 // JSR166
5415 // -------------------------------------------------------
5416 
5417 // The Windows implementation of Park is very straightforward: Basic
5418 // operations on Win32 Events turn out to have the right semantics to
// use them directly. We opportunistically reuse the event inherited
5420 // from Monitor.
5421 
5422 void Parker::park(bool isAbsolute, jlong time) {
5423   guarantee(_ParkEvent != NULL, "invariant");
5424   // First, demultiplex/decode time arguments
5425   if (time < 0) { // don't wait
5426     return;
5427   } else if (time == 0 && !isAbsolute) {
5428     time = INFINITE;
5429   } else if (isAbsolute) {
5430     time -= os::javaTimeMillis(); // convert to relative time
5431     if (time <= 0) {  // already elapsed
5432       return;
5433     }
5434   } else { // relative
5435     time /= 1000000;  // Must coarsen from nanos to millis
5436     if (time == 0) {  // Wait for the minimal time unit if zero
5437       time = 1;
5438     }
5439   }
5440 
5441   JavaThread* thread = JavaThread::current();
5442 
5443   // Don't wait if interrupted or already triggered
5444   if (thread->is_interrupted(false) ||
5445       WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5446     ResetEvent(_ParkEvent);
5447     return;
5448   } else {
5449     ThreadBlockInVM tbivm(thread);
5450     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5451     thread->set_suspend_equivalent();
5452 
5453     WaitForSingleObject(_ParkEvent, time);
5454     ResetEvent(_ParkEvent);
5455 
5456     // If externally suspended while waiting, re-suspend
5457     if (thread->handle_special_suspend_equivalent_condition()) {
5458       thread->java_suspend_self();
5459     }
5460   }
5461 }
5462 
5463 void Parker::unpark() {
5464   guarantee(_ParkEvent != NULL, "invariant");
5465   SetEvent(_ParkEvent);
5466 }
5467 
5468 // Platform Monitor implementation
5469 
5470 // Must already be locked
5471 int os::PlatformMonitor::wait(jlong millis) {
5472   assert(millis >= 0, "negative timeout");
5473   int ret = OS_TIMEOUT;
5474   int status = SleepConditionVariableCS(&_cond, &_mutex,
5475                                         millis == 0 ? INFINITE : millis);
5476   if (status != 0) {
5477     ret = OS_OK;
5478   }
5479   #ifndef PRODUCT
5480   else {
5481     DWORD err = GetLastError();
5482     assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
5483   }
5484   #endif
5485   return ret;
5486 }
5487 
5488 // Run the specified command in a separate process. Return its exit value,
5489 // or -1 on failure (e.g. can't create a new process).
5490 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
5491   STARTUPINFO si;
5492   PROCESS_INFORMATION pi;
5493   DWORD exit_code;
5494 
5495   char * cmd_string;
5496   const char * cmd_prefix = "cmd /C ";
5497   size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
5498   cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
5499   if (cmd_string == NULL) {
5500     return -1;
5501   }
5502   cmd_string[0] = '\0';
5503   strcat(cmd_string, cmd_prefix);
5504   strcat(cmd_string, cmd);
5505 
5506   // now replace all '\n' with '&'
5507   char * substring = cmd_string;
5508   while ((substring = strchr(substring, '\n')) != NULL) {
5509     substring[0] = '&';
5510     substring++;
5511   }
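  // e.g. "echo a\necho b" becomes the single command line "cmd /C echo a&echo b"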
5512   memset(&si, 0, sizeof(si));
5513   si.cb = sizeof(si);
5514   memset(&pi, 0, sizeof(pi));
5515   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5516                             cmd_string,    // command line
5517                             NULL,   // process security attribute
5518                             NULL,   // thread security attribute
5519                             TRUE,   // inherits system handles
5520                             0,      // no creation flags
5521                             NULL,   // use parent's environment block
5522                             NULL,   // use parent's starting directory
5523                             &si,    // (in) startup information
5524                             &pi);   // (out) process information
5525 
5526   if (rslt) {
5527     // Wait until child process exits.
5528     WaitForSingleObject(pi.hProcess, INFINITE);
5529 
5530     GetExitCodeProcess(pi.hProcess, &exit_code);
5531 
5532     // Close process and thread handles.
5533     CloseHandle(pi.hProcess);
5534     CloseHandle(pi.hThread);
5535   } else {
5536     exit_code = -1;
5537   }
5538 
5539   FREE_C_HEAP_ARRAY(char, cmd_string);
5540   return (int)exit_code;
5541 }
5542 
5543 bool os::find(address addr, outputStream* st) {
5544   int offset = -1;
5545   bool result = false;
5546   char buf[256];
5547   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5548     st->print(PTR_FORMAT " ", addr);
5549     if (strlen(buf) < sizeof(buf) - 1) {
5550       char* p = strrchr(buf, '\\');
5551       if (p) {
5552         st->print("%s", p + 1);
5553       } else {
5554         st->print("%s", buf);
5555       }
5556     } else {
5557         // The library name is probably truncated. Let's omit the library name.
5558         // See also JDK-8147512.
5559     }
5560     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5561       st->print("::%s + 0x%x", buf, offset);
5562     }
5563     st->cr();
5564     result = true;
5565   }
5566   return result;
5567 }
5568 
5569 static jint initSock() {
5570   WSADATA wsadata;
5571 
5572   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5573     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5574                 ::GetLastError());
5575     return JNI_ERR;
5576   }
5577   return JNI_OK;
5578 }
5579 
5580 struct hostent* os::get_host_by_name(char* name) {
5581   return (struct hostent*)gethostbyname(name);
5582 }
5583 
5584 int os::socket_close(int fd) {
5585   return ::closesocket(fd);
5586 }
5587 
5588 int os::socket(int domain, int type, int protocol) {
5589   return ::socket(domain, type, protocol);
5590 }
5591 
5592 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5593   return ::connect(fd, him, len);
5594 }
5595 
5596 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5597   return ::recv(fd, buf, (int)nBytes, flags);
5598 }
5599 
5600 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5601   return ::send(fd, buf, (int)nBytes, flags);
5602 }
5603 
5604 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5605   return ::send(fd, buf, (int)nBytes, flags);
5606 }
5607 
5608 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5609 #if defined(IA32)
5610   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5611 #elif defined (AMD64)
5612   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5613 #endif
5614 
5615 // returns true if thread could be suspended,
5616 // false otherwise
5617 static bool do_suspend(HANDLE* h) {
5618   if (h != NULL) {
5619     if (SuspendThread(*h) != ~0) {
5620       return true;
5621     }
5622   }
5623   return false;
5624 }
5625 
5626 // resume the thread
5627 // calling resume on an active thread is a no-op
5628 static void do_resume(HANDLE* h) {
5629   if (h != NULL) {
5630     ResumeThread(*h);
5631   }
5632 }
5633 
5634 // retrieve a suspend/resume context capable handle
5635 // from the tid. Caller validates handle return value.
5636 void get_thread_handle_for_extended_context(HANDLE* h,
5637                                             OSThread::thread_id_t tid) {
5638   if (h != NULL) {
5639     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5640   }
5641 }
5642 
5643 // Thread sampling implementation
5644 //
5645 void os::SuspendedThreadTask::internal_do_task() {
5646   CONTEXT    ctxt;
5647   HANDLE     h = NULL;
5648 
5649   // get context capable handle for thread
5650   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5651 
5652   // sanity
5653   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5654     return;
5655   }
5656 
5657   // suspend the thread
5658   if (do_suspend(&h)) {
5659     ctxt.ContextFlags = sampling_context_flags;
5660     // get thread context
5661     GetThreadContext(h, &ctxt);
5662     SuspendedThreadTaskContext context(_thread, &ctxt);
5663     // pass context to Thread Sampling impl
5664     do_task(context);
5665     // resume thread
5666     do_resume(&h);
5667   }
5668 
5669   // close handle
5670   CloseHandle(h);
5671 }
5672 
5673 bool os::start_debugging(char *buf, int buflen) {
5674   int len = (int)strlen(buf);
5675   char *p = &buf[len];
5676 
5677   jio_snprintf(p, buflen-len,
5678              "\n\n"
5679              "Do you want to debug the problem?\n\n"
5680              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5681              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5682              "Otherwise, select 'No' to abort...",
5683              os::current_process_id(), os::current_thread_id());
5684 
5685   bool yes = os::message_box("Unexpected Error", buf);
5686 
5687   if (yes) {
5688     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5689     // exception. If VM is running inside a debugger, the debugger will
5690     // catch the exception. Otherwise, the breakpoint exception will reach
5691     // the default windows exception handler, which can spawn a debugger and
5692     // automatically attach to the dying VM.
5693     os::breakpoint();
5694     yes = false;
5695   }
5696   return yes;
5697 }
5698 
5699 void* os::get_default_process_handle() {
5700   return (void*)GetModuleHandle(NULL);
5701 }
5702 
5703 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5704 // which is used to find statically linked in agents.
5705 // Additionally for windows, takes into account __stdcall names.
5706 // Parameters:
5707 //            sym_name: Symbol in library we are looking for
5708 //            lib_name: Name of library to look in, NULL for shared libs.
5709 //            is_absolute_path == true if lib_name is absolute path to agent
5710 //                                     such as "C:/a/b/L.dll"
5711 //            == false if only the base name of the library is passed in
5712 //               such as "L"
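// Examples (per the logic below):
//   sym_name "Agent_OnLoad",    lib_name "L"  -> "Agent_OnLoad_L"
//   sym_name "_Agent_OnLoad@8", lib_name "L"  -> "_Agent_OnLoad_L@8"  (__stdcall decoration preserved)
//   sym_name "Agent_OnLoad",    lib_name NULL -> "Agent_OnLoad"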
5713 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5714                                     bool is_absolute_path) {
5715   char *agent_entry_name;
5716   size_t len;
5717   size_t name_len;
5718   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5719   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5720   const char *start;
5721 
5722   if (lib_name != NULL) {
5723     len = name_len = strlen(lib_name);
5724     if (is_absolute_path) {
5725       // Need to strip path, prefix and suffix
5726       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5727         lib_name = ++start;
5728       } else {
5729         // Need to check for drive prefix
5730         if ((start = strchr(lib_name, ':')) != NULL) {
5731           lib_name = ++start;
5732         }
5733       }
5734       if (len <= (prefix_len + suffix_len)) {
5735         return NULL;
5736       }
5737       lib_name += prefix_len;
5738       name_len = strlen(lib_name) - suffix_len;
5739     }
5740   }
5741   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5742   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5743   if (agent_entry_name == NULL) {
5744     return NULL;
5745   }
5746   if (lib_name != NULL) {
5747     const char *p = strrchr(sym_name, '@');
5748     if (p != NULL && p != sym_name) {
5749       // sym_name == _Agent_OnLoad@XX
5750       strncpy(agent_entry_name, sym_name, (p - sym_name));
5751       agent_entry_name[(p-sym_name)] = '\0';
5752       // agent_entry_name == _Agent_OnLoad
5753       strcat(agent_entry_name, "_");
5754       strncat(agent_entry_name, lib_name, name_len);
5755       strcat(agent_entry_name, p);
5756       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5757     } else {
5758       strcpy(agent_entry_name, sym_name);
5759       strcat(agent_entry_name, "_");
5760       strncat(agent_entry_name, lib_name, name_len);
5761     }
5762   } else {
5763     strcpy(agent_entry_name, sym_name);
5764   }
5765   return agent_entry_name;
5766 }
5767 
5768 #ifndef PRODUCT
5769 
5770 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5771 // contiguous memory block at a particular address.
5772 // The test first tries to find a good approximate address to allocate at by using the same
5773 // method to allocate some memory at any address. The test then tries to allocate memory in
5774 // the vicinity (not directly after it to avoid possible by-chance use of that location)
// This is of course only a rough assumption; there is no guarantee that the vicinity of
5776 // the previously allocated memory is available for allocation. The only actual failure
5777 // that is reported is when the test tries to allocate at a particular location but gets a
5778 // different valid one. A NULL return value at this point is not considered an error but may
5779 // be legitimate.
5780 void TestReserveMemorySpecial_test() {
5781   if (!UseLargePages) {
5782     return;
5783   }
5784   // save current value of globals
5785   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5786   bool old_use_numa_interleaving = UseNUMAInterleaving;
5787 
5788   // set globals to make sure we hit the correct code path
5789   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5790 
5791   // do an allocation at an address selected by the OS to get a good one.
5792   const size_t large_allocation_size = os::large_page_size() * 4;
5793   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5794   if (result == NULL) {
5795   } else {
5796     os::release_memory_special(result, large_allocation_size);
5797 
5798     // allocate another page within the recently allocated memory area which seems to be a good location. At least
5799     // we managed to get it once.
5800     const size_t expected_allocation_size = os::large_page_size();
5801     char* expected_location = result + os::large_page_size();
5802     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5803     if (actual_location == NULL) {
5804     } else {
5805       // release memory
5806       os::release_memory_special(actual_location, expected_allocation_size);
5807       // only now check, after releasing any memory to avoid any leaks.
5808       assert(actual_location == expected_location,
5809              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5810              expected_location, expected_allocation_size, actual_location);
5811     }
5812   }
5813 
5814   // restore globals
5815   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5816   UseNUMAInterleaving = old_use_numa_interleaving;
5817 }
5818 #endif // PRODUCT
5819 
5820 /*
5821   All the defined signal names for Windows.
5822 
5823   NOTE that not all of these names are accepted by FindSignal!
5824 
5825   For various reasons some of these may be rejected at runtime.
5826 
5827   Here are the names currently accepted by a user of sun.misc.Signal with
5828   1.4.1 (ignoring potential interaction with use of chaining, etc):
5829 
5830      (LIST TBD)
5831 
5832 */
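// For example, get_signal_number("TERM") returns SIGTERM; names not present in
// the table below return -1.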
5833 int os::get_signal_number(const char* name) {
5834   static const struct {
5835     const char* name;
5836     int         number;
5837   } siglabels [] =
5838     // derived from version 6.0 VC98/include/signal.h
  {"ABRT",      SIGABRT,        // abnormal termination triggered by abort call
5840   "FPE",        SIGFPE,         // floating point exception
5841   "SEGV",       SIGSEGV,        // segment violation
5842   "INT",        SIGINT,         // interrupt
5843   "TERM",       SIGTERM,        // software term signal from kill
5844   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5845   "ILL",        SIGILL};        // illegal instruction
5846   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5847     if (strcmp(name, siglabels[i].name) == 0) {
5848       return siglabels[i].number;
5849     }
5850   }
5851   return -1;
5852 }
5853 
5854 // Fast current thread access
5855 
5856 int os::win32::_thread_ptr_offset = 0;
5857 
5858 static void call_wrapper_dummy() {}
5859 
5860 // We need to call the os_exception_wrapper once so that it sets
5861 // up the offset from FS of the thread pointer.
5862 void os::win32::initialize_thread_ptr_offset() {
5863   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5864                            NULL, methodHandle(), NULL, NULL);
5865 }
5866 
5867 bool os::supports_map_sync() {
5868   return false;
5869 }