1 /*
   2  * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "jvm.h"
  30 #include "classfile/classLoader.hpp"
  31 #include "classfile/systemDictionary.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "code/codeCache.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "compiler/disassembler.hpp"
  38 #include "interpreter/interpreter.hpp"
  39 #include "logging/log.hpp"
  40 #include "logging/logStream.hpp"
  41 #include "memory/allocation.inline.hpp"
  42 #include "memory/filemap.hpp"
  43 #include "oops/oop.inline.hpp"
  44 #include "os_share_windows.hpp"
  45 #include "os_windows.inline.hpp"
  46 #include "prims/jniFastGetField.hpp"
  47 #include "prims/jvm_misc.hpp"
  48 #include "runtime/arguments.hpp"
  49 #include "runtime/atomic.hpp"
  50 #include "runtime/globals.hpp"
  51 #include "runtime/interfaceSupport.inline.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/objectMonitor.hpp"
  56 #include "runtime/orderAccess.hpp"
  57 #include "runtime/osThread.hpp"
  58 #include "runtime/perfMemory.hpp"
  59 #include "runtime/safepointMechanism.hpp"
  60 #include "runtime/sharedRuntime.hpp"
  61 #include "runtime/statSampler.hpp"
  62 #include "runtime/stubRoutines.hpp"
  63 #include "runtime/thread.inline.hpp"
  64 #include "runtime/threadCritical.hpp"
  65 #include "runtime/timer.hpp"
  66 #include "runtime/vm_version.hpp"
  67 #include "services/attachListener.hpp"
  68 #include "services/memTracker.hpp"
  69 #include "services/runtimeService.hpp"
  70 #include "utilities/align.hpp"
  71 #include "utilities/decoder.hpp"
  72 #include "utilities/defaultStream.hpp"
  73 #include "utilities/events.hpp"
  74 #include "utilities/macros.hpp"
  75 #include "utilities/vmError.hpp"
  76 #include "symbolengine.hpp"
  77 #include "windbghelp.hpp"
  78 
  79 #ifdef _DEBUG
  80 #include <crtdbg.h>
  81 #endif
  82 
  83 #include <windows.h>
  84 #include <sys/types.h>
  85 #include <sys/stat.h>
  86 #include <sys/timeb.h>
  87 #include <objidl.h>
  88 #include <shlobj.h>
  89 
  90 #include <malloc.h>
  91 #include <signal.h>
  92 #include <direct.h>
  93 #include <errno.h>
  94 #include <fcntl.h>
  95 #include <io.h>
  96 #include <process.h>              // For _beginthreadex(), _endthreadex()
  97 #include <imagehlp.h>             // For os::dll_address_to_function_name
  98 // for enumerating dll libraries
  99 #include <vdmdbg.h>
 100 #include <psapi.h>
 101 #include <mmsystem.h>
 102 #include <winsock2.h>
 103 
 104 // for timer info max values which include all bits
 105 #define ALL_64_BITS CONST64(-1)
 106 
 107 // For DLL loading/load error detection
 108 // Values of PE COFF
 109 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 110 #define IMAGE_FILE_SIGNATURE_LENGTH 4
 111 
// Handle/id of the process and of its primordial thread.
// NOTE(review): assumed to be captured by startup code outside this
// chunk -- confirm against the os::win32 initialization code.
static HANDLE main_process;
static HANDLE main_thread;
static int    main_thread_id;

// Cached FILETIME values for process times.
// NOTE(review): presumably filled from GetProcessTimes() elsewhere in
// this file -- confirm; none of them is written in this chunk.
static FILETIME process_creation_time;
static FILETIME process_exit_time;
static FILETIME process_user_time;
static FILETIME process_kernel_time;
 120 
// CPU architecture token (used unquoted, e.g. in error-reporting macros).
#ifdef _M_AMD64
  #define __CPU__ amd64
#else
  #define __CPU__ i486
#endif

#if INCLUDE_AOT
// Vectored exception handler installed for AOT support; removed again in
// DllMain on DLL_PROCESS_DETACH (see below).
PVOID  topLevelVectoredExceptionHandler = NULL;
LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
#endif

// save DLL module handle, used by GetModuleFileName
// (set in DllMain on DLL_PROCESS_ATTACH)

HINSTANCE vm_lib_handle;
 135 
// DLL entry point, called by the OS loader for lifecycle events.
// On process attach: remember the VM library handle, optionally raise the
// OS timer interrupt resolution (ForceTimeHighResolution), and let the
// dbghelp/symbol subsystems do their pre-initialization.
// On process detach: restore the timer resolution and remove the AOT
// vectored exception handler if one was installed.
BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
  switch (reason) {
  case DLL_PROCESS_ATTACH:
    vm_lib_handle = hinst;
    if (ForceTimeHighResolution) {
      timeBeginPeriod(1L);  // request 1ms timer interrupt period
    }
    WindowsDbgHelp::pre_initialize();
    SymbolEngine::pre_initialize();
    break;
  case DLL_PROCESS_DETACH:
    if (ForceTimeHighResolution) {
      timeEndPeriod(1L);    // balance the timeBeginPeriod(1L) above
    }
#if INCLUDE_AOT
    if (topLevelVectoredExceptionHandler != NULL) {
      RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
      topLevelVectoredExceptionHandler = NULL;
    }
#endif
    break;
  default:
    break;
  }
  return true;
}
 162 
 163 static inline double fileTimeAsDouble(FILETIME* time) {
 164   const double high  = (double) ((unsigned int) ~0);
 165   const double split = 10000000.0;
 166   double result = (time->dwLowDateTime / split) +
 167                    time->dwHighDateTime * (high/split);
 168   return result;
 169 }
 170 
 171 // Implementation of os
 172 
 173 bool os::unsetenv(const char* name) {
 174   assert(name != NULL, "Null pointer");
 175   return (SetEnvironmentVariable(name, NULL) == TRUE);
 176 }
 177 
 178 // No setuid programs under Windows.
 179 bool os::have_special_privileges() {
 180   return false;
 181 }
 182 
 183 
 184 // This method is  a periodic task to check for misbehaving JNI applications
 185 // under CheckJNI, we can add any periodic checks here.
 186 // For Windows at the moment does nothing
 187 void os::run_periodic_checks() {
 188   return;
 189 }
 190 
// previous UnhandledExceptionFilter, if there is one
// (saved so the VM's filter can chain to it; only installed on 32-bit --
// see init_system_properties_values below)
static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;

// Forward declaration; the filter itself is defined further down in this file.
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 195 
// Compute and install the OS-dependent system properties:
// java_home, dll_dir, the boot class path, java.library.path and the
// default extensions directories.  Called once during VM initialization.
void os::init_system_properties_values() {
  // sysclasspath, java_home, dll_dir
  {
    char *home_path;
    char *dll_path;
    char *pslash;
    const char *bin = "\\bin";
    char home_dir[MAX_PATH + 1];
    // _ALT_JAVA_HOME_DIR allows overriding the detected JAVA_HOME.
    char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");

    if (alt_home_dir != NULL)  {
      strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
      home_dir[MAX_PATH] = '\0';
    } else {
      os::jvm_path(home_dir, sizeof(home_dir));
      // Found the full path to jvm.dll.
      // Now cut the path to <java_home>/jre if we can.
      *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
      pslash = strrchr(home_dir, '\\');
      if (pslash != NULL) {
        *pslash = '\0';                   // get rid of \{client|server}
        pslash = strrchr(home_dir, '\\');
        if (pslash != NULL) {
          *pslash = '\0';                 // get rid of \bin
        }
      }
    }

    // The temporary is freed immediately after the call, so set_java_home()
    // evidently keeps its own copy of the string.
    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
    strcpy(home_path, home_dir);
    Arguments::set_java_home(home_path);
    FREE_C_HEAP_ARRAY(char, home_path);

    // dll_dir = <java_home>\bin; same copy-and-free pattern as above.
    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
                                mtInternal);
    strcpy(dll_path, home_dir);
    strcat(dll_path, bin);
    Arguments::set_dll_dir(dll_path);
    FREE_C_HEAP_ARRAY(char, dll_path);

    if (!set_boot_path('\\', ';')) {
      vm_exit_during_initialization("Failed setting boot class path.", NULL);
    }
  }

// library_path
#define EXT_DIR "\\lib\\ext"
#define BIN_DIR "\\bin"
#define PACKAGE_DIR "\\Sun\\Java"
  {
    // Win32 library search order (See the documentation for LoadLibrary):
    //
    // 1. The directory from which application is loaded.
    // 2. The system wide Java Extensions directory (Java only)
    // 3. System directory (GetSystemDirectory)
    // 4. Windows directory (GetWindowsDirectory)
    // 5. The PATH environment variable
    // 6. The current directory

    char *library_path;
    char tmp[MAX_PATH];
    char *path_str = ::getenv("PATH");

    // Sized for up to five MAX_PATH components, the fixed suffixes,
    // the user PATH and the joining separators.
    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
                                    sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);

    library_path[0] = '\0';

    // 1. Directory containing the executable (strip the file name).
    GetModuleFileName(NULL, tmp, sizeof(tmp));
    *(strrchr(tmp, '\\')) = '\0';
    strcat(library_path, tmp);

    // 2. System-wide Java Extensions directory under the Windows dir.
    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);
    strcat(library_path, PACKAGE_DIR BIN_DIR);

    // 3. System directory.
    GetSystemDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    // 4. Windows directory.
    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    // 5. User PATH, if any.
    if (path_str) {
      strcat(library_path, ";");
      strcat(library_path, path_str);
    }

    // 6. Current directory.
    strcat(library_path, ";.");

    Arguments::set_library_path(library_path);
    FREE_C_HEAP_ARRAY(char, library_path);
  }

  // Default extensions directory
  {
    char path[MAX_PATH];
    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
    GetWindowsDirectory(path, MAX_PATH);
    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
            path, PACKAGE_DIR, EXT_DIR);
    Arguments::set_ext_dirs(buf);
  }
  #undef EXT_DIR
  #undef BIN_DIR
  #undef PACKAGE_DIR

#ifndef _WIN64
  // set our UnhandledExceptionFilter and save any previous one
  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif

  // Done
  return;
}
 313 
 314 void os::breakpoint() {
 315   DebugBreak();
 316 }
 317 
 318 // Invoked from the BREAKPOINT Macro
 319 extern "C" void breakpoint() {
 320   os::breakpoint();
 321 }
 322 
 323 // RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 324 // So far, this method is only used by Native Memory Tracking, which is
 325 // only supported on Windows XP or later.
 326 //
 327 int os::get_native_stack(address* stack, int frames, int toSkip) {
 328   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 329   for (int index = captured; index < frames; index ++) {
 330     stack[index] = NULL;
 331   }
 332   return captured;
 333 }
 334 
 335 
// os::current_stack_base()
//
//   Returns the base of the stack, which is the stack's
//   starting address.  This function must be called
//   while running on the stack of the thread being queried.

address os::current_stack_base() {
  MEMORY_BASIC_INFORMATION minfo;
  address stack_bottom;
  size_t stack_size;

  // Query the region containing 'minfo' itself: a stack-allocated local,
  // so this describes the current thread's stack.
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  stack_bottom =  (address)minfo.AllocationBase;
  stack_size = minfo.RegionSize;

  // Add up the sizes of all the regions with the same
  // AllocationBase.
  while (1) {
    VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
    if (stack_bottom == (address)minfo.AllocationBase) {
      stack_size += minfo.RegionSize;
    } else {
      break;
    }
  }
  // The base is the highest address of the stack: bottom plus total size.
  return stack_bottom + stack_size;
}
 363 
 364 size_t os::current_stack_size() {
 365   size_t sz;
 366   MEMORY_BASIC_INFORMATION minfo;
 367   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 368   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 369   return sz;
 370 }
 371 
// Scan [start, start + size) for the first contiguous run of committed
// memory.  On success returns true and sets committed_start/committed_size
// to describe that run (clipped to the queried range); returns false if no
// committed region intersects the range.
bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
  MEMORY_BASIC_INFORMATION minfo;
  committed_start = NULL;
  committed_size = 0;
  address top = start + size;
  const address start_addr = start;
  // Walk the address space region by region via VirtualQuery.
  while (start < top) {
    VirtualQuery(start, &minfo, sizeof(minfo));
    if ((minfo.State & MEM_COMMIT) == 0) {  // not committed
      if (committed_start != NULL) {
        // A committed run was already found; the first gap ends it.
        break;
      }
    } else {  // committed
      if (committed_start == NULL) {
        committed_start = start;
      }
      // 'start' may lie inside the region; only count from 'start' on.
      size_t offset = start - (address)minfo.BaseAddress;
      committed_size += minfo.RegionSize - offset;
    }
    // Advance to the first address past the queried region.
    start = (address)minfo.BaseAddress + minfo.RegionSize;
  }

  if (committed_start == NULL) {
    assert(committed_size == 0, "Sanity");
    return false;
  } else {
    assert(committed_start >= start_addr && committed_start < top, "Out of range");
    // current region may go beyond the limit, trim to the limit
    committed_size = MIN2(committed_size, size_t(top - committed_start));
    return true;
  }
}
 404 
 405 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 406   const struct tm* time_struct_ptr = localtime(clock);
 407   if (time_struct_ptr != NULL) {
 408     *res = *time_struct_ptr;
 409     return res;
 410   }
 411   return NULL;
 412 }
 413 
 414 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 415   const struct tm* time_struct_ptr = gmtime(clock);
 416   if (time_struct_ptr != NULL) {
 417     *res = *time_struct_ptr;
 418     return res;
 419   }
 420   return NULL;
 421 }
 422 
// Forward declaration; the filter is defined further down in this file.
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);

// Thread start routine for all newly created threads.
// Records stack bounds, perturbs the stack pointer for cache-friendliness,
// runs the Thread's payload inside a Win32 SEH guard (so crashes in
// non-Java threads still produce an error dump), then exits the native
// thread with a diagnostic exit code.
static unsigned __stdcall thread_native_entry(Thread* thread) {

  thread->record_stack_base_and_size();

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads of the same stack traces evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially true for
  // processors with hyperthreading technology.
  static int counter = 0;
  int pid = os::current_process_id();
  _alloca(((pid ^ counter++) & 7) * 128);

  thread->initialize_thread_current();

  OSThread* osthr = thread->osthread();
  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Diagnostic code to investigate JDK-6573254
  // (distinct thread exit codes for java vs. non-java threads)
  int res = 30115;  // non-java thread
  if (thread->is_Java_thread()) {
    res = 20115;    // java thread
  }

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());

  // Install a win32 structured exception handler around every thread created
  // by VM, so VM can generate error dump when an exception occurred in non-
  // Java thread (e.g. VM thread).
  __try {
    thread->call_run();
  } __except(topLevelExceptionFilter(
                                     (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }

  // Note: at this point the thread object may already have deleted itself.
  // Do not dereference it from here on out.

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::add code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec(&os::win32::_os_thread_count);
  }

  // Thread must not return from exit_process_or_thread(), but if it does,
  // let it proceed to exit normally
  return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
}
 485 
// Wrap an already-existing Win32 thread (handle + id) in a freshly
// allocated OSThread in state INITIALIZED.  Returns NULL if either the
// OSThread or its interrupt event cannot be allocated.
static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
                                  int thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Initialize the JDK library's interrupt event.
  // This should really be done when OSThread is constructed,
  // but there is no way for a constructor to report failure to
  // allocate the event.
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return NULL;
  }
  osthread->set_interrupt_event(interrupt_event);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}
 519 
 520 
// Attach the calling, externally-created native thread to the VM as the
// given JavaThread.  The current thread handle is duplicated so the VM
// owns its own reference.  Returns false if OSThread creation fails.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  HANDLE thread_h;
  // GetCurrentThread() returns a pseudo-handle; duplicate it into a real
  // handle usable beyond the current thread context.
  if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
                       &thread_h, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  OSThread* osthread = create_os_thread(thread, thread_h,
                                        (int)current_thread_id());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
    os::current_thread_id());

  return true;
}
 546 
// Create (once) the OSThread for the primordial thread, reusing the
// main_thread handle/id cached at startup, and bind it to 'thread'.
// Returns false if the OSThread cannot be created.
bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  // _starting_thread is created lazily and reused on subsequent calls.
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start)
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);
  return true;
}
 564 
 565 // Helper function to trace _beginthreadex attributes,
 566 //  similar to os::Posix::describe_pthread_attr()
 567 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 568                                                size_t stacksize, unsigned initflag) {
 569   stringStream ss(buf, buflen);
 570   if (stacksize == 0) {
 571     ss.print("stacksize: default, ");
 572   } else {
 573     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 574   }
 575   ss.print("flags: ");
 576   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 577   #define ALL(X) \
 578     X(CREATE_SUSPENDED) \
 579     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 580   ALL(PRINT_FLAG)
 581   #undef ALL
 582   #undef PRINT_FLAG
 583   return buf;
 584 }
 585 
// Allocate and initialize a new OSThread
// Create a new native thread for 'thread' of kind 'thr_type'.
// stack_size == 0 selects a type-specific default (ThreadStackSize /
// CompilerThreadStackSize / VMThreadStackSize).  The native thread is
// created CREATE_SUSPENDED in state INITIALIZED and started later by the
// caller.  Returns false, with all partially-created state cleaned up,
// on any failure.
bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t stack_size) {
  unsigned thread_id;

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Initialize the JDK library's interrupt event.
  // This should really be done when OSThread is constructed,
  // but there is no way for a constructor to report failure to
  // allocate the event.
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return false;
  }
  osthread->set_interrupt_event(interrupt_event);
  // We don't call set_interrupted(false) as it will trip the assert in there
  // as we are not operating on the current thread. We don't need to call it
  // because the initial state is already correct.

  thread->set_osthread(osthread);

  // Pick a default stack size per thread type if none was requested.
  if (stack_size == 0) {
    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) {
        stack_size = JavaThread::stack_size_at_create();
      }
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }

  // Create the Win32 thread
  //
  // Contrary to what MSDN document says, "stack_size" in _beginthreadex()
  // does not specify stack size. Instead, it specifies the size of
  // initially committed space. The stack size is determined by
  // PE header in the executable. If the committed "stack_size" is larger
  // than default value in the PE header, the stack is rounded up to the
  // nearest multiple of 1MB. For example if the launcher has default
  // stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all, it only affects the initial
  // commitment. On the other hand, specifying 'stack_size' larger than
  // default value may cause significant increase in memory usage, because
  // not only the stack space will be rounded up to MB, but also the
  // entire space is committed upfront.
  //
  // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
  // for CreateThread() that can treat 'stack_size' as stack size. However we
  // are not supposed to call CreateThread() directly according to MSDN
  // document because JVM uses C runtime library. The good news is that the
  // flag appears to work with _beginthredex() as well.

  const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
  HANDLE thread_handle =
    (HANDLE)_beginthreadex(NULL,
                           (unsigned)stack_size,
                           (unsigned (__stdcall *)(void*)) thread_native_entry,
                           thread,
                           initflag,
                           &thread_id);

  char buf[64];
  if (thread_handle != NULL) {
    log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
      thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
  } else {
    log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
      os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
    // Log some OS information which might explain why creating the thread failed.
    log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
    LogStream st(Log(os, thread)::info());
    os::print_memory_info(&st);
  }

  if (thread_handle == NULL) {
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  Atomic::inc(&os::win32::_os_thread_count);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}
 698 
 699 
// Free Win32 resources related to the OSThread
// Closes the thread handle and deletes the OSThread.
// NOTE(review): the interrupt event created alongside the OSThread is
// presumably released by the OSThread destructor -- confirm.
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  assert(Thread::current()->osthread() == osthread,
         "os::free_thread but not current thread");

  CloseHandle(osthread->thread_handle());
  delete osthread;
}
 712 
// Timer bookkeeping for os::elapsed_counter()/elapsed_frequency();
// initialized in os::win32::initialize_performance_counter().
// NOTE(review): first_filetime is not referenced in this chunk -- confirm
// its use elsewhere in the file.
static jlong first_filetime;
static jlong initial_performance_count;
static jlong performance_frequency;
 716 
 717 
// Assemble a jlong from the two 32-bit halves of a LARGE_INTEGER.
jlong as_long(LARGE_INTEGER x) {
  jlong result = 0; // initialization to avoid warning
  set_high(&result, x.HighPart);
  set_low(&result, x.LowPart);
  return result;
}
 724 
 725 
 726 jlong os::elapsed_counter() {
 727   LARGE_INTEGER count;
 728   QueryPerformanceCounter(&count);
 729   return as_long(count) - initial_performance_count;
 730 }
 731 
 732 
 733 jlong os::elapsed_frequency() {
 734   return performance_frequency;
 735 }
 736 
 737 
 738 julong os::available_memory() {
 739   return win32::available_memory();
 740 }
 741 
 742 julong os::win32::available_memory() {
 743   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
 744   // value if total memory is larger than 4GB
 745   MEMORYSTATUSEX ms;
 746   ms.dwLength = sizeof(ms);
 747   GlobalMemoryStatusEx(&ms);
 748 
 749   return (julong)ms.ullAvailPhys;
 750 }
 751 
 752 julong os::physical_memory() {
 753   return win32::physical_memory();
 754 }
 755 
// Report an upper bound on allocatable memory in *limit; always returns
// true on Windows.  The bound is the available virtual address space,
// additionally capped at 1400MB on 32-bit to stay clear of the 2GB
// address-space wall.
bool os::has_allocatable_memory_limit(julong* limit) {
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  GlobalMemoryStatusEx(&ms);
#ifdef _LP64
  *limit = (julong)ms.ullAvailVirtual;
  return true;
#else
  // Limit to 1400m because of the 2gb address space wall
  *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
  return true;
#endif
}
 769 
 770 int os::active_processor_count() {
 771   // User has overridden the number of active processors
 772   if (ActiveProcessorCount > 0) {
 773     log_trace(os)("active_processor_count: "
 774                   "active processor count set by user : %d",
 775                   ActiveProcessorCount);
 776     return ActiveProcessorCount;
 777   }
 778 
 779   DWORD_PTR lpProcessAffinityMask = 0;
 780   DWORD_PTR lpSystemAffinityMask = 0;
 781   int proc_count = processor_count();
 782   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 783       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
 784     // Nof active processors is number of bits in process affinity mask
 785     int bitcount = 0;
 786     while (lpProcessAffinityMask != 0) {
 787       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 788       bitcount++;
 789     }
 790     return bitcount;
 791   } else {
 792     return proc_count;
 793   }
 794 }
 795 
 796 uint os::processor_id() {
 797   return (uint)GetCurrentProcessorNumber();
 798 }
 799 
// Publish the thread's name to an attached debugger via the well-known
// MS_VC_EXCEPTION protocol (a raised-and-swallowed SEH exception).
void os::set_native_thread_name(const char *name) {

  // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
  //
  // Note that unfortunately this only works if the process
  // is already attached to a debugger; debugger must observe
  // the exception below to show the correct name.

  // If there is no debugger attached skip raising the exception
  if (!IsDebuggerPresent()) {
    return;
  }

  const DWORD MS_VC_EXCEPTION = 0x406D1388;
  // Layout mandated by the debugger protocol (THREADNAME_INFO).
  struct {
    DWORD dwType;     // must be 0x1000
    LPCSTR szName;    // pointer to name (in user addr space)
    DWORD dwThreadID; // thread ID (-1=caller thread)
    DWORD dwFlags;    // reserved for future use, must be zero
  } info;

  info.dwType = 0x1000;
  info.szName = name;
  info.dwThreadID = -1;  // name the calling thread
  info.dwFlags = 0;

  // The debugger observes the exception; the empty handler swallows it so
  // execution continues normally.
  __try {
    RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
  } __except(EXCEPTION_EXECUTE_HANDLER) {}
}
 830 
 831 bool os::bind_to_processor(uint processor_id) {
 832   // Not yet implemented.
 833   return false;
 834 }
 835 
 836 void os::win32::initialize_performance_counter() {
 837   LARGE_INTEGER count;
 838   QueryPerformanceFrequency(&count);
 839   performance_frequency = as_long(count);
 840   QueryPerformanceCounter(&count);
 841   initial_performance_count = as_long(count);
 842 }
 843 
 844 
 845 double os::elapsedTime() {
 846   return (double) elapsed_counter() / (double) elapsed_frequency();
 847 }
 848 
 849 
// Windows format:
//   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
// Java format:
//   Java standards require the number of milliseconds since 1/1/1970

// Constant offset - calculated using offset()
// (the number of 100ns intervals between 1/1/1601 and 1/1/1970)
static jlong  _offset   = 116444736000000000;
// Fake time counter for reproducible results when debugging
// NOTE(review): not referenced in this chunk -- confirm its use elsewhere.
static jlong  fake_time = 0;

#ifdef ASSERT
// Just to be safe, recalculate the offset in debug mode
static jlong _calculated_offset = 0;
static int   _has_calculated_offset = 0;

// Debug variant: derive the 1601->1970 offset from the OS itself (via
// SystemTimeToFileTime) and assert it matches the _offset constant.
jlong offset() {
  if (_has_calculated_offset) return _calculated_offset;
  SYSTEMTIME java_origin;
  java_origin.wYear          = 1970;
  java_origin.wMonth         = 1;
  java_origin.wDayOfWeek     = 0; // ignored
  java_origin.wDay           = 1;
  java_origin.wHour          = 0;
  java_origin.wMinute        = 0;
  java_origin.wSecond        = 0;
  java_origin.wMilliseconds  = 0;
  FILETIME jot;
  if (!SystemTimeToFileTime(&java_origin, &jot)) {
    fatal("Error = %d\nWindows error", GetLastError());
  }
  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
  _has_calculated_offset = 1;
  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
  return _calculated_offset;
}
#else
// Product variant: just return the precomputed constant.
jlong offset() {
  return _offset;
}
#endif
 890 
 891 jlong windows_to_java_time(FILETIME wt) {
 892   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 893   return (a - offset()) / 10000;
 894 }
 895 
 896 // Returns time ticks in (10th of micro seconds)
 897 jlong windows_to_time_ticks(FILETIME wt) {
 898   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 899   return (a - offset());
 900 }
 901 
 902 FILETIME java_to_windows_time(jlong l) {
 903   jlong a = (l * 10000) + offset();
 904   FILETIME result;
 905   result.dwHighDateTime = high(a);
 906   result.dwLowDateTime  = low(a);
 907   return result;
 908 }
 909 
 910 bool os::supports_vtime() { return true; }
 911 
 912 double os::elapsedVTime() {
 913   FILETIME created;
 914   FILETIME exited;
 915   FILETIME kernel;
 916   FILETIME user;
 917   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 918     // the resolution of windows_to_java_time() should be sufficient (ms)
 919     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 920   } else {
 921     return elapsedTime();
 922   }
 923 }
 924 
 925 jlong os::javaTimeMillis() {
 926   FILETIME wt;
 927   GetSystemTimeAsFileTime(&wt);
 928   return windows_to_java_time(wt);
 929 }
 930 
 931 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 932   FILETIME wt;
 933   GetSystemTimeAsFileTime(&wt);
 934   jlong ticks = windows_to_time_ticks(wt); // 10th of micros
 935   jlong secs = jlong(ticks / 10000000); // 10000 * 1000
 936   seconds = secs;
 937   nanos = jlong(ticks - (secs*10000000)) * 100;
 938 }
 939 
 940 jlong os::javaTimeNanos() {
 941     LARGE_INTEGER current_count;
 942     QueryPerformanceCounter(&current_count);
 943     double current = as_long(current_count);
 944     double freq = performance_frequency;
 945     jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 946     return time;
 947 }
 948 
 949 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 950   jlong freq = performance_frequency;
 951   if (freq < NANOSECS_PER_SEC) {
 952     // the performance counter is 64 bits and we will
 953     // be multiplying it -- so no wrap in 64 bits
 954     info_ptr->max_value = ALL_64_BITS;
 955   } else if (freq > NANOSECS_PER_SEC) {
 956     // use the max value the counter can reach to
 957     // determine the max value which could be returned
 958     julong max_counter = (julong)ALL_64_BITS;
 959     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 960   } else {
 961     // the performance counter is 64 bits and we will
 962     // be using it directly -- so no wrap in 64 bits
 963     info_ptr->max_value = ALL_64_BITS;
 964   }
 965 
 966   // using a counter, so no skipping
 967   info_ptr->may_skip_backward = false;
 968   info_ptr->may_skip_forward = false;
 969 
 970   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 971 }
 972 
 973 char* os::local_time_string(char *buf, size_t buflen) {
 974   SYSTEMTIME st;
 975   GetLocalTime(&st);
 976   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 977                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 978   return buf;
 979 }
 980 
 981 bool os::getTimesSecs(double* process_real_time,
 982                       double* process_user_time,
 983                       double* process_system_time) {
 984   HANDLE h_process = GetCurrentProcess();
 985   FILETIME create_time, exit_time, kernel_time, user_time;
 986   BOOL result = GetProcessTimes(h_process,
 987                                 &create_time,
 988                                 &exit_time,
 989                                 &kernel_time,
 990                                 &user_time);
 991   if (result != 0) {
 992     FILETIME wt;
 993     GetSystemTimeAsFileTime(&wt);
 994     jlong rtc_millis = windows_to_java_time(wt);
 995     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
 996     *process_user_time =
 997       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
 998     *process_system_time =
 999       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
1000     return true;
1001   } else {
1002     return false;
1003   }
1004 }
1005 
1006 void os::shutdown() {
1007   // allow PerfMemory to attempt cleanup of any persistent resources
1008   perfMemory_exit();
1009 
1010   // flush buffered output, finish log files
1011   ostream_abort();
1012 
1013   // Check for abort hook
1014   abort_hook_t abort_hook = Arguments::abort_hook();
1015   if (abort_hook != NULL) {
1016     abort_hook();
1017   }
1018 }
1019 
1020 
1021 static HANDLE dumpFile = NULL;
1022 
1023 // Check if dump file can be created.
1024 void os::check_dump_limit(char* buffer, size_t buffsz) {
1025   bool status = true;
1026   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1027     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1028     status = false;
1029   }
1030 
1031 #ifndef ASSERT
1032   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1033     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1034     status = false;
1035   }
1036 #endif
1037 
1038   if (status) {
1039     const char* cwd = get_current_directory(NULL, 0);
1040     int pid = current_process_id();
1041     if (cwd != NULL) {
1042       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1043     } else {
1044       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1045     }
1046 
1047     if (dumpFile == NULL &&
1048        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1049                  == INVALID_HANDLE_VALUE) {
1050       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1051       status = false;
1052     }
1053   }
1054   VMError::record_coredump_status(buffer, status);
1055 }
1056 
// Abort the VM: run the normal shutdown sequence, optionally write a
// minidump (to the file pre-created by check_dump_limit()), then terminate
// the process with exit code 1. Never returns.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  MINIDUMP_TYPE dumpType;

  shutdown();
  // No dump requested, or no dump file was prepared: just exit.
  if (!dump_core || dumpFile == NULL) {
    if (dumpFile != NULL) {
      CloseHandle(dumpFile);
    }
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
    MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);

  // Record the faulting exception context in the dump when it is available.
  if (siginfo != NULL && context != NULL) {
    ep.ContextRecord = (PCONTEXT) context;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }

  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
      !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
    jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
  }
  CloseHandle(dumpFile);
  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}
1097 
// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  // EPT_PROCESS_DIE terminates the process without running shutdown handlers.
  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
}
1102 
1103 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1104 //  * dirent_md.c       1.15 00/02/02
1105 //
1106 // The declarations for DIR and struct dirent are in jvm_win32.h.
1107 
1108 // Caller must have already run dirname through JVM_NativePath, which removes
1109 // duplicate slashes and converts all instances of '/' into '\\'.
1110 
// Open a directory stream for 'dirname'. Returns a heap-allocated DIR on
// success; on failure returns 0 with errno set (ENOMEM, ENOENT, ENOTDIR,
// or EACCES). The caller releases the stream with os::closedir().
DIR * os::opendir(const char *dirname) {
  assert(dirname != NULL, "just checking");   // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
  DWORD fattr;                                // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == 0) {
    errno = ENOMEM;
    return 0;
  }

  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
  // as a directory in FindFirstFile().  We detect this case here and
  // prepend the current drive name.
  //
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    alt_dirname[0] = _getdrive() + 'A' - 1;
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  // +5 leaves room for the "\\*.*" wildcard suffix appended below.
  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dirp->path == 0) {
    free(dirp);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dirp->path, dirname);

  // Verify the path exists and is actually a directory before enumerating.
  fattr = GetFileAttributes(dirp->path);
  if (fattr == 0xffffffff) {
    free(dirp->path);
    free(dirp);
    errno = ENOENT;
    return 0;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path);
    free(dirp);
    errno = ENOTDIR;
    return 0;
  }

  // Append "*.*", or possibly "\\*.*", to path
  if (dirp->path[1] == ':' &&
      (dirp->path[2] == '\0' ||
      (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    // No '\\' needed for cases like "Z:" or "Z:\"
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  // Prefetch the first entry; readdir() hands it out and advances.
  // ERROR_FILE_NOT_FOUND (empty directory) is not an error: readdir()
  // will simply return NULL immediately.
  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path);
      free(dirp);
      errno = EACCES;
      return 0;
    }
  }
  return dirp;
}
1176 
// Return the next entry of the directory stream, or NULL when exhausted.
// The entry returned was prefetched by the previous FindFirstFile /
// FindNextFile call; this function hands it out and prefetches the next.
struct dirent * os::readdir(DIR *dirp) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    // Stream exhausted, or the directory was empty at opendir() time.
    return NULL;
  }

  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  // Prefetch the next entry; on exhaustion close the handle so the next
  // call returns NULL.
  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return NULL;
    }
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}
1196 
1197 int os::closedir(DIR *dirp) {
1198   assert(dirp != NULL, "just checking");      // hotspot change
1199   if (dirp->handle != INVALID_HANDLE_VALUE) {
1200     if (!FindClose(dirp->handle)) {
1201       errno = EBADF;
1202       return -1;
1203     }
1204     dirp->handle = INVALID_HANDLE_VALUE;
1205   }
1206   free(dirp->path);
1207   free(dirp);
1208   return 0;
1209 }
1210 
1211 // This must be hard coded because it's the system's temporary
1212 // directory not the java application's temp directory, ala java.io.tmpdir.
1213 const char* os::get_temp_directory() {
1214   static char path_buf[MAX_PATH];
1215   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1216     return path_buf;
1217   } else {
1218     path_buf[0] = '\0';
1219     return path_buf;
1220   }
1221 }
1222 
1223 // Needs to be in os specific directory because windows requires another
1224 // header file <direct.h>
1225 const char* os::get_current_directory(char *buf, size_t buflen) {
1226   int n = static_cast<int>(buflen);
1227   if (buflen > INT_MAX)  n = INT_MAX;
1228   return _getcwd(buf, n);
1229 }
1230 
1231 //-----------------------------------------------------------
1232 // Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if address in
// within the NTDLL address space.
//
static bool _addr_in_ntdll(address addr) {
  HMODULE hmod = GetModuleHandle("NTDLL.DLL");
  if (hmod == NULL) {
    return false;
  }

  MODULEINFO minfo;
  if (!GetModuleInformation(GetCurrentProcess(), hmod,
                            &minfo, sizeof(MODULEINFO))) {
    return false;
  }

  // Check addr against the module's [base, base + size) range.
  address base = (address) minfo.lpBaseOfDll;
  address top  = (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage);
  return (addr >= base) && (addr < top);
}
#endif
1256 
// Parameter record for _locate_module_by_addr(): the address to look up,
// plus output fields filled in when the containing module is found.
struct _modinfo {
  address addr;        // in: address to locate
  char*   full_path;   // point to a char buffer (out, may be NULL)
  int     buflen;      // size of the buffer
  address base_addr;   // out: base address of the matching module
};
1263 
1264 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1265                                   address top_address, void * param) {
1266   struct _modinfo *pmod = (struct _modinfo *)param;
1267   if (!pmod) return -1;
1268 
1269   if (base_addr   <= pmod->addr &&
1270       top_address > pmod->addr) {
1271     // if a buffer is provided, copy path name to the buffer
1272     if (pmod->full_path) {
1273       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1274     }
1275     pmod->base_addr = base_addr;
1276     return 1;
1277   }
1278   return 0;
1279 }
1280 
1281 bool os::dll_address_to_library_name(address addr, char* buf,
1282                                      int buflen, int* offset) {
1283   // buf is not optional, but offset is optional
1284   assert(buf != NULL, "sanity check");
1285 
1286 // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
1287 //       return the full path to the DLL file, sometimes it returns path
1288 //       to the corresponding PDB file (debug info); sometimes it only
1289 //       returns partial path, which makes life painful.
1290 
1291   struct _modinfo mi;
1292   mi.addr      = addr;
1293   mi.full_path = buf;
1294   mi.buflen    = buflen;
1295   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1296     // buf already contains path name
1297     if (offset) *offset = addr - mi.base_addr;
1298     return true;
1299   }
1300 
1301   buf[0] = '\0';
1302   if (offset) *offset = -1;
1303   return false;
1304 }
1305 
1306 bool os::dll_address_to_function_name(address addr, char *buf,
1307                                       int buflen, int *offset,
1308                                       bool demangle) {
1309   // buf is not optional, but offset is optional
1310   assert(buf != NULL, "sanity check");
1311 
1312   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1313     return true;
1314   }
1315   if (offset != NULL)  *offset  = -1;
1316   buf[0] = '\0';
1317   return false;
1318 }
1319 
1320 // save the start and end address of jvm.dll into param[0] and param[1]
1321 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1322                            address top_address, void * param) {
1323   if (!param) return -1;
1324 
1325   if (base_addr   <= (address)_locate_jvm_dll &&
1326       top_address > (address)_locate_jvm_dll) {
1327     ((address*)param)[0] = base_addr;
1328     ((address*)param)[1] = top_address;
1329     return 1;
1330   }
1331   return 0;
1332 }
1333 
1334 address vm_lib_location[2];    // start and end address of jvm.dll
1335 
1336 // check if addr is inside jvm.dll
1337 bool os::address_is_in_vm(address addr) {
1338   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1339     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1340       assert(false, "Can't find jvm module.");
1341       return false;
1342     }
1343   }
1344 
1345   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1346 }
1347 
1348 // print module info; param is outputStream*
1349 static int _print_module(const char* fname, address base_address,
1350                          address top_address, void* param) {
1351   if (!param) return -1;
1352 
1353   outputStream* st = (outputStream*)param;
1354 
1355   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1356   return 0;
1357 }
1358 
// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
// Returns the module handle on success; on failure returns NULL with an
// explanatory message in ebuf.
void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
  log_info(os)("attempting shared library load of %s", name);

  void * result = LoadLibrary(name);
  if (result != NULL) {
    Events::log(NULL, "Loaded shared library %s", name);
    // Recalculate pdb search path if a DLL was loaded successfully.
    SymbolEngine::recalc_search_path();
    log_info(os)("shared library load of %s was successful", name);
    return result;
  }
  DWORD errcode = GetLastError();
  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen - 1] = '\0';
  Events::log(NULL, "Loading shared library %s failed, error code %lu", name, errcode);
  log_info(os)("shared library load of %s failed, error code %lu", name, errcode);

  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
    ebuf[ebuflen - 1] = '\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message
  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
  if (fd < 0) {
    return NULL;
  }

  // Walk the PE file headers to extract the target machine field.
  uint32_t signature_offset;
  uint16_t lib_arch = 0;
  bool failed_to_get_lib_arch =
    ( // Go to position 3c in the dll
     (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
     ||
     // Read location of signature
     (sizeof(signature_offset) !=
     (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
     ||
     // Go to COFF File Header in dll
     // that is located after "signature" (4 bytes long)
     (os::seek_to_file_offset(fd,
     signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
     ||
     // Read field that contains code of architecture
     // that dll was built for
     (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
    );

  ::close(fd);
  if (failed_to_get_lib_arch) {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  // Map PE machine codes to printable architecture names.
  typedef struct {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[] = {
    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"}
  };
#if (defined _M_AMD64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
         is defined :_M_AMD64 or _M_IX86
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str = NULL, *lib_arch_str = NULL;
  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (lib_arch == arch_array[i].arch_code) {
      lib_arch_str = arch_array[i].arch_name;
    }
    if (running_arch == arch_array[i].arch_code) {
      running_arch_str = arch_array[i].arch_name;
    }
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch) {
    return NULL;
  }

  if (lib_arch_str != NULL) {
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str, running_arch_str);
  } else {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch, running_arch_str);
  }

  return NULL;
}
1477 
1478 void os::print_dll_info(outputStream *st) {
1479   st->print_cr("Dynamic libraries:");
1480   get_loaded_modules_info(_print_module, (void *)st);
1481 }
1482 
// Enumerate the modules loaded in the current process, invoking 'callback'
// for each with its file name and [base, top) address range. Enumeration
// stops when a callback returns nonzero; that value is returned (0 otherwise).
// NOTE: 'filename' is a shared static buffer, so this is not reentrant, and
// at most MAX_NUM_MODULES modules are reported.
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  HANDLE   hProcess;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[MAX_PATH];
  int         result = 0;

  int pid = os::current_process_id();
  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  // size_needed may describe more modules than fit in 'modules'; clamp.
  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = callback(filename, (address)modinfo.lpBaseOfDll,
                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
}
1526 
1527 bool os::get_host_name(char* buf, size_t buflen) {
1528   DWORD size = (DWORD)buflen;
1529   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1530 }
1531 
1532 void os::get_summary_os_info(char* buf, size_t buflen) {
1533   stringStream sst(buf, buflen);
1534   os::win32::print_windows_version(&sst);
1535   // chop off newline character
1536   char* nl = strchr(buf, '\n');
1537   if (nl != NULL) *nl = '\0';
1538 }
1539 
// C99-conforming vsnprintf wrapper: always NUL-terminates 'buf' (when
// len > 0) and returns the length the full output would have had, papering
// over the pre-VS2015 non-conforming _vsnprintf behavior.
int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
#if _MSC_VER >= 1900
  // Starting with Visual Studio 2015, vsnprint is C99 compliant.
  int result = ::vsnprintf(buf, len, fmt, args);
  // If an encoding error occurred (result < 0) then it's not clear
  // whether the buffer is NUL terminated, so ensure it is.
  if ((result < 0) && (len > 0)) {
    buf[len - 1] = '\0';
  }
  return result;
#else
  // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
  // _vsnprintf, whose behavior seems to be *mostly* consistent across
  // versions.  However, when len == 0, avoid _vsnprintf too, and just
  // go straight to _vscprintf.  The output is going to be truncated in
  // that case, except in the unusual case of empty output.  More
  // importantly, the documentation for various versions of Visual Studio
  // are inconsistent about the behavior of _vsnprintf when len == 0,
  // including it possibly being an error.
  int result = -1;
  if (len > 0) {
    result = _vsnprintf(buf, len, fmt, args);
    // If output (including NUL terminator) is truncated, the buffer
    // won't be NUL terminated.  Add the trailing NUL specified by C99.
    if ((result < 0) || ((size_t)result >= len)) {
      buf[len - 1] = '\0';
    }
  }
  if (result < 0) {
    // Truncated (or error): compute the true would-be length separately.
    result = _vscprintf(fmt, args);
  }
  return result;
#endif // _MSC_VER dispatch
}
1574 
1575 static inline time_t get_mtime(const char* filename) {
1576   struct stat st;
1577   int ret = os::stat(filename, &st);
1578   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
1579   return st.st_mtime;
1580 }
1581 
1582 int os::compare_file_modified_times(const char* file1, const char* file2) {
1583   time_t t1 = get_mtime(file1);
1584   time_t t2 = get_mtime(file2);
1585   return t1 - t2;
1586 }
1587 
// Brief OS report; on Windows this is the same as the full report.
void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}
1591 
1592 void os::win32::print_uptime_info(outputStream* st) {
1593   unsigned long long ticks = GetTickCount64();
1594   os::print_dhm(st, "OS uptime:", ticks/1000);
1595 }
1596 
1597 void os::print_os_info(outputStream* st) {
1598 #ifdef ASSERT
1599   char buffer[1024];
1600   st->print("HostName: ");
1601   if (get_host_name(buffer, sizeof(buffer))) {
1602     st->print("%s ", buffer);
1603   } else {
1604     st->print("N/A ");
1605   }
1606 #endif
1607   st->print_cr("OS:");
1608   os::win32::print_windows_version(st);
1609 
1610   os::win32::print_uptime_info(st);
1611 
1612 #ifdef _LP64
1613   VM_Version::print_platform_virtualization_info(st);
1614 #endif
1615 }
1616 
// Print the Windows product name, bitness, and build number to 'st'.
// The version is derived from kernel32.dll's file-version resource because
// GetVersionEx lies on Windows 8.1+ (subject to manifest compatibility).
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  VS_FIXEDFILEINFO *file_info;
  TCHAR kernel32_path[MAX_PATH];
  UINT len, ret;

  // Use the GetVersionEx information to see if we're on a server or
  // workstation edition of Windows. Starting with Windows 8.1 we can't
  // trust the OS version information returned by this API.
  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("Call to GetVersionEx failed");
    return;
  }
  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);

  // Get the full path to \Windows\System32\kernel32.dll and use that for
  // determining what version of Windows we're running on.
  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
  ret = GetSystemDirectory(kernel32_path, len);
  if (ret == 0 || ret > len) {
    st->print_cr("Call to GetSystemDirectory failed");
    return;
  }
  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);

  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
  if (version_size == 0) {
    st->print_cr("Call to GetFileVersionInfoSize failed");
    return;
  }

  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
  if (version_info == NULL) {
    st->print_cr("Failed to allocate version_info");
    return;
  }

  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
    os::free(version_info);
    st->print_cr("Call to GetFileVersionInfo failed");
    return;
  }

  // "\\" queries the root VS_FIXEDFILEINFO block of the version resource.
  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
    os::free(version_info);
    st->print_cr("Call to VerQueryValue failed");
    return;
  }

  // Product version is packed as two DWORDs: major.minor / build.minor-build.
  int major_version = HIWORD(file_info->dwProductVersionMS);
  int minor_version = LOWORD(file_info->dwProductVersionMS);
  int build_number = HIWORD(file_info->dwProductVersionLS);
  int build_minor = LOWORD(file_info->dwProductVersionLS);
  int os_vers = major_version * 1000 + minor_version;
  os::free(version_info);

  st->print(" Windows ");
  switch (os_vers) {

  case 6000:
    if (is_workstation) {
      st->print("Vista");
    } else {
      st->print("Server 2008");
    }
    break;

  case 6001:
    if (is_workstation) {
      st->print("7");
    } else {
      st->print("Server 2008 R2");
    }
    break;

  case 6002:
    if (is_workstation) {
      st->print("8");
    } else {
      st->print("Server 2012");
    }
    break;

  case 6003:
    if (is_workstation) {
      st->print("8.1");
    } else {
      st->print("Server 2012 R2");
    }
    break;

  case 10000:
    if (is_workstation) {
      st->print("10");
    } else {
      // distinguish Windows Server 2016 and 2019 by build number
      // Windows server 2019 GA 10/2018 build number is 17763
      if (build_number > 17762) {
        st->print("Server 2019");
      } else {
        st->print("Server 2016");
      }
    }
    break;

  default:
    // Unrecognized windows, print out its major and minor versions
    st->print("%d.%d", major_version, minor_version);
    break;
  }

  // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
  // find out whether we are running on 64 bit processor or not
  SYSTEM_INFO si;
  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  GetNativeSystemInfo(&si);
  if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
    st->print(" , 64 bit");
  }

  st->print(" Build %d", build_number);
  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
  st->cr();
}
1743 
// Platform-specific CPU details; Windows adds nothing beyond the shared report.
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  // Nothing to do for now.
}
1747 
1748 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1749   HKEY key;
1750   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1751                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1752   if (status == ERROR_SUCCESS) {
1753     DWORD size = (DWORD)buflen;
1754     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1755     if (status != ERROR_SUCCESS) {
1756         strncpy(buf, "## __CPU__", buflen);
1757     }
1758     RegCloseKey(key);
1759   } else {
1760     // Put generic cpu info to return
1761     strncpy(buf, "## __CPU__", buflen);
1762   }
1763 }
1764 
// Print system-wide and per-process memory statistics to 'st'.
void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  int r1 = GlobalMemoryStatusEx(&ms);

  if (r1 != 0) {
    st->print(", system-wide physical " INT64_FORMAT "M ",
             (int64_t) ms.ullTotalPhys >> 20);
    st->print("(" INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20);

    st->print("TotalPageFile size " INT64_FORMAT "M ",
             (int64_t) ms.ullTotalPageFile >> 20);
    st->print("(AvailPageFile size " INT64_FORMAT "M)",
             (int64_t) ms.ullAvailPageFile >> 20);

    // on 32bit Total/AvailVirtual are interesting (show us how close we get to 2-4 GB per process borders)
#if defined(_M_IX86)
    st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ",
             (int64_t) ms.ullTotalVirtual >> 20);
    st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
#endif
  } else {
    st->print(", GlobalMemoryStatusEx did not succeed so we miss some memory values.");
  }

  // extended memory statistics for a process
  PROCESS_MEMORY_COUNTERS_EX pmex;
  ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
  pmex.cb = sizeof(pmex);
  // The _EX variant is passed through the base-struct pointer parameter;
  // the cb field tells the API which variant it received.
  int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));

  if (r2 != 0) {
    st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ",
             (int64_t) pmex.WorkingSetSize >> 20);
    st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);

    st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ",
             (int64_t) pmex.PrivateUsage >> 20);
    st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);
  } else {
    st->print("\nGetProcessMemoryInfo did not succeed so we miss some memory values.");
  }

  st->cr();
}
1815 
// Whether the given siginfo describes a signal sent via kill().
// Windows has no kill()-style signal delivery for these exceptions,
// so this always answers false.
bool os::signal_sent_by_kill(const void* siginfo) {
  // TODO: Is this possible?
  return false;
}
1820 
1821 void os::print_siginfo(outputStream *st, const void* siginfo) {
1822   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1823   st->print("siginfo:");
1824 
1825   char tmp[64];
1826   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1827     strcpy(tmp, "EXCEPTION_??");
1828   }
1829   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1830 
1831   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1832        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1833        er->NumberParameters >= 2) {
1834     switch (er->ExceptionInformation[0]) {
1835     case 0: st->print(", reading address"); break;
1836     case 1: st->print(", writing address"); break;
1837     case 8: st->print(", data execution prevention violation at address"); break;
1838     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1839                        er->ExceptionInformation[0]);
1840     }
1841     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1842   } else {
1843     int num = er->NumberParameters;
1844     if (num > 0) {
1845       st->print(", ExceptionInformation=");
1846       for (int i = 0; i < num; i++) {
1847         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1848       }
1849     }
1850   }
1851   st->cr();
1852 }
1853 
// Attempt to deliver signal 'sig' to 'thread'.  Not implemented on
// Windows; always reports failure.
bool os::signal_thread(Thread* thread, int sig, const char* reason) {
  // TODO: Can we kill thread?
  return false;
}
1858 
// Print installed signal handlers for error reporting.
// There is nothing meaningful to report on Windows, so this is a no-op.
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}
1862 
// Cached result of os::jvm_path(); filled lazily on the first call.
static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like jvm.dll is installed there (append a fake suffix
    // hotspot/jvm.dll).
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {
      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      // _access(path, 0) tests for existence; when there is no "jre"
      // subdirectory (modules image), overwrite the suffix with "\bin\".
      if (0 != _access(buf, 0)) {
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  if (buf[0] == '\0') {
    // Normal case: ask the OS for the file name of the loaded jvm module.
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  // Cache the result for subsequent calls.
  strncpy(saved_jvm_path, buf, MAX_PATH);
  saved_jvm_path[MAX_PATH - 1] = '\0';
}
1909 
1910 
// Print the platform symbol-name prefix for JNI native methods.
// 32-bit Windows stdcall symbols are decorated with a leading underscore;
// 64-bit Windows uses undecorated names, so nothing is printed there.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}
1916 
1917 
1918 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1919 #ifndef _WIN64
1920   st->print("@%d", args_size  * sizeof(int));
1921 #endif
1922 }
1923 
1924 // This method is a copy of JDK's sysGetLastErrorString
1925 // from src/windows/hpi/src/system_md.c
1926 
1927 size_t os::lasterror(char* buf, size_t len) {
1928   DWORD errval;
1929 
1930   if ((errval = GetLastError()) != 0) {
1931     // DOS error
1932     size_t n = (size_t)FormatMessage(
1933                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1934                                      NULL,
1935                                      errval,
1936                                      0,
1937                                      buf,
1938                                      (DWORD)len,
1939                                      NULL);
1940     if (n > 3) {
1941       // Drop final '.', CR, LF
1942       if (buf[n - 1] == '\n') n--;
1943       if (buf[n - 1] == '\r') n--;
1944       if (buf[n - 1] == '.') n--;
1945       buf[n] = '\0';
1946     }
1947     return n;
1948   }
1949 
1950   if (errno != 0) {
1951     // C runtime error that has no corresponding DOS error code
1952     const char* s = os::strerror(errno);
1953     size_t n = strlen(s);
1954     if (n >= len) n = len - 1;
1955     strncpy(buf, s, n);
1956     buf[n] = '\0';
1957     return n;
1958   }
1959 
1960   return 0;
1961 }
1962 
1963 int os::get_last_error() {
1964   DWORD error = GetLastError();
1965   if (error == 0) {
1966     error = errno;
1967   }
1968   return (int)error;
1969 }
1970 
1971 // sun.misc.Signal
1972 // NOTE that this is a workaround for an apparent kernel bug where if
1973 // a signal handler for SIGBREAK is installed then that signal handler
1974 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1975 // See bug 4416763.
// Handler registered for SIGBREAK via os::signal() (see below); kept in a
// static so consoleHandler() can invoke it for CTRL_BREAK_EVENT.
static void (*sigbreakHandler)(int) = NULL;

// Generic user-signal handler: records the signal for the signal
// dispatcher thread via os::signal_notify().
static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}
1983 
// Return the address of the default user-signal handler (UserHandler above).
void* os::user_handler() {
  return (void*) UserHandler;
}
1987 
1988 void* os::signal(int signal_number, void* handler) {
1989   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1990     void (*oldHandler)(int) = sigbreakHandler;
1991     sigbreakHandler = (void (*)(int)) handler;
1992     return (void*) oldHandler;
1993   } else {
1994     return (void*)::signal(signal_number, (void (*)(int))handler);
1995   }
1996 }
1997 
// Raise 'signal_number' in the current process via the C runtime.
void os::signal_raise(int signal_number) {
  raise(signal_number);
}
2001 
2002 // The Win32 C runtime library maps all console control events other than ^C
2003 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2004 // logoff, and shutdown events.  We therefore install our own console handler
2005 // that raises SIGTERM for the latter cases.
2006 //
// Console control handler installed by jdk_misc_signal_init().  Translates
// console events into the signals the VM expects (SIGINT, SIGBREAK, SIGTERM).
// Returning TRUE consumes the event; FALSE passes it to the next handler.
static BOOL WINAPI consoleHandler(DWORD event) {
  switch (event) {
  case CTRL_C_EVENT:
    if (VMError::is_error_reported()) {
      // Ctrl-C is pressed during error reporting, likely because the error
      // handler fails to abort. Let VM die immediately.
      os::die();
    }

    os::signal_raise(SIGINT);
    return TRUE;
    break;
  case CTRL_BREAK_EVENT:
    // Dispatch to the handler registered through os::signal(), if any.
    if (sigbreakHandler != NULL) {
      (*sigbreakHandler)(SIGBREAK);
    }
    return TRUE;
    break;
  case CTRL_LOGOFF_EVENT: {
    // Don't terminate JVM if it is running in a non-interactive session,
    // such as a service process.
    USEROBJECTFLAGS flags;
    HANDLE handle = GetProcessWindowStation();
    if (handle != NULL &&
        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
        sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let next handler to deal
      // with it.
      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
        return FALSE;
      }
    }
  }
  // Intentional fall-through: an interactive logoff is treated the same
  // as close/shutdown.
  case CTRL_CLOSE_EVENT:
  case CTRL_SHUTDOWN_EVENT:
    os::signal_raise(SIGTERM);
    return TRUE;
    break;
  default:
    break;
  }
  return FALSE;
}
2050 
2051 // The following code is moved from os.cpp for making this
2052 // code platform specific, which it is by its very nature.
2053 
// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd() {
  // NSIG itself serves as the private exit-signal value; pending_signals
  // below is sized NSIG+1 to leave a slot for it.
  return NSIG;
}
2059 
// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Semaphore the signal dispatcher thread waits on; NULL until
// jdk_misc_signal_init() runs (i.e. always NULL with -Xrs).
static Semaphore* sig_sem = NULL;

// One-time setup for the sun.misc.Signal support: reset the pending-signal
// counters, create the wakeup semaphore and install the console handler.
static void jdk_misc_signal_init() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  sig_sem = new Semaphore();

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.  For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified.  This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case.  See bugs 4323062, 4345157, and related bugs.

  // Add a CTRL-C handler
  SetConsoleCtrlHandler(consoleHandler, TRUE);
}
2088 
2089 void os::signal_notify(int sig) {
2090   if (sig_sem != NULL) {
2091     Atomic::inc(&pending_signals[sig]);
2092     sig_sem->signal();
2093   } else {
2094     // Signal thread is not created with ReduceSignalUsage and jdk_misc_signal_init
2095     // initialization isn't called.
2096     assert(ReduceSignalUsage, "signal semaphore should be created");
2097   }
2098 }
2099 
// Block until some signal has been recorded in pending_signals and return
// its number, decrementing the corresponding counter.  Runs on the signal
// dispatcher thread (via os::signal_wait()).
static int check_pending_signals() {
  while (true) {
    // Scan all counters and claim one pending signal via CAS so that
    // concurrent notifiers cannot be lost.
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
        return i;
      }
    }
    JavaThread *thread = JavaThread::current();

    // Wait in VM-blocked state so the wait does not hold up safepoints.
    ThreadBlockInVM tbivm(thread);

    bool threadIsSuspended;
    do {
      thread->set_suspend_equivalent();
      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
      sig_sem->wait();

      // were we externally suspended while we were waiting?
      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
      if (threadIsSuspended) {
        // The semaphore has been incremented, but while we were waiting
        // another thread suspended us. We don't want to continue running
        // while suspended because that would surprise the thread that
        // suspended us.
        sig_sem->signal();

        thread->java_suspend_self();
      }
    } while (threadIsSuspended);
  }
}
2132 
// Wait for the next pending signal and return its number (see
// check_pending_signals above).
int os::signal_wait() {
  return check_pending_signals();
}
2136 
2137 // Implicit OS exception handling
2138 
// Redirect execution to 'handler' by rewriting the instruction pointer in
// the exception context; the faulting pc is saved in the current JavaThread
// (when available) so downstream code can retrieve it.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
                      address handler) {
  JavaThread* thread = (JavaThread*) Thread::current_or_null();
  // Save pc in thread
#ifdef _M_AMD64
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
#else
  // Do not blow up if no thread info available.
  if (thread) {
    thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
  }
  // Set pc to handler
  exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
#endif

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}
2162 
2163 
2164 // Used for PostMortemDump
2165 extern "C" void safepoints();
2166 extern "C" void find(int x);
2167 extern "C" void events();
2168 
2169 // According to Windows API documentation, an illegal instruction sequence should generate
2170 // the 0xC000001C exception code. However, real world experience shows that occasionnaly
2171 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2172 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2173 
2174 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2175 
2176 // From "Execution Protection in the Windows Operating System" draft 0.35
2177 // Once a system header becomes available, the "real" define should be
2178 // included or copied here.
2179 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2180 
2181 // Windows Vista/2008 heap corruption check
2182 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2183 
2184 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2185 // C++ compiler contain this error code. Because this is a compiler-generated
2186 // error, the code is not listed in the Win32 API header files.
2187 // The code is actually a cryptic mnemonic device, with the initial "E"
2188 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2189 // ASCII values of "msc".
2190 
2191 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2192 
// Expand to a {name-string, code} table entry via stringification.
#define def_excpt(val) { #val, (val) }

// Mapping from Windows exception codes to their symbolic names,
// consumed by os::exception_name() below.
static const struct { const char* name; uint number; } exceptlabels[] = {
    def_excpt(EXCEPTION_ACCESS_VIOLATION),
    def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
    def_excpt(EXCEPTION_BREAKPOINT),
    def_excpt(EXCEPTION_SINGLE_STEP),
    def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
    def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
    def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
    def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
    def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
    def_excpt(EXCEPTION_FLT_OVERFLOW),
    def_excpt(EXCEPTION_FLT_STACK_CHECK),
    def_excpt(EXCEPTION_FLT_UNDERFLOW),
    def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
    def_excpt(EXCEPTION_INT_OVERFLOW),
    def_excpt(EXCEPTION_PRIV_INSTRUCTION),
    def_excpt(EXCEPTION_IN_PAGE_ERROR),
    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
    def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
    def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
    def_excpt(EXCEPTION_STACK_OVERFLOW),
    def_excpt(EXCEPTION_INVALID_DISPOSITION),
    def_excpt(EXCEPTION_GUARD_PAGE),
    def_excpt(EXCEPTION_INVALID_HANDLE),
    def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
    def_excpt(EXCEPTION_HEAP_CORRUPTION)
};

#undef def_excpt
2224 
2225 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2226   uint code = static_cast<uint>(exception_code);
2227   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2228     if (exceptlabels[i].number == code) {
2229       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2230       return buf;
2231     }
2232   }
2233 
2234   return NULL;
2235 }
2236 
2237 //-----------------------------------------------------------------------------
// Fix up an integer-divide overflow by writing the architected result
// (min_jint/min_jlong quotient, 0 remainder) and stepping the pc past the
// idiv instruction.
LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by idiv; should only happen for -MinInt/-1
  // (division by zero is handled explicitly)
#ifdef  _M_AMD64
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Rip;
  // On x64 the idiv may carry a REX prefix, so the 0xF7 opcode can be at
  // pc[0] or pc[1]; the asserts accept both encodings.
  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  if (pc[0] == 0xF7) {
    // set correct result values and continue after idiv instruction
    ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
  } else {
    ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
  }
  // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
  // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
  // idiv opcode (0xF7).
  ctx->Rdx = (DWORD)0;             // remainder
  // Continue the execution
#else
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Eip;
  assert(pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  assert(ctx->Eax == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
  ctx->Eax = (DWORD)min_jint;      // result
  ctx->Edx = (DWORD)0;             // remainder
  // Continue the execution
#endif
  return EXCEPTION_CONTINUE_EXECUTION;
}
2271 
2272 //-----------------------------------------------------------------------------
// Handle floating-point exceptions caused by native code that changed the
// FPU control word (32-bit) or MXCSR (64-bit) behind the VM's back: restore
// the expected state and resume, otherwise continue the handler search.
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  PCONTEXT ctx = exceptionInfo->ContextRecord;
#ifndef  _WIN64
  // handle exception caused by native method modifying control word
  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;

  switch (exception_code) {
  case EXCEPTION_FLT_DENORMAL_OPERAND:
  case EXCEPTION_FLT_DIVIDE_BY_ZERO:
  case EXCEPTION_FLT_INEXACT_RESULT:
  case EXCEPTION_FLT_INVALID_OPERATION:
  case EXCEPTION_FLT_OVERFLOW:
  case EXCEPTION_FLT_STACK_CHECK:
  case EXCEPTION_FLT_UNDERFLOW:
    // Compare against the VM's expected control word (saved by the stubs).
    jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
    if (fp_control_word != ctx->FloatSave.ControlWord) {
      // Restore FPCW and mask out FLT exceptions
      ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
      // Mask out pending FLT exceptions
      ctx->FloatSave.StatusWord &=  0xffffff00;
      return EXCEPTION_CONTINUE_EXECUTION;
    }
  }

  if (prev_uef_handler != NULL) {
    // We didn't handle this exception so pass it to the previous
    // UnhandledExceptionFilter.
    return (prev_uef_handler)(exceptionInfo);
  }
#else // !_WIN64
  // On Windows, the mxcsr control bits are non-volatile across calls
  // See also CR 6192333
  //
  jint MxCsr = INITIAL_MXCSR;
  // we can't use StubRoutines::addr_mxcsr_std()
  // because in Win64 mxcsr is not saved there
  if (MxCsr != ctx->MxCsr) {
    ctx->MxCsr = MxCsr;
    return EXCEPTION_CONTINUE_EXECUTION;
  }
#endif // !_WIN64

  return EXCEPTION_CONTINUE_SEARCH;
}
2317 
// Forward a fatal exception to VMError for hs_err reporting; may return
// (instead of aborting) when OS-level error reporting is in use.
static inline void report_error(Thread* t, DWORD exception_code,
                                address addr, void* siginfo, void* context) {
  VMError::report_and_die(t, exception_code, addr, siginfo, context);

  // If UseOsErrorReporting, this will return here and save the error file
  // somewhere where we can find it in the minidump.
}
2325 
2326 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2327         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2328   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2329   address addr = (address) exceptionRecord->ExceptionInformation[1];
2330   if (Interpreter::contains(pc)) {
2331     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2332     if (!fr->is_first_java_frame()) {
2333       // get_frame_at_stack_banging_point() is only called when we
2334       // have well defined stacks so java_sender() calls do not need
2335       // to assert safe_for_sender() first.
2336       *fr = fr->java_sender();
2337     }
2338   } else {
2339     // more complex code with compiled code
2340     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2341     CodeBlob* cb = CodeCache::find_blob(pc);
2342     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2343       // Not sure where the pc points to, fallback to default
2344       // stack overflow handling
2345       return false;
2346     } else {
2347       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2348       // in compiled code, the stack banging is performed just after the return pc
2349       // has been pushed on the stack
2350       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2351       if (!fr->is_java_frame()) {
2352         // See java_sender() comment above.
2353         *fr = fr->java_sender();
2354       }
2355     }
2356   }
2357   assert(fr->is_java_frame(), "Safety check");
2358   return true;
2359 }
2360 
#if INCLUDE_AOT
// Vectored exception handler covering AOT-compiled code: AOT DLLs are not
// registered for structured exception handling, so when the faulting pc
// lies in the code cache we delegate to the normal topLevelExceptionFilter.
LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  address pc = (address) exceptionInfo->ContextRecord->Rip;

  // Handle the case where we get an implicit exception in AOT generated
  // code.  AOT DLL's loaded are not registered for structured exceptions.
  // If the exception occurred in the codeCache or AOT code, pass control
  // to our normal exception handler.
  CodeBlob* cb = CodeCache::find_blob(pc);
  if (cb != NULL) {
    return topLevelExceptionFilter(exceptionInfo);
  }

  return EXCEPTION_CONTINUE_SEARCH;
}
#endif
2379 
2380 //-----------------------------------------------------------------------------
2381 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2382   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2383   PEXCEPTION_RECORD exception_record = exceptionInfo->ExceptionRecord;
2384   DWORD exception_code = exception_record->ExceptionCode;
2385 #ifdef _M_AMD64
2386   address pc = (address) exceptionInfo->ContextRecord->Rip;
2387 #else
2388   address pc = (address) exceptionInfo->ContextRecord->Eip;
2389 #endif
2390   Thread* t = Thread::current_or_null_safe();
2391 
2392   // Handle SafeFetch32 and SafeFetchN exceptions.
2393   if (StubRoutines::is_safefetch_fault(pc)) {
2394     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2395   }
2396 
2397 #ifndef _WIN64
2398   // Execution protection violation - win32 running on AMD64 only
2399   // Handled first to avoid misdiagnosis as a "normal" access violation;
2400   // This is safe to do because we have a new/unique ExceptionInformation
2401   // code for this condition.
2402   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2403     int exception_subcode = (int) exception_record->ExceptionInformation[0];
2404     address addr = (address) exception_record->ExceptionInformation[1];
2405 
2406     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2407       int page_size = os::vm_page_size();
2408 
2409       // Make sure the pc and the faulting address are sane.
2410       //
2411       // If an instruction spans a page boundary, and the page containing
2412       // the beginning of the instruction is executable but the following
2413       // page is not, the pc and the faulting address might be slightly
2414       // different - we still want to unguard the 2nd page in this case.
2415       //
2416       // 15 bytes seems to be a (very) safe value for max instruction size.
2417       bool pc_is_near_addr =
2418         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2419       bool instr_spans_page_boundary =
2420         (align_down((intptr_t) pc ^ (intptr_t) addr,
2421                          (intptr_t) page_size) > 0);
2422 
2423       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2424         static volatile address last_addr =
2425           (address) os::non_memory_address_word();
2426 
2427         // In conservative mode, don't unguard unless the address is in the VM
2428         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2429             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2430 
2431           // Set memory to RWX and retry
2432           address page_start = align_down(addr, page_size);
2433           bool res = os::protect_memory((char*) page_start, page_size,
2434                                         os::MEM_PROT_RWX);
2435 
2436           log_debug(os)("Execution protection violation "
2437                         "at " INTPTR_FORMAT
2438                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2439                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2440 
2441           // Set last_addr so if we fault again at the same address, we don't
2442           // end up in an endless loop.
2443           //
2444           // There are two potential complications here.  Two threads trapping
2445           // at the same address at the same time could cause one of the
2446           // threads to think it already unguarded, and abort the VM.  Likely
2447           // very rare.
2448           //
2449           // The other race involves two threads alternately trapping at
2450           // different addresses and failing to unguard the page, resulting in
2451           // an endless loop.  This condition is probably even more unlikely
2452           // than the first.
2453           //
2454           // Although both cases could be avoided by using locks or thread
2455           // local last_addr, these solutions are unnecessary complication:
2456           // this handler is a best-effort safety net, not a complete solution.
2457           // It is disabled by default and should only be used as a workaround
2458           // in case we missed any no-execute-unsafe VM code.
2459 
2460           last_addr = addr;
2461 
2462           return EXCEPTION_CONTINUE_EXECUTION;
2463         }
2464       }
2465 
2466       // Last unguard failed or not unguarding
2467       tty->print_raw_cr("Execution protection violation");
2468       report_error(t, exception_code, addr, exception_record,
2469                    exceptionInfo->ContextRecord);
2470       return EXCEPTION_CONTINUE_SEARCH;
2471     }
2472   }
2473 #endif // _WIN64
2474 
2475   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2476       VM_Version::is_cpuinfo_segv_addr(pc)) {
2477     // Verify that OS save/restore AVX registers.
2478     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2479   }
2480 
2481   if (t != NULL && t->is_Java_thread()) {
2482     JavaThread* thread = (JavaThread*) t;
2483     bool in_java = thread->thread_state() == _thread_in_Java;
2484     bool in_native = thread->thread_state() == _thread_in_native;
2485     bool in_vm = thread->thread_state() == _thread_in_vm;
2486 
2487     // Handle potential stack overflows up front.
2488     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2489       if (thread->stack_guards_enabled()) {
2490         if (in_java) {
2491           frame fr;
2492           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2493             assert(fr.is_java_frame(), "Must be a Java frame");
2494             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2495           }
2496         }
2497         // Yellow zone violation.  The o/s has unprotected the first yellow
2498         // zone page for us.  Note:  must call disable_stack_yellow_zone to
2499         // update the enabled status, even if the zone contains only one page.
2500         assert(!in_vm, "Undersized StackShadowPages");
2501         thread->disable_stack_yellow_reserved_zone();
2502         // If not in java code, return and hope for the best.
2503         return in_java
2504             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2505             :  EXCEPTION_CONTINUE_EXECUTION;
2506       } else {
2507         // Fatal red zone violation.
2508         thread->disable_stack_red_zone();
2509         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2510         report_error(t, exception_code, pc, exception_record,
2511                       exceptionInfo->ContextRecord);
2512         return EXCEPTION_CONTINUE_SEARCH;
2513       }
2514     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2515       if (in_java) {
2516         // Either stack overflow or null pointer exception.
2517         address addr = (address) exception_record->ExceptionInformation[1];
2518         address stack_end = thread->stack_end();
2519         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2520           // Stack overflow.
2521           assert(!os::uses_stack_guard_pages(),
2522                  "should be caught by red zone code above.");
2523           return Handle_Exception(exceptionInfo,
2524                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2525         }
2526         // Check for safepoint polling and implicit null
2527         // We only expect null pointers in the stubs (vtable)
2528         // the rest are checked explicitly now.
2529         CodeBlob* cb = CodeCache::find_blob(pc);
2530         if (cb != NULL) {
2531           if (SafepointMechanism::is_poll_address(addr)) {
2532             address stub = SharedRuntime::get_poll_stub(pc);
2533             return Handle_Exception(exceptionInfo, stub);
2534           }
2535         }
2536 #ifdef _WIN64
2537         // If it's a legal stack address map the entire region in
2538         if (thread->is_in_usable_stack(addr)) {
2539           addr = (address)((uintptr_t)addr &
2540                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2541           os::commit_memory((char *)addr, thread->stack_base() - addr,
2542                             !ExecMem);
2543           return EXCEPTION_CONTINUE_EXECUTION;
2544         }
2545 #endif
2546         // Null pointer exception.
2547         if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
2548           address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2549           if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2550         }
2551         report_error(t, exception_code, pc, exception_record,
2552                       exceptionInfo->ContextRecord);
2553         return EXCEPTION_CONTINUE_SEARCH;
2554       }
2555 
2556 #ifdef _WIN64
2557       // Special care for fast JNI field accessors.
2558       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2559       // in and the heap gets shrunk before the field access.
2560       address slowcase_pc = JNI_FastGetField::find_slowcase_pc(pc);
2561       if (slowcase_pc != (address)-1) {
2562         return Handle_Exception(exceptionInfo, slowcase_pc);
2563       }
2564 #endif
2565 
2566       // Stack overflow or null pointer exception in native code.
2567       report_error(t, exception_code, pc, exception_record,
2568                    exceptionInfo->ContextRecord);
2569       return EXCEPTION_CONTINUE_SEARCH;
2570     } // /EXCEPTION_ACCESS_VIOLATION
2571     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2572 
2573     if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
2574       CompiledMethod* nm = NULL;
2575       JavaThread* thread = (JavaThread*)t;
2576       if (in_java) {
2577         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2578         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
2579       }
2580 
2581       bool is_unsafe_arraycopy = (in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
2582       if (((in_vm || in_native || is_unsafe_arraycopy) && thread->doing_unsafe_access()) ||
2583           (nm != NULL && nm->has_unsafe_access())) {
2584         address next_pc =  Assembler::locate_next_instruction(pc);
2585         if (is_unsafe_arraycopy) {
2586           next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
2587         }
2588         return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
2589       }
2590     }
2591 
2592     if (in_java) {
2593       switch (exception_code) {
2594       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2595         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2596 
2597       case EXCEPTION_INT_OVERFLOW:
2598         return Handle_IDiv_Exception(exceptionInfo);
2599 
2600       } // switch
2601     }
2602     if ((in_java || in_native) && exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2603       LONG result=Handle_FLT_Exception(exceptionInfo);
2604       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2605     }
2606   }
2607 
2608   if (exception_code != EXCEPTION_BREAKPOINT) {
2609     report_error(t, exception_code, pc, exception_record,
2610                  exceptionInfo->ContextRecord);
2611   }
2612   return EXCEPTION_CONTINUE_SEARCH;
2613 }
2614 
2615 #ifndef _WIN64
2616 // Special care for fast JNI accessors.
2617 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2618 // the heap gets shrunk before the field access.
2619 // Need to install our own structured exception handler since native code may
2620 // install its own.
2621 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2622   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2623   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2624     address pc = (address) exceptionInfo->ContextRecord->Eip;
2625     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2626     if (addr != (address)-1) {
2627       return Handle_Exception(exceptionInfo, addr);
2628     }
2629   }
2630   return EXCEPTION_CONTINUE_SEARCH;
2631 }
2632 
// Defines a wrapper around the fast JNI accessor installed by
// JNI_FastGetField, running it under SEH (__try/__except) so that
// fastJNIAccessorExceptionFilter can redirect a faulting accessor to its
// slow-case entry. The __except body returns 0 as a defensive fallback.
// Note: no comments inside the macro body - a '//' comment would swallow
// the line-continuation backslash.
#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
  Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
                                                     jobject obj,           \
                                                     jfieldID fieldID) {    \
    __try {                                                                 \
      return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
                                                                 obj,       \
                                                                 fieldID);  \
    } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
                                              _exception_info())) {         \
    }                                                                       \
    return 0;                                                               \
  }

// One SEH-wrapped accessor per primitive type.
DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
DEFINE_FAST_GETFIELD(jchar,    char,   Char)
DEFINE_FAST_GETFIELD(jshort,   short,  Short)
DEFINE_FAST_GETFIELD(jint,     int,    Int)
DEFINE_FAST_GETFIELD(jlong,    long,   Long)
DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2655 
2656 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2657   switch (type) {
2658   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2659   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2660   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2661   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2662   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2663   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2664   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2665   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2666   default:        ShouldNotReachHere();
2667   }
2668   return (address)-1;
2669 }
2670 #endif
2671 
2672 // Virtual Memory
2673 
// System page size; delegates to the value cached by os::win32.
int os::vm_page_size() { return os::win32::vm_page_size(); }
// Granularity of virtual memory reservations; delegates to the value
// cached by os::win32.
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}
2678 
2679 // Windows large page support is available on Windows 2003. In order to use
2680 // large page memory, the administrator must first assign additional privilege
2681 // to the user:
2682 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2683 //   + select Local Policies -> User Rights Assignment
2684 //   + double click "Lock pages in memory", add users and/or groups
2685 //   + reboot
2686 // Note the above steps are needed for administrator as well, as administrators
2687 // by default do not have the privilege to lock pages in memory.
2688 //
2689 // Note about Windows 2003: although the API supports committing large page
2690 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2691 // scenario, I found through experiment it only uses large page if the entire
2692 // memory region is reserved and committed in a single VirtualAlloc() call.
2693 // This makes Windows large page support more or less like Solaris ISM, in
2694 // that the entire heap must be committed upfront. This probably will change
2695 // in the future, if so the code below needs to be revisited.
2696 
#ifndef MEM_LARGE_PAGES
  // Fallback definition for toolchains whose headers do not define it.
  #define MEM_LARGE_PAGES 0x20000000
#endif

// VirtualFree() that asserts success - used where a failed free would
// indicate a VM bug rather than a recoverable condition.
#define VirtualFreeChecked(mem, size, type)                       \
  do {                                                            \
    bool ret = VirtualFree(mem, size, type);                      \
    assert(ret, "Failed to free memory: " PTR_FORMAT, p2i(mem));  \
  } while (false)

// The number of bytes is setup to match 1 pixel and 32 bits per pixel.
static const int gdi_tiny_bitmap_width_bytes = 4;
2709 
2710 static HBITMAP gdi_create_tiny_bitmap(void* mem) {
2711   // The documentation for CreateBitmap states a word-alignment requirement.
2712   STATIC_ASSERT(is_aligned_(gdi_tiny_bitmap_width_bytes, sizeof(WORD)));
2713 
2714   // Some callers use this function to test if memory crossing separate memory
2715   // reservations can be used. Create a height of 2 to make sure that one pixel
2716   // ends up in the first reservation and the other in the second.
2717   int nHeight = 2;
2718 
2719   assert(is_aligned(mem, gdi_tiny_bitmap_width_bytes), "Incorrect alignment");
2720 
2721   // Width is one pixel and correlates with gdi_tiny_bitmap_width_bytes.
2722   int nWidth = 1;
2723 
2724   // Calculate bit count - will be 32.
2725   UINT nBitCount = gdi_tiny_bitmap_width_bytes / nWidth * BitsPerByte;
2726 
2727   return CreateBitmap(
2728       nWidth,
2729       nHeight,
2730       1,         // nPlanes
2731       nBitCount,
2732       mem);      // lpBits
2733 }
2734 
// It has been found that some of the GDI functions fail under these two situations:
//  1) When used with large pages
//  2) When mem crosses the boundary between two separate memory reservations.
//
// This is a small test used to see if the current GDI implementation is
// susceptible to any of these problems.
static bool gdi_can_use_memory(void* mem) {
  HBITMAP bitmap = gdi_create_tiny_bitmap(mem);
  if (bitmap != NULL) {
    // GDI accepted the memory - it is usable.
    DeleteObject(bitmap);
    return true;
  }

  // Verify that the bitmap could be created with a normal page.
  // If this fails, the testing method above isn't reliable.
#ifdef ASSERT
  void* verify_mem = ::malloc(4 * 1024);
  HBITMAP verify_bitmap = gdi_create_tiny_bitmap(verify_mem);
  if (verify_bitmap == NULL) {
    fatal("Couldn't create test bitmap with malloced memory");
  } else {
    DeleteObject(verify_bitmap);
  }
  ::free(verify_mem);
#endif

  return false;
}
2763 
// Test if GDI functions work when memory spans
// two adjacent memory reservations.
static bool gdi_can_use_split_reservation_memory() {
  size_t granule = os::vm_allocation_granularity();

  // Find virtual memory range
  // Probe for a free two-granule range, then release it so the two halves
  // can be re-reserved individually below. NOTE(review): another thread
  // could grab the range in between; the mem0/mem1 checks below detect
  // that and pessimistically report false.
  void* reserved = VirtualAlloc(NULL,
                                granule * 2,
                                MEM_RESERVE,
                                PAGE_NOACCESS);
  if (reserved == NULL) {
    // Can't proceed with test - pessimistically report false
    return false;
  }
  VirtualFreeChecked(reserved, 0, MEM_RELEASE);

  void* res0 = reserved;
  void* res1 = (char*)reserved + granule;


  // Reserve and commit the first part
  void* mem0 = VirtualAlloc(res0,
                            granule,
                            MEM_RESERVE|MEM_COMMIT,
                            PAGE_READWRITE);
  if (mem0 != res0) {
    // Can't proceed with test - pessimistically report false
    return false;
  }

  // Reserve and commit the second part
  void* mem1 = VirtualAlloc(res1,
                            granule,
                            MEM_RESERVE|MEM_COMMIT,
                            PAGE_READWRITE);
  if (mem1 != res1) {
    VirtualFreeChecked(mem0, 0, MEM_RELEASE);
    // Can't proceed with test - pessimistically report false
    return false;
  }

  // Set the bitmap's bits to point one "width" bytes before, so that
  // the bitmap extends across the reservation boundary.
  void* bitmapBits = (char*)mem1 - gdi_tiny_bitmap_width_bytes;

  bool success = gdi_can_use_memory(bitmapBits);

  VirtualFreeChecked(mem1, 0, MEM_RELEASE);
  VirtualFreeChecked(mem0, 0, MEM_RELEASE);

  return success;
}
2815 
2816 // Container for NUMA node list info
2817 class NUMANodeListHolder {
2818  private:
2819   int *_numa_used_node_list;  // allocated below
2820   int _numa_used_node_count;
2821 
2822   void free_node_list() {
2823     FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2824   }
2825 
2826  public:
2827   NUMANodeListHolder() {
2828     _numa_used_node_count = 0;
2829     _numa_used_node_list = NULL;
2830     // do rest of initialization in build routine (after function pointers are set up)
2831   }
2832 
2833   ~NUMANodeListHolder() {
2834     free_node_list();
2835   }
2836 
2837   bool build() {
2838     DWORD_PTR proc_aff_mask;
2839     DWORD_PTR sys_aff_mask;
2840     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2841     ULONG highest_node_number;
2842     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2843     free_node_list();
2844     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2845     for (unsigned int i = 0; i <= highest_node_number; i++) {
2846       ULONGLONG proc_mask_numa_node;
2847       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2848       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2849         _numa_used_node_list[_numa_used_node_count++] = i;
2850       }
2851     }
2852     return (_numa_used_node_count > 1);
2853   }
2854 
2855   int get_count() { return _numa_used_node_count; }
2856   int get_node_list_entry(int n) {
2857     // for indexes out of range, returns -1
2858     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2859   }
2860 
2861 } numa_node_list_holder;
2862 
// Large page size chosen by os::large_page_init(); 0 means large pages
// are not in use.
static size_t _large_page_size = 0;
2864 
2865 static bool request_lock_memory_privilege() {
2866   HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2867                                 os::current_process_id());
2868 
2869   bool success = false;
2870   HANDLE hToken = NULL;
2871   LUID luid;
2872   if (hProcess != NULL &&
2873       OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &hToken) &&
2874       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2875 
2876     TOKEN_PRIVILEGES tp;
2877     tp.PrivilegeCount = 1;
2878     tp.Privileges[0].Luid = luid;
2879     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2880 
2881     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2882     // privilege. Check GetLastError() too. See MSDN document.
2883     if (AdjustTokenPrivileges(hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2884         (GetLastError() == ERROR_SUCCESS)) {
2885       success = true;
2886     }
2887   }
2888 
2889   // Cleanup
2890   if (hProcess != NULL) {
2891     CloseHandle(hProcess);
2892   }
2893   if (hToken != NULL) {
2894     CloseHandle(hToken);
2895   }
2896 
2897   return success;
2898 }
2899 
2900 static bool numa_interleaving_init() {
2901   bool success = false;
2902 
2903   // print a warning if UseNUMAInterleaving flag is specified on command line
2904   bool warn_on_failure = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2905 
2906 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2907 
2908   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2909   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2910   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2911 
2912   if (!numa_node_list_holder.build()) {
2913     WARN("Process does not cover multiple NUMA nodes.");
2914     WARN("...Ignoring UseNUMAInterleaving flag.");
2915     return false;
2916   }
2917 
2918   if (!gdi_can_use_split_reservation_memory()) {
2919     WARN("Windows GDI cannot handle split reservations.");
2920     WARN("...Ignoring UseNUMAInterleaving flag.");
2921     return false;
2922   }
2923 
2924   if (log_is_enabled(Debug, os, cpu)) {
2925     Log(os, cpu) log;
2926     log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2927     for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2928       log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2929     }
2930   }
2931 
2932 #undef WARN
2933 
2934   return true;
2935 }
2936 
// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
//  * UseNUMAInterleaving requires a separate node for each piece
//
// Parameters:
//   bytes  - total size to reserve (and commit, if MEM_COMMIT is in flags)
//   addr   - requested base address, or NULL for any
//   flags  - VirtualAlloc allocation type (MEM_RESERVE, optionally MEM_COMMIT
//            and MEM_LARGE_PAGES)
//   prot   - VirtualAlloc page protection
//   should_inject_error - debug-only hook that fakes an allocation failure
//            partway through, to exercise the cleanup path
// Returns the base of the contiguous range, or NULL on failure.
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
                                         DWORD prot,
                                         bool should_inject_error = false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) VirtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = align_up(p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t  bytes_remaining = bytes;
  // An overflow of align_up() would have been caught above
  // in the calculation of size_of_reserve.
  char * next_alloc_addr = p_buf;
  HANDLE hProc = GetCurrentProcess();

#ifdef ASSERT
  // Variable for the failure injection
  int ran_num = os::random();
  size_t fail_after = ran_num % bytes;
#endif

  int count=0;
  while (bytes_remaining) {
    // select bytes_to_rq to get to the next chunk_size boundary

    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
    // Note allocate and commit
    char * p_new;

#ifdef ASSERT
    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
#else
    const bool inject_error_now = false;
#endif

    if (inject_error_now) {
      p_new = NULL;
    } else {
      if (!UseNUMAInterleaving) {
        p_new = (char *) VirtualAlloc(next_alloc_addr,
                                      bytes_to_rq,
                                      flags,
                                      prot);
      } else {
        // get the next node to use from the used_node_list
        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
        p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
      }
    }

    if (p_new == NULL) {
      // Free any allocated pages
      if (next_alloc_addr > p_buf) {
        // Some memory was committed so release it.
        size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // need to create a dummy 'reserve' record to match
        // the release.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        log_develop_debug(pagesize)("Reserving pages individually failed.");
      }
#endif
      return NULL;
    }

    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}
3054 
// Decides which large page size to use, or returns 0 when large pages
// cannot be used (missing privilege, no processor support, or an
// unsupported LargePageSizeInBytes). Warnings are emitted only when the
// user set a large-page flag explicitly.
static size_t large_page_init_decide_size() {
  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);

#define WARN(msg) if (warn_on_failure) { warning(msg); }

  if (!request_lock_memory_privilege()) {
    WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
    return 0;
  }

  size_t size = GetLargePageMinimum();
  if (size == 0) {
    WARN("Large page is not supported by the processor.");
    return 0;
  }

#if defined(IA32) || defined(AMD64)
  if (size > 4*M || LargePageSizeInBytes > 4*M) {
    WARN("JVM cannot use large pages bigger than 4mb.");
    return 0;
  }
#endif

  // Honor a user-specified size only when it is a multiple of the
  // OS-reported minimum large page size.
  if (LargePageSizeInBytes > 0 && LargePageSizeInBytes % size == 0) {
    size = LargePageSizeInBytes;
  }

#undef WARN

  return size;
}
3088 
// Initializes large page support: records the chosen large page size and
// publishes it in _page_sizes. Clears UseLargePages when no usable large
// page size was found.
void os::large_page_init() {
  if (!UseLargePages) {
    return;
  }

  _large_page_size = large_page_init_decide_size();

  const size_t default_page_size = (size_t) vm_page_size();
  if (_large_page_size > default_page_size) {
    // List the large page size ahead of the default size, NULL-terminated.
    _page_sizes[0] = _large_page_size;
    _page_sizes[1] = default_page_size;
    _page_sizes[2] = 0;
  }

  UseLargePages = _large_page_size != 0;
}
3105 
3106 int os::create_file_for_heap(const char* dir) {
3107 
3108   const char name_template[] = "/jvmheap.XXXXXX";
3109 
3110   size_t fullname_len = strlen(dir) + strlen(name_template);
3111   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
3112   if (fullname == NULL) {
3113     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
3114     return -1;
3115   }
3116   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
3117   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
3118 
3119   os::native_path(fullname);
3120 
3121   char *path = _mktemp(fullname);
3122   if (path == NULL) {
3123     warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
3124     os::free(fullname);
3125     return -1;
3126   }
3127 
3128   int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
3129 
3130   os::free(fullname);
3131   if (fd < 0) {
3132     warning("Problem opening file for heap (%s)", os::strerror(errno));
3133     return -1;
3134   }
3135   return fd;
3136 }
3137 
3138 // If 'base' is not NULL, function will return NULL if it cannot get 'base'
3139 char* os::map_memory_to_file(char* base, size_t size, int fd) {
3140   assert(fd != -1, "File descriptor is not valid");
3141 
3142   HANDLE fh = (HANDLE)_get_osfhandle(fd);
3143 #ifdef _LP64
3144   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3145     (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3146 #else
3147   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3148     0, (DWORD)size, NULL);
3149 #endif
3150   if (fileMapping == NULL) {
3151     if (GetLastError() == ERROR_DISK_FULL) {
3152       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
3153     }
3154     else {
3155       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3156     }
3157 
3158     return NULL;
3159   }
3160 
3161   LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
3162 
3163   CloseHandle(fileMapping);
3164 
3165   return (char*)addr;
3166 }
3167 
3168 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
3169   assert(fd != -1, "File descriptor is not valid");
3170   assert(base != NULL, "Base address cannot be NULL");
3171 
3172   release_memory(base, size);
3173   return map_memory_to_file(base, size, fd);
3174 }
3175 
3176 // On win32, one cannot release just a part of reserved memory, it's an
3177 // all or nothing deal.  When we split a reservation, we must break the
3178 // reservation into two reservations.
3179 void os::split_reserved_memory(char *base, size_t size, size_t split) {
3180 
3181   char* const split_address = base + split;
3182   assert(size > 0, "Sanity");
3183   assert(size > split, "Sanity");
3184   assert(split > 0, "Sanity");
3185   assert(is_aligned(base, os::vm_allocation_granularity()), "Sanity");
3186   assert(is_aligned(split_address, os::vm_allocation_granularity()), "Sanity");
3187 
3188   release_memory(base, size);
3189   reserve_memory(split, base);
3190   reserve_memory(size - split, split_address);
3191 
3192   // NMT: nothing to do here. Since Windows implements the split by
3193   //  releasing and re-reserving memory, the parts are already registered
3194   //  as individual mappings with NMT.
3195 
3196 }
3197 
3198 // Multiple threads can race in this code but it's not possible to unmap small sections of
3199 // virtual space to get requested alignment, like posix-like os's.
3200 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
3201 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3202   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3203          "Alignment must be a multiple of allocation granularity (page size)");
3204   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3205 
3206   size_t extra_size = size + alignment;
3207   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3208 
3209   char* aligned_base = NULL;
3210 
3211   do {
3212     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3213     if (extra_base == NULL) {
3214       return NULL;
3215     }
3216     // Do manual alignment
3217     aligned_base = align_up(extra_base, alignment);
3218 
3219     if (file_desc != -1) {
3220       os::unmap_memory(extra_base, extra_size);
3221     } else {
3222       os::release_memory(extra_base, extra_size);
3223     }
3224 
3225     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3226 
3227   } while (aligned_base == NULL);
3228 
3229   return aligned_base;
3230 }
3231 
3232 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3233   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3234          "reserve alignment");
3235   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3236   char* res;
3237   // note that if UseLargePages is on, all the areas that require interleaving
3238   // will go thru reserve_memory_special rather than thru here.
3239   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3240   if (!use_individual) {
3241     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3242   } else {
3243     elapsedTimer reserveTimer;
3244     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3245     // in numa interleaving, we have to allocate pages individually
3246     // (well really chunks of NUMAInterleaveGranularity size)
3247     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3248     if (res == NULL) {
3249       warning("NUMA page allocation failed");
3250     }
3251     if (Verbose && PrintMiscellaneous) {
3252       reserveTimer.stop();
3253       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3254                     reserveTimer.milliseconds(), reserveTimer.ticks());
3255     }
3256   }
3257   assert(res == NULL || addr == NULL || addr == res,
3258          "Unexpected address from reserve.");
3259 
3260   return res;
3261 }
3262 
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  // Windows os::reserve_memory() fails if the requested address range is
  // not available.
  return reserve_memory(bytes, requested_addr);
}
3270 
// File-backed variant: attempt to map the file at 'requested_addr'.
// Returns NULL if that address cannot be used.
char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
  assert(file_desc >= 0, "file_desc is not valid");
  return map_memory_to_file(requested_addr, bytes, file_desc);
}
3275 
// Large page size chosen by os::large_page_init(); 0 if not in use.
size_t os::large_page_size() {
  return _large_page_size;
}
3279 
bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}
3286 
// Large page regions may be committed with execute permission (see the
// 'exec' handling in pd_reserve_memory_special).
bool os::can_execute_large_page_memory() {
  return true;
}
3290 
3291 char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3292                                     bool exec) {
3293   assert(UseLargePages, "only for large pages");
3294 
3295   if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3296     return NULL; // Fallback to small pages.
3297   }
3298 
3299   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3300   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3301 
3302   // with large pages, there are two cases where we need to use Individual Allocation
3303   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3304   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3305   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3306     log_debug(pagesize)("Reserving large pages individually.");
3307 
3308     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3309     if (p_buf == NULL) {
3310       // give an appropriate warning message
3311       if (UseNUMAInterleaving) {
3312         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3313       }
3314       if (UseLargePagesIndividualAllocation) {
3315         warning("Individually allocated large pages failed, "
3316                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3317       }
3318       return NULL;
3319     }
3320 
3321     return p_buf;
3322 
3323   } else {
3324     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3325 
3326     // normal policy just allocate it all at once
3327     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3328     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3329 
3330     return res;
3331   }
3332 }
3333 
// Large-page ("special") memory is released the same way as ordinary
// reserved memory on Windows.
bool os::pd_release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  return pd_release_memory(base, bytes);
}

// No Windows-specific statistics to print.
void os::print_statistics() {
}

// Emit a diagnostic warning for a failed commit, including the OS error
// text/code retrieved via os::lasterror()/os::get_last_error().
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}
3350 
// Commit the page-aligned range [addr, addr+bytes). Without NUMA
// interleaving a single MEM_COMMIT suffices; with it, the range may span
// several VirtualAlloc() reservations and is committed chunk by chunk.
// Executable commits get a follow-up VirtualProtect() to add execute rights.
// Returns false on the first failure.
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      // Commit at most one reservation's worth per iteration.
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}
3412 
// Convenience overload; Windows takes no alignment hint for commits.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

// Commit memory or terminate the VM with an out-of-memory error carrying
// 'mesg' as the description.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}

// Convenience overload; Windows takes no alignment hint for commits.
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
3434 
// Decommit the page-aligned range [addr, addr+bytes); the reservation
// itself remains in place.
bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
  return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
}

// Release an entire reservation. MEM_RELEASE requires a size of 0 and frees
// the whole original VirtualAlloc() region, so 'bytes' is intentionally not
// passed to the OS here.
bool os::pd_release_memory(char* addr, size_t bytes) {
  return VirtualFree(addr, 0, MEM_RELEASE) != 0;
}

// Stack guard pages are backed by ordinary committed (non-executable)
// memory on Windows.
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}
3456 
3457 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3458   uint count = 0;
3459   bool ret = false;
3460   size_t bytes_remaining = bytes;
3461   char * next_protect_addr = addr;
3462 
3463   // Use VirtualQuery() to get the chunk size.
3464   while (bytes_remaining) {
3465     MEMORY_BASIC_INFORMATION alloc_info;
3466     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3467       return false;
3468     }
3469 
3470     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3471     // We used different API at allocate_pages_individually() based on UseNUMAInterleaving,
3472     // but we don't distinguish here as both cases are protected by same API.
3473     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3474     warning("Failed protecting pages individually for chunk #%u", count);
3475     if (!ret) {
3476       return false;
3477     }
3478 
3479     bytes_remaining -= bytes_to_protect;
3480     next_protect_addr += bytes_to_protect;
3481     count++;
3482   }
3483   return ret;
3484 }
3485 
// Set protections specified. Translates the VM's ProtType to the Win32
// page-protection constant, commits the range first if needed (Win32 can
// only change protection on committed memory), then applies the protection
// either in one VirtualProtect() call or chunk-wise under NUMA interleaving.
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
  case MEM_PROT_READ: p = PAGE_READONLY; break;
  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
  default:
    ShouldNotReachHere();
  }

  DWORD old_status;

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed) {
    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
                          "cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard page
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
  //
  // Pages in the region become guard pages. Any attempt to access a guard page
  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
  // the guard page status. Guard pages thus act as a one-time access alarm.
  bool ret;
  if (UseNUMAInterleaving) {
    // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
    // so we must protect the chunks individually.
    ret = protect_pages_individually(addr, bytes, p, &old_status);
  } else {
    ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
  }
#ifdef ASSERT
  if (!ret) {
    // Debug builds report the OS error; product builds stay silent here.
    int err = os::get_last_error();
    char buf[256];
    size_t buf_len = os::lasterror(buf, sizeof(buf));
    warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          buf_len != 0 ? buf : "<no_error_string>", err);
  }
#endif
  return ret;
}
3533 
// Mark pages as one-shot guard pages (PAGE_GUARD): the first access raises
// STATUS_GUARD_PAGE and clears the guard status.
bool os::guard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
}

// Remove the guard status by restoring plain read/write protection.
bool os::unguard_memory(char* addr, size_t bytes) {
  DWORD old_status;
  return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
}
3543 
// Page-tuning and NUMA hooks: mostly no-ops or trivial on Windows.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes)    { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
bool os::numa_topology_changed()                       { return false; }
// Always reports at least one group, even on UMA machines.
size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id()                            { return 0; }
// Fill 'ids' with up to 'size' NUMA node ids; returns the number written.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // Provide an answer for UMA systems
    ids[0] = 0;
    return 1;
  } else {
    // check for size bigger than actual groups_num
    size = MIN2(size, numa_get_groups_num());
    for (int i = 0; i < (int)size; i++) {
      ids[i] = numa_node_list_holder.get_node_list_entry(i);
    }
    return size;
  }
}

// Not implemented on Windows; every address reports group 0.
int os::numa_get_group_id_for_address(const void* address) {
  return 0;
}

// Page-info scanning is not supported on Windows.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}

char *os::scan_pages(char *start, char* end, page_info* page_expected,
                     page_info* page_found) {
  return end;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
  return (char*)-1;
}
3585 
#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL

// Start a thread that was created in the suspended state.
void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0:  Thread was not suspended
  // 1:  Thread is running now
  // >1: Thread is still suspended.
  // NOTE(review): the call here is ResumeThread, so the assert text
  // "StartThread failed" is slightly misleading.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}
3597 
3598 
// Short sleep, direct OS call.
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  // Must stay under a second; this sleep cannot be interrupted.
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}
3607 
3608 // Windows does not provide sleep functionality with nanosecond resolution, so we
3609 // try to approximate this with spinning combined with yielding if another thread
3610 // is ready to run on the current processor.
3611 void os::naked_short_nanosleep(jlong ns) {
3612   assert(ns > -1 && ns < NANOUNITS, "Un-interruptable sleep, short time use only");
3613 
3614   int64_t start = os::javaTimeNanos();
3615   do {
3616     if (SwitchToThread() == 0) {
3617       // Nothing else is ready to run on this cpu, spin a little
3618       SpinPause();
3619     }
3620   } while (os::javaTimeNanos() - start < ns);
3621 }
3622 
// Sleep forever; naked call to OS-specific sleep; use with CAUTION
void os::infinite_sleep() {
  while (true) {    // sleep forever ...
    Sleep(100000);  // ... 100 seconds at a time
  }
}

// NOTE(review): this typedef appears unused in the visible portion of the
// file — candidate for removal after a full-file check.
typedef BOOL (WINAPI * STTSignature)(void);

// Give up the remainder of the current time slice.
void os::naked_yield() {
  // Consider passing back the return value from SwitchToThread().
  SwitchToThread();
}
3636 
// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven.  It would be better
// if we dynamically adjusted relative priorities.

// Default mapping from Java priorities (index 0..CriticalPriority) to
// Win32 thread priorities; used unless ThreadPriorityPolicy == 1.
int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};

// More aggressive mapping copied over java_to_os_priority by prio_init()
// when ThreadPriorityPolicy == 1; tops out at TIME_CRITICAL.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};
3670 
3671 static int prio_init() {
3672   // If ThreadPriorityPolicy is 1, switch tables
3673   if (ThreadPriorityPolicy == 1) {
3674     int i;
3675     for (i = 0; i < CriticalPriority + 1; i++) {
3676       os::java_to_os_priority[i] = prio_policy1[i];
3677     }
3678   }
3679   if (UseCriticalJavaThreadPriority) {
3680     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3681   }
3682   return 0;
3683 }
3684 
// Apply a Win32 thread priority; returns OS_OK without touching the OS
// when UseThreadPriorities is disabled.
OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) return OS_OK;
  bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
  return ret ? OS_OK : OS_ERR;
}

// Read back the thread's OS priority. When UseThreadPriorities is disabled,
// reports the mapped NormPriority value instead of querying the OS.
OSReturn os::get_native_priority(const Thread* const thread,
                                 int* priority_ptr) {
  if (!UseThreadPriorities) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}
3705 
// GetCurrentThreadId() returns DWORD
intx os::current_thread_id()  { return GetCurrentThreadId(); }

// Process id captured in os::init(); remains 0 until init has run.
static int _initial_pid = 0;

int os::current_process_id() {
  // Prefer the pid recorded at VM init; fall back to _getpid() otherwise.
  return (_initial_pid ? _initial_pid : _getpid());
}
3714 
// Cached platform characteristics; most are filled in by
// os::win32::initialize_system_info() below.
int    os::win32::_vm_page_size              = 0;
int    os::win32::_vm_allocation_granularity = 0;
int    os::win32::_processor_type            = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int    os::win32::_processor_level           = 0;
julong os::win32::_physical_memory           = 0;
size_t os::win32::_default_stack_size        = 0;

// Bookkeeping for OS thread creation limits.
intx          os::win32::_os_thread_limit    = 0;
volatile intx os::win32::_os_thread_count    = 0;

bool   os::win32::_is_windows_server         = false;

// 6573254
// Currently, the bug is observed across all the supported Windows releases,
// including the latest one (as of this writing - Windows Server 2012 R2)
bool   os::win32::_has_exit_bug              = true;
3732 
3733 void os::win32::initialize_system_info() {
3734   SYSTEM_INFO si;
3735   GetSystemInfo(&si);
3736   _vm_page_size    = si.dwPageSize;
3737   _vm_allocation_granularity = si.dwAllocationGranularity;
3738   _processor_type  = si.dwProcessorType;
3739   _processor_level = si.wProcessorLevel;
3740   set_processor_count(si.dwNumberOfProcessors);
3741 
3742   MEMORYSTATUSEX ms;
3743   ms.dwLength = sizeof(ms);
3744 
3745   // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
3746   // dwMemoryLoad (% of memory in use)
3747   GlobalMemoryStatusEx(&ms);
3748   _physical_memory = ms.ullTotalPhys;
3749 
3750   if (FLAG_IS_DEFAULT(MaxRAM)) {
3751     // Adjust MaxRAM according to the maximum virtual address space available.
3752     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3753   }
3754 
3755   OSVERSIONINFOEX oi;
3756   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3757   GetVersionEx((OSVERSIONINFO*)&oi);
3758   switch (oi.dwPlatformId) {
3759   case VER_PLATFORM_WIN32_NT:
3760     {
3761       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3762       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3763           oi.wProductType == VER_NT_SERVER) {
3764         _is_windows_server = true;
3765       }
3766     }
3767     break;
3768   default: fatal("Unknown platform");
3769   }
3770 
3771   _default_stack_size = os::current_stack_size();
3772   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3773   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3774          "stack size not a multiple of page size");
3775 
3776   initialize_performance_counter();
3777 }
3778 
3779 
// Load a DLL by bare name, first from the system directory, then from the
// Windows directory. Only bare library names are accepted — any path
// component is rejected. Returns NULL with a message in ebuf on failure.
HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
                                      int ebuflen) {
  char path[MAX_PATH];
  DWORD size;
  DWORD pathLen = (DWORD)sizeof(path);
  HINSTANCE result = NULL;

  // only allow library name without path component
  assert(strchr(name, '\\') == NULL, "path not allowed");
  assert(strchr(name, ':') == NULL, "path not allowed");
  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
    jio_snprintf(ebuf, ebuflen,
                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
    return NULL;
  }

  // search system directory
  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // try Windows directory
  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  jio_snprintf(ebuf, ebuflen,
               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
  return NULL;
}
3826 
#define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
#define EXIT_TIMEOUT 300000 /* 5 minutes */

// InitOnceExecuteOnce() callback: initializes the critical section passed
// via the context parameter. Guaranteed to run exactly once.
static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
  InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
  return TRUE;
}
3834 
// Exit the current thread (what == EPT_THREAD), the whole process via
// exit() (EPT_PROCESS), or via _exit() for any other value. On Windows
// releases affected by the 'race at exit' bug (see _has_exit_bug above),
// exiting threads are tracked in a handle array so that a process-exiting
// thread can first wait for them to finish.
int os::win32::exit_process_or_thread(Ept what, int exit_code) {
  // Basic approach:
  //  - Each exiting thread registers its intent to exit and then does so.
  //  - A thread trying to terminate the process must wait for all
  //    threads currently exiting to complete their exit.

  if (os::win32::has_exit_bug()) {
    // The array holds handles of the threads that have started exiting by calling
    // _endthreadex().
    // Should be large enough to avoid blocking the exiting thread due to lack of
    // a free slot.
    static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
    static int handle_count = 0;

    static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
    static CRITICAL_SECTION crit_sect;
    static volatile DWORD process_exiting = 0;
    int i, j;
    DWORD res;
    HANDLE hproc, hthr;

    // We only attempt to register threads until a process exiting
    // thread manages to set the process_exiting flag. Any threads
    // that come through here after the process_exiting flag is set
    // are unregistered and will be caught in the SuspendThread()
    // infinite loop below.
    bool registered = false;

    // The first thread that reached this point, initializes the critical section.
    if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
      warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
    } else if (Atomic::load_acquire(&process_exiting) == 0) {
      if (what != EPT_THREAD) {
        // Atomically set process_exiting before the critical section
        // to increase the visibility between racing threads.
        Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
      }
      EnterCriticalSection(&crit_sect);

      if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
        // Remove from the array those handles of the threads that have completed exiting.
        for (i = 0, j = 0; i < handle_count; ++i) {
          res = WaitForSingleObject(handles[i], 0 /* don't wait */);
          if (res == WAIT_TIMEOUT) {
            handles[j++] = handles[i];
          } else {
            if (res == WAIT_FAILED) {
              warning("WaitForSingleObject failed (%u) in %s: %d\n",
                      GetLastError(), __FILE__, __LINE__);
            }
            // Don't keep the handle, if we failed waiting for it.
            CloseHandle(handles[i]);
          }
        }

        // If there's no free slot in the array of the kept handles, we'll have to
        // wait until at least one thread completes exiting.
        if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
          // Raise the priority of the oldest exiting thread to increase its chances
          // to complete sooner.
          SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
          res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
          if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
            i = (res - WAIT_OBJECT_0);
            handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
            // Compact the array over the completed thread's slot.
            for (; i < handle_count; ++i) {
              handles[i] = handles[i + 1];
            }
          } else {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Don't keep handles, if we failed waiting for them.
            for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
              CloseHandle(handles[i]);
            }
            handle_count = 0;
          }
        }

        // Store a duplicate of the current thread handle in the array of handles.
        hproc = GetCurrentProcess();
        hthr = GetCurrentThread();
        if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
                             0, FALSE, DUPLICATE_SAME_ACCESS)) {
          warning("DuplicateHandle failed (%u) in %s: %d\n",
                  GetLastError(), __FILE__, __LINE__);

          // We can't register this thread (no more handles) so this thread
          // may be racing with a thread that is calling exit(). If the thread
          // that is calling exit() has managed to set the process_exiting
          // flag, then this thread will be caught in the SuspendThread()
          // infinite loop below which closes that race. A small timing
          // window remains before the process_exiting flag is set, but it
          // is only exposed when we are out of handles.
        } else {
          ++handle_count;
          registered = true;

          // The current exiting thread has stored its handle in the array, and now
          // should leave the critical section before calling _endthreadex().
        }

      } else if (what != EPT_THREAD && handle_count > 0) {
        jlong start_time, finish_time, timeout_left;
        // Before ending the process, make sure all the threads that had called
        // _endthreadex() completed.

        // Set the priority level of the current thread to the same value as
        // the priority level of exiting threads.
        // This is to ensure it will be given a fair chance to execute if
        // the timeout expires.
        hthr = GetCurrentThread();
        SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
        start_time = os::javaTimeNanos();
        finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
        // Wait in portions of at most MAXIMUM_WAIT_OBJECTS handles at a time.
        for (i = 0; ; ) {
          int portion_count = handle_count - i;
          if (portion_count > MAXIMUM_WAIT_OBJECTS) {
            portion_count = MAXIMUM_WAIT_OBJECTS;
          }
          for (j = 0; j < portion_count; ++j) {
            SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
          }
          timeout_left = (finish_time - start_time) / 1000000L;
          if (timeout_left < 0) {
            timeout_left = 0;
          }
          res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
          if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Reset portion_count so we close the remaining
            // handles due to this error.
            portion_count = handle_count - i;
          }
          for (j = 0; j < portion_count; ++j) {
            CloseHandle(handles[i + j]);
          }
          if ((i += portion_count) >= handle_count) {
            break;
          }
          start_time = os::javaTimeNanos();
        }
        handle_count = 0;
      }

      LeaveCriticalSection(&crit_sect);
    }

    if (!registered &&
        Atomic::load_acquire(&process_exiting) != 0 &&
        process_exiting != GetCurrentThreadId()) {
      // Some other thread is about to call exit(), so we don't let
      // the current unregistered thread proceed to exit() or _endthreadex()
      while (true) {
        SuspendThread(GetCurrentThread());
        // Avoid busy-wait loop, if SuspendThread() failed.
        Sleep(EXIT_TIMEOUT);
      }
    }
  }

  // We are here if either
  // - there's no 'race at exit' bug on this OS release;
  // - initialization of the critical section failed (unlikely);
  // - the current thread has registered itself and left the critical section;
  // - the process-exiting thread has raised the flag and left the critical section.
  if (what == EPT_THREAD) {
    _endthreadex((unsigned)exit_code);
  } else if (what == EPT_PROCESS) {
    ::exit(exit_code);
  } else {
    _exit(exit_code);
  }

  // Should not reach here
  return exit_code;
}
4015 
4016 #undef EXIT_TIMEOUT
4017 
// Put the standard streams into binary mode (no CRLF translation).
void os::win32::setmode_streams() {
  _setmode(_fileno(stdin), _O_BINARY);
  _setmode(_fileno(stdout), _O_BINARY);
  _setmode(_fileno(stderr), _O_BINARY);
}

// With -XX:+PauseAtExit, block on a keypress from stdin before exiting.
void os::wait_for_keypress_at_exit(void) {
  if (PauseAtExit) {
    fprintf(stderr, "Press any key to continue...\n");
    fgetc(stdin);
  }
}


// Show a system-modal yes/no message box; returns true if "Yes" was chosen.
bool os::message_box(const char* title, const char* message) {
  int result = MessageBox(NULL, message, title,
                          MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
  return result == IDYES;
}
4037 
#ifndef PRODUCT
#ifndef _WIN64
// Helpers to check whether NX protection is enabled
// SEH filter: handle only access violations caused by an attempted
// instruction fetch (execute violation); pass everything else on.
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      pex->ExceptionRecord->NumberParameters > 0 &&
      pex->ExceptionRecord->ExceptionInformation[0] ==
      EXCEPTION_INFO_EXEC_VIOLATION) {
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Probe for NX (no-execute) protection by executing a 'ret' placed on
// the stack; if NX is on, the call faults and the filter above catches it.
void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 }; // ret
  void *code_ptr = (void *)code;
  __try {
    __asm call code_ptr
  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
    tty->print_raw_cr("NX protection detected.");
  }
}
#endif // _WIN64
#endif // PRODUCT
4063 
// First-stage OS initialization; called _before_ the global arguments have
// been parsed, so only set up state that does not depend on flags.
void os::init(void) {
  _initial_pid = _getpid();

  // Seed the VM-internal pseudo-random number generator deterministically.
  init_random(1234567);

  win32::initialize_system_info();
  win32::setmode_streams();
  init_page_sizes((size_t) win32::vm_page_size());

  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  // GetCurrentThread() also returns a pseudo handle; DuplicateHandle turns
  // it into a real handle that remains valid when used from other threads.
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();

  // initialize fast thread access - only used for 32-bit
  win32::initialize_thread_ptr_offset();
}
4088 
// To install functions for atexit processing
extern "C" {
  // atexit() requires a plain C function pointer; this shim simply forwards
  // to the C++ perfMemory_exit() cleanup routine.
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
4095 
4096 static jint initSock();
4097 
4098 // this is called _after_ the global arguments have been parsed
4099 jint os::init_2(void) {
4100 
4101   // This could be set any time but all platforms
4102   // have to set it the same so we have to mirror Solaris.
4103   DEBUG_ONLY(os::set_mutex_init_done();)
4104 
4105   // Setup Windows Exceptions
4106 
4107 #if INCLUDE_AOT
4108   // If AOT is enabled we need to install a vectored exception handler
4109   // in order to forward implicit exceptions from code in AOT
4110   // generated DLLs.  This is necessary since these DLLs are not
4111   // registered for structured exceptions like codecache methods are.
4112   if (AOTLibrary != NULL && (UseAOT || FLAG_IS_DEFAULT(UseAOT))) {
4113     topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelVectoredExceptionFilter);
4114   }
4115 #endif
4116 
4117   // for debugging float code generation bugs
4118   if (ForceFloatExceptions) {
4119 #ifndef  _WIN64
4120     static long fp_control_word = 0;
4121     __asm { fstcw fp_control_word }
4122     // see Intel PPro Manual, Vol. 2, p 7-16
4123     const long precision = 0x20;
4124     const long underflow = 0x10;
4125     const long overflow  = 0x08;
4126     const long zero_div  = 0x04;
4127     const long denorm    = 0x02;
4128     const long invalid   = 0x01;
4129     fp_control_word |= invalid;
4130     __asm { fldcw fp_control_word }
4131 #endif
4132   }
4133 
4134   // If stack_commit_size is 0, windows will reserve the default size,
4135   // but only commit a small portion of it.
4136   size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
4137   size_t default_reserve_size = os::win32::default_stack_size();
4138   size_t actual_reserve_size = stack_commit_size;
4139   if (stack_commit_size < default_reserve_size) {
4140     // If stack_commit_size == 0, we want this too
4141     actual_reserve_size = default_reserve_size;
4142   }
4143 
4144   // Check minimum allowable stack size for thread creation and to initialize
4145   // the java system classes, including StackOverflowError - depends on page
4146   // size.  Add two 4K pages for compiler2 recursion in main thread.
4147   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4148   // class initialization depending on 32 or 64 bit VM.
4149   size_t min_stack_allowed =
4150             (size_t)(JavaThread::stack_guard_zone_size() +
4151                      JavaThread::stack_shadow_zone_size() +
4152                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4153 
4154   min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
4155 
4156   if (actual_reserve_size < min_stack_allowed) {
4157     tty->print_cr("\nThe Java thread stack size specified is too small. "
4158                   "Specify at least %dk",
4159                   min_stack_allowed / K);
4160     return JNI_ERR;
4161   }
4162 
4163   JavaThread::set_stack_size_at_create(stack_commit_size);
4164 
4165   // Calculate theoretical max. size of Threads to guard gainst artifical
4166   // out-of-memory situations, where all available address-space has been
4167   // reserved by thread stacks.
4168   assert(actual_reserve_size != 0, "Must have a stack");
4169 
4170   // Calculate the thread limit when we should start doing Virtual Memory
4171   // banging. Currently when the threads will have used all but 200Mb of space.
4172   //
4173   // TODO: consider performing a similar calculation for commit size instead
4174   // as reserve size, since on a 64-bit platform we'll run into that more
4175   // often than running out of virtual memory space.  We can use the
4176   // lower value of the two calculations as the os_thread_limit.
4177   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4178   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4179 
4180   // at exit methods are called in the reverse order of their registration.
4181   // there is no limit to the number of functions registered. atexit does
4182   // not set errno.
4183 
4184   if (PerfAllowAtExitRegistration) {
4185     // only register atexit functions if PerfAllowAtExitRegistration is set.
4186     // atexit functions can be delayed until process exit time, which
4187     // can be problematic for embedded VM situations. Embedded VMs should
4188     // call DestroyJavaVM() to assure that VM resources are released.
4189 
4190     // note: perfMemory_exit_helper atexit function may be removed in
4191     // the future if the appropriate cleanup code can be added to the
4192     // VM_Exit VMOperation's doit method.
4193     if (atexit(perfMemory_exit_helper) != 0) {
4194       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4195     }
4196   }
4197 
4198 #ifndef _WIN64
4199   // Print something if NX is enabled (win32 on AMD64)
4200   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4201 #endif
4202 
4203   // initialize thread priority policy
4204   prio_init();
4205 
4206   UseNUMA = false; // We don't fully support this yet
4207 
4208   if (UseNUMAInterleaving || (UseNUMA && FLAG_IS_DEFAULT(UseNUMAInterleaving))) {
4209     if (!numa_interleaving_init()) {
4210       FLAG_SET_ERGO(UseNUMAInterleaving, false);
4211     } else if (!UseNUMAInterleaving) {
4212       // When NUMA requested, not-NUMA-aware allocations default to interleaving.
4213       FLAG_SET_ERGO(UseNUMAInterleaving, true);
4214     }
4215   }
4216 
4217   if (initSock() != JNI_OK) {
4218     return JNI_ERR;
4219   }
4220 
4221   SymbolEngine::recalc_search_path();
4222 
4223   // Initialize data for jdk.internal.misc.Signal
4224   if (!ReduceSignalUsage) {
4225     jdk_misc_signal_init();
4226   }
4227 
4228   return JNI_OK;
4229 }
4230 
4231 // combine the high and low DWORD into a ULONGLONG
4232 static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
4233   ULONGLONG value = high_word;
4234   value <<= sizeof(high_word) * 8;
4235   value |= low_word;
4236   return value;
4237 }
4238 
// Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat.
// Only the fields HotSpot consumes are populated (size, the three timestamps,
// and the directory/regular-file mode bit); everything else is zeroed.
// NOTE(review): the timestamps are stored as raw 64-bit FILETIME values
// (100ns ticks since 1601), not POSIX seconds-since-epoch -- callers appear
// to use them for relative comparison only; confirm before treating as time_t.
static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
  ::memset((void*)sbuf, 0, sizeof(struct stat));
  sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
  sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
                                  file_data.ftLastWriteTime.dwLowDateTime);
  sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
                                  file_data.ftCreationTime.dwLowDateTime);
  sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
                                  file_data.ftLastAccessTime.dwLowDateTime);
  if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
    sbuf->st_mode |= S_IFDIR;
  } else {
    sbuf->st_mode |= S_IFREG;
  }
}
4255 
4256 static errno_t convert_to_unicode(char const* char_path, LPWSTR* unicode_path) {
4257   // Get required buffer size to convert to Unicode
4258   int unicode_path_len = MultiByteToWideChar(CP_ACP,
4259                                              MB_ERR_INVALID_CHARS,
4260                                              char_path, -1,
4261                                              NULL, 0);
4262   if (unicode_path_len == 0) {
4263     return EINVAL;
4264   }
4265 
4266   *unicode_path = NEW_C_HEAP_ARRAY(WCHAR, unicode_path_len, mtInternal);
4267 
4268   int result = MultiByteToWideChar(CP_ACP,
4269                                    MB_ERR_INVALID_CHARS,
4270                                    char_path, -1,
4271                                    *unicode_path, unicode_path_len);
4272   assert(result == unicode_path_len, "length already checked above");
4273 
4274   return ERROR_SUCCESS;
4275 }
4276 
4277 static errno_t get_full_path(LPCWSTR unicode_path, LPWSTR* full_path) {
4278   // Get required buffer size to convert to full path. The return
4279   // value INCLUDES the terminating null character.
4280   DWORD full_path_len = GetFullPathNameW(unicode_path, 0, NULL, NULL);
4281   if (full_path_len == 0) {
4282     return EINVAL;
4283   }
4284 
4285   *full_path = NEW_C_HEAP_ARRAY(WCHAR, full_path_len, mtInternal);
4286 
4287   // When the buffer has sufficient size, the return value EXCLUDES the
4288   // terminating null character
4289   DWORD result = GetFullPathNameW(unicode_path, full_path_len, *full_path, NULL);
4290   assert(result <= full_path_len, "length already checked above");
4291 
4292   return ERROR_SUCCESS;
4293 }
4294 
4295 static void set_path_prefix(char* buf, LPWSTR* prefix, int* prefix_off, bool* needs_fullpath) {
4296   *prefix_off = 0;
4297   *needs_fullpath = true;
4298 
4299   if (::isalpha(buf[0]) && !::IsDBCSLeadByte(buf[0]) && buf[1] == ':' && buf[2] == '\\') {
4300     *prefix = L"\\\\?\\";
4301   } else if (buf[0] == '\\' && buf[1] == '\\') {
4302     if (buf[2] == '?' && buf[3] == '\\') {
4303       *prefix = L"";
4304       *needs_fullpath = false;
4305     } else {
4306       *prefix = L"\\\\?\\UNC";
4307       *prefix_off = 1; // Overwrite the first char with the prefix, so \\share\path becomes \\?\UNC\share\path
4308     }
4309   } else {
4310     *prefix = L"\\\\?\\";
4311   }
4312 }
4313 
// Returns the given path as an absolute wide path in unc format. The returned path is NULL
// on error (with err being set accordingly) and should be freed via os::free() otherwise.
// additional_space is the size of space, in wchar_t, the function will additionally add to
// the allocation of return buffer (such that the size of the returned buffer is at least
// wcslen(buf) + 1 + additional_space).
static wchar_t* wide_abs_unc_path(char const* path, errno_t & err, int additional_space = 0) {
  if ((path == NULL) || (path[0] == '\0')) {
    err = ENOENT;
    return NULL;
  }

  // Need to allocate at least room for 3 characters, since os::native_path transforms C: to C:.
  size_t buf_len = 1 + MAX2((size_t)3, strlen(path));
  char* buf = NEW_C_HEAP_ARRAY(char, buf_len, mtInternal);
  strncpy(buf, path, buf_len);
  os::native_path(buf);

  LPWSTR prefix = NULL;
  int prefix_off = 0;
  bool needs_fullpath = true;
  set_path_prefix(buf, &prefix, &prefix_off, &needs_fullpath);

  LPWSTR unicode_path = NULL;
  err = convert_to_unicode(buf, &unicode_path);
  FREE_C_HEAP_ARRAY(char, buf);
  if (err != ERROR_SUCCESS) {
    return NULL;
  }

  LPWSTR converted_path = NULL;
  if (needs_fullpath) {
    err = get_full_path(unicode_path, &converted_path);
  } else {
    // Already in \\?\ form: converted_path ALIASES unicode_path here, which
    // is why the free below is guarded by an identity check.
    converted_path = unicode_path;
  }

  LPWSTR result = NULL;
  if (converted_path != NULL) {
    size_t prefix_len = wcslen(prefix);
    // The first prefix_off chars of converted_path are consumed by the
    // prefix (e.g. one leading '\' of a UNC path), hence the subtraction.
    size_t result_len = prefix_len - prefix_off + wcslen(converted_path) + additional_space + 1;
    result = NEW_C_HEAP_ARRAY(WCHAR, result_len, mtInternal);
    _snwprintf(result, result_len, L"%s%s", prefix, &converted_path[prefix_off]);

    // Remove trailing pathsep (not for \\?\<DRIVE>:\, since it would make it relative)
    result_len = wcslen(result);
    if ((result[result_len - 1] == L'\\') &&
        !(::iswalpha(result[4]) && result[5] == L':' && result_len == 7)) {
      result[result_len - 1] = L'\0';
    }
  }

  if (converted_path != unicode_path) {
    FREE_C_HEAP_ARRAY(WCHAR, converted_path);
  }
  FREE_C_HEAP_ARRAY(WCHAR, unicode_path);

  return static_cast<wchar_t*>(result); // LPWSTR and wchar_t* are the same type on Windows.
}
4372 
4373 int os::stat(const char *path, struct stat *sbuf) {
4374   errno_t err;
4375   wchar_t* wide_path = wide_abs_unc_path(path, err);
4376 
4377   if (wide_path == NULL) {
4378     errno = err;
4379     return -1;
4380   }
4381 
4382   WIN32_FILE_ATTRIBUTE_DATA file_data;;
4383   BOOL bret = ::GetFileAttributesExW(wide_path, GetFileExInfoStandard, &file_data);
4384   os::free(wide_path);
4385 
4386   if (!bret) {
4387     errno = ::GetLastError();
4388     return -1;
4389   }
4390 
4391   file_attribute_data_to_stat(sbuf, file_data);
4392   return 0;
4393 }
4394 
4395 static HANDLE create_read_only_file_handle(const char* file) {
4396   errno_t err;
4397   wchar_t* wide_path = wide_abs_unc_path(file, err);
4398 
4399   if (wide_path == NULL) {
4400     errno = err;
4401     return INVALID_HANDLE_VALUE;
4402   }
4403 
4404   HANDLE handle = ::CreateFileW(wide_path, 0, FILE_SHARE_READ,
4405                                 NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4406   os::free(wide_path);
4407 
4408   return handle;
4409 }
4410 
4411 bool os::same_files(const char* file1, const char* file2) {
4412 
4413   if (file1 == NULL && file2 == NULL) {
4414     return true;
4415   }
4416 
4417   if (file1 == NULL || file2 == NULL) {
4418     return false;
4419   }
4420 
4421   if (strcmp(file1, file2) == 0) {
4422     return true;
4423   }
4424 
4425   HANDLE handle1 = create_read_only_file_handle(file1);
4426   HANDLE handle2 = create_read_only_file_handle(file2);
4427   bool result = false;
4428 
4429   // if we could open both paths...
4430   if (handle1 != INVALID_HANDLE_VALUE && handle2 != INVALID_HANDLE_VALUE) {
4431     BY_HANDLE_FILE_INFORMATION fileInfo1;
4432     BY_HANDLE_FILE_INFORMATION fileInfo2;
4433     if (::GetFileInformationByHandle(handle1, &fileInfo1) &&
4434       ::GetFileInformationByHandle(handle2, &fileInfo2)) {
4435       // the paths are the same if they refer to the same file (fileindex) on the same volume (volume serial number)
4436       if (fileInfo1.dwVolumeSerialNumber == fileInfo2.dwVolumeSerialNumber &&
4437         fileInfo1.nFileIndexHigh == fileInfo2.nFileIndexHigh &&
4438         fileInfo1.nFileIndexLow == fileInfo2.nFileIndexLow) {
4439         result = true;
4440       }
4441     }
4442   }
4443 
4444   //free the handles
4445   if (handle1 != INVALID_HANDLE_VALUE) {
4446     ::CloseHandle(handle1);
4447   }
4448 
4449   if (handle2 != INVALID_HANDLE_VALUE) {
4450     ::CloseHandle(handle2);
4451   }
4452 
4453   return result;
4454 }
4455 
// Combine the two 32-bit halves of a FILETIME into one signed 64-bit value
// (units are the FILETIME's own 100ns ticks).
#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
4458 
4459 
4460 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4461 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4462 // of a thread.
4463 //
4464 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4465 // the fast estimate available on the platform.
4466 
// current_thread_cpu_time() is not optimized for Windows yet
// Returns combined user+system CPU time of the calling thread.
jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
}
4472 
// CPU time of `thread`; combined user+system to stay
// consistent with what current_thread_cpu_time() returns.
jlong os::thread_cpu_time(Thread* thread) {
  return os::thread_cpu_time(thread, true /* user+sys */);
}
4477 
// CPU time of the calling thread: user+system when user_sys_cpu_time is
// true, user-only otherwise.
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
}
4481 
4482 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4483   // This code is copy from clasic VM -> hpi::sysThreadCPUTime
4484   // If this function changes, os::is_thread_cpu_time_supported() should too
4485   FILETIME CreationTime;
4486   FILETIME ExitTime;
4487   FILETIME KernelTime;
4488   FILETIME UserTime;
4489 
4490   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4491                       &ExitTime, &KernelTime, &UserTime) == 0) {
4492     return -1;
4493   } else if (user_sys_cpu_time) {
4494     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4495   } else {
4496     return FT2INT64(UserTime) * 100;
4497   }
4498 }
4499 
4500 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4501   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4502   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4503   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4504   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4505 }
4506 
4507 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4508   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4509   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4510   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4511   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4512 }
4513 
4514 bool os::is_thread_cpu_time_supported() {
4515   // see os::thread_cpu_time
4516   FILETIME CreationTime;
4517   FILETIME ExitTime;
4518   FILETIME KernelTime;
4519   FILETIME UserTime;
4520 
4521   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4522                       &KernelTime, &UserTime) == 0) {
4523     return false;
4524   } else {
4525     return true;
4526   }
4527 }
4528 
4529 // Windows does't provide a loadavg primitive so this is stubbed out for now.
4530 // It does have primitives (PDH API) to get CPU usage and run queue length.
4531 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4532 // If we wanted to implement loadavg on Windows, we have a few options:
4533 //
4534 // a) Query CPU usage and run queue length and "fake" an answer by
4535 //    returning the CPU usage if it's under 100%, and the run queue
4536 //    length otherwise.  It turns out that querying is pretty slow
4537 //    on Windows, on the order of 200 microseconds on a fast machine.
4538 //    Note that on the Windows the CPU usage value is the % usage
4539 //    since the last time the API was called (and the first call
4540 //    returns 100%), so we'd have to deal with that as well.
4541 //
4542 // b) Sample the "fake" answer using a sampling thread and store
4543 //    the answer in a global variable.  The call to loadavg would
4544 //    just return the value of the global, avoiding the slow query.
4545 //
4546 // c) Sample a better answer using exponential decay to smooth the
4547 //    value.  This is basically the algorithm used by UNIX kernels.
4548 //
4549 // Note that sampling thread starvation could affect both (b) and (c).
// Not implemented on Windows (see the discussion above); always returns -1
// without writing to loadavg[].
int os::loadavg(double loadavg[], int nelem) {
  return -1;
}
4553 
4554 
// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
// Returns true when yields should be suppressed.
bool os::dont_yield() {
  return DontYieldALot;
}
4559 
4560 int os::open(const char *path, int oflag, int mode) {
4561   errno_t err;
4562   wchar_t* wide_path = wide_abs_unc_path(path, err);
4563 
4564   if (wide_path == NULL) {
4565     errno = err;
4566     return -1;
4567   }
4568   int fd = ::_wopen(wide_path, oflag | O_BINARY | O_NOINHERIT, mode);
4569   os::free(wide_path);
4570 
4571   if (fd == -1) {
4572     errno = ::GetLastError();
4573   }
4574 
4575   return fd;
4576 }
4577 
// Wrap an already-open file descriptor in a buffered FILE stream.
FILE* os::open(int fd, const char* mode) {
  return ::_fdopen(fd, mode);
}
4581 
// Is a (classpath) directory empty?
bool os::dir_is_empty(const char* path) {
  errno_t err;
  // Request 2 extra wchar_t so the "\*" pattern can be appended in place.
  wchar_t* wide_path = wide_abs_unc_path(path, err, 2);

  if (wide_path == NULL) {
    errno = err;
    return false;
  }

  // Make sure we end with "\\*"
  if (wide_path[wcslen(wide_path) - 1] == L'\\') {
    wcscat(wide_path, L"*");
  } else {
    wcscat(wide_path, L"\\*");
  }

  WIN32_FIND_DATAW fd;
  HANDLE f = ::FindFirstFileW(wide_path, &fd);
  os::free(wide_path);
  bool is_empty = true;

  if (f != INVALID_HANDLE_VALUE) {
    // NOTE(review): the entry returned by FindFirstFileW itself is never
    // examined; only subsequent FindNextFileW results are.  This assumes
    // the first entry is always "." (or otherwise ignorable) -- confirm.
    while (is_empty && ::FindNextFileW(f, &fd)) {
      // An empty directory contains only the current directory file
      // and the previous directory file.
      if ((wcscmp(fd.cFileName, L".") != 0) &&
          (wcscmp(fd.cFileName, L"..") != 0)) {
        is_empty = false;
      }
    }
    FindClose(f);
  } else {
    errno = ::GetLastError();
  }

  return is_empty;
}
4620 
4621 // create binary file, rewriting existing file if required
4622 int os::create_binary_file(const char* path, bool rewrite_existing) {
4623   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4624   if (!rewrite_existing) {
4625     oflags |= _O_EXCL;
4626   }
4627   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4628 }
4629 
// return current position of file pointer
jlong os::current_file_offset(int fd) {
  // Seeking 0 bytes from SEEK_CUR reports the position without moving it.
  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}
4634 
// move file pointer to the specified offset
// Returns the new absolute position, or -1 on error.
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}
4639 
4640 
// Reposition fd relative to whence (SEEK_SET/SEEK_CUR/SEEK_END);
// returns the resulting absolute position, or -1 on error.
jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::_lseeki64(fd, offset, whence);
}
4644 
4645 ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4646   OVERLAPPED ov;
4647   DWORD nread;
4648   BOOL result;
4649 
4650   ZeroMemory(&ov, sizeof(ov));
4651   ov.Offset = (DWORD)offset;
4652   ov.OffsetHigh = (DWORD)(offset >> 32);
4653 
4654   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4655 
4656   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4657 
4658   return result ? nread : 0;
4659 }
4660 
4661 
4662 // This method is a slightly reworked copy of JDK's sysNativePath
4663 // from src/windows/hpi/src/path_md.c
4664 
4665 // Convert a pathname to native format.  On win32, this involves forcing all
4666 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4667 // sometimes rejects '/') and removing redundant separators.  The input path is
4668 // assumed to have been converted into the character encoding used by the local
4669 // system.  Because this might be a double-byte encoding, care is taken to
4670 // treat double-byte lead characters correctly.
4671 //
4672 // This procedure modifies the given path in place, as the result is never
4673 // longer than the original.  There is no error return; this operation always
4674 // succeeds.
char * os::native_path(char *path) {
  // src scans the input; dst rewrites the output in place (never longer).
  char *src = path, *dst = path, *end = path;
  char *colon = NULL;  // If a drive specifier is found, this will
                       // point to the colon following the drive letter

  // Assumption: '/', '\\', ':', and drive letters are never lead bytes
  assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
          && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");

  // Check for leading separators
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    // Remove leading separators if followed by drive specifier.  This
    // hack is necessary to support file URLs containing drive
    // specifiers (e.g., "file://c:/path").  As a side effect,
    // "/c:/path" can be used as an alternative to "c:/path".
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      // UNC pathname: Retain first separator; leave src pointed at
      // second separator so that further separators will be collapsed
      // into the second separator.  The result will be a pathname
      // beginning with "\\\\" followed (most likely) by a host name.
      src = dst = path + 1;
      path[0] = '\\';     // Force first separator to '\\'
    }
  }

  // end marks the last position kept; everything after it is trimmed
  // (trailing separators and trailing spaces).
  end = dst;

  // Remove redundant separators from remainder of path, forcing all
  // separators to be '\\' rather than '/'. Also, single byte space
  // characters are removed from the end of the path because those
  // are not legal ending characters on this operating system.
  //
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      while (isfilesep(*src)) src++;
      if (*src == '\0') {
        // Check for trailing separator
        end = dst;
        if (colon == dst - 2) break;  // "z:\\"
        if (dst == path + 1) break;   // "\\"
        if (dst == path + 2 && isfilesep(path[0])) {
          // "\\\\" is not collapsed to "\\" because "\\\\" marks the
          // beginning of a UNC pathname.  Even though it is not, by
          // itself, a valid UNC pathname, we leave it as is in order
          // to be consistent with the path canonicalizer as well
          // as the win32 APIs, which treat this case as an invalid
          // UNC pathname rather than as an alias for the root
          // directory of the current drive.
          break;
        }
        end = --dst;  // Path does not denote a root directory, so
                      // remove trailing separator
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else {  // Copy a single-byte character
        char c = *src++;
        *dst++ = c;
        // Space is not a legal ending character
        if (c != ' ') end = dst;
      }
    }
  }

  *end = '\0';

  // For "z:", add "." to work around a bug in the C runtime library
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}
4766 
4767 // This code is a copy of JDK's sysSetLength
4768 // from src/windows/hpi/src/sys_api_md.c
4769 
4770 int os::ftruncate(int fd, jlong length) {
4771   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4772   long high = (long)(length >> 32);
4773   DWORD ret;
4774 
4775   if (h == (HANDLE)(-1)) {
4776     return -1;
4777   }
4778 
4779   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4780   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4781     return -1;
4782   }
4783 
4784   if (::SetEndOfFile(h) == FALSE) {
4785     return -1;
4786   }
4787 
4788   return 0;
4789 }
4790 
// Return the CRT file descriptor underlying the given FILE stream.
int os::get_fileno(FILE* fp) {
  return _fileno(fp);
}
4794 
4795 // This code is a copy of JDK's sysSync
4796 // from src/windows/hpi/src/sys_api_md.c
4797 // except for the legacy workaround for a bug in Win 98
4798 
4799 int os::fsync(int fd) {
4800   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4801 
4802   if ((!::FlushFileBuffers(handle)) &&
4803       (GetLastError() != ERROR_ACCESS_DENIED)) {
4804     // from winerror.h
4805     return -1;
4806   }
4807   return 0;
4808 }
4809 
4810 static int nonSeekAvailable(int, long *);
4811 static int stdinAvailable(int, long *);
4812 
4813 // This code is a copy of JDK's sysAvailable
4814 // from src/windows/hpi/src/sys_api_md.c
4815 
// Report in *bytes how much can be read from fd: for pipes/character
// devices, the immediately readable amount; for regular files, the
// distance from the current position to EOF.  Returns TRUE/FALSE.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      // Non-seekable descriptor: delegate to the pipe/console helpers.
      int ret;
      long lpbytes;
      if (fd == 0) {
        // fd 0 (stdin) may be an interactive console.
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    // Regular file: seek to the end and back, restoring the original
    // position; available = size - current position.
    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
      return FALSE;
    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
      return FALSE;
    }
    *bytes = end - cur;
    return TRUE;
  } else {
    return FALSE;
  }
}
4846 
// Acquire the CRT's per-stream lock (pairs with os::funlockfile).
void os::flockfile(FILE* fp) {
  _lock_file(fp);
}
4850 
// Release the CRT's per-stream lock taken by os::flockfile.
void os::funlockfile(FILE* fp) {
  _unlock_file(fp);
}
4854 
4855 // This code is a copy of JDK's nonSeekAvailable
4856 // from src/windows/hpi/src/sys_api_md.c
4857 
4858 static int nonSeekAvailable(int fd, long *pbytes) {
4859   // This is used for available on non-seekable devices
4860   // (like both named and anonymous pipes, such as pipes
4861   //  connected to an exec'd process).
4862   // Standard Input is a special case.
4863   HANDLE han;
4864 
4865   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4866     return FALSE;
4867   }
4868 
4869   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4870     // PeekNamedPipe fails when at EOF.  In that case we
4871     // simply make *pbytes = 0 which is consistent with the
4872     // behavior we get on Solaris when an fd is at EOF.
4873     // The only alternative is to raise an Exception,
4874     // which isn't really warranted.
4875     //
4876     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4877       return FALSE;
4878     }
4879     *pbytes = 0;
4880   }
4881   return TRUE;
4882 }
4883 
4884 #define MAX_INPUT_EVENTS 2000
4885 
4886 // This code is a copy of JDK's stdinAvailable
4887 // from src/windows/hpi/src/sys_api_md.c
4888 
4889 static int stdinAvailable(int fd, long *pbytes) {
4890   HANDLE han;
4891   DWORD numEventsRead = 0;  // Number of events read from buffer
4892   DWORD numEvents = 0;      // Number of events in buffer
4893   DWORD i = 0;              // Loop index
4894   DWORD curLength = 0;      // Position marker
4895   DWORD actualLength = 0;   // Number of bytes readable
4896   BOOL error = FALSE;       // Error holder
4897   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4898 
4899   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4900     return FALSE;
4901   }
4902 
4903   // Construct an array of input records in the console buffer
4904   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4905   if (error == 0) {
4906     return nonSeekAvailable(fd, pbytes);
4907   }
4908 
4909   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4910   if (numEvents > MAX_INPUT_EVENTS) {
4911     numEvents = MAX_INPUT_EVENTS;
4912   }
4913 
4914   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4915   if (lpBuffer == NULL) {
4916     return FALSE;
4917   }
4918 
4919   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4920   if (error == 0) {
4921     os::free(lpBuffer);
4922     return FALSE;
4923   }
4924 
4925   // Examine input records for the number of bytes available
4926   for (i=0; i<numEvents; i++) {
4927     if (lpBuffer[i].EventType == KEY_EVENT) {
4928 
4929       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4930                                       &(lpBuffer[i].Event);
4931       if (keyRecord->bKeyDown == TRUE) {
4932         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4933         curLength++;
4934         if (*keyPressed == '\r') {
4935           actualLength = curLength;
4936         }
4937       }
4938     }
4939   }
4940 
4941   if (lpBuffer != NULL) {
4942     os::free(lpBuffer);
4943   }
4944 
4945   *pbytes = (long) actualLength;
4946   return TRUE;
4947 }
4948 
// Map a block of memory.
// Maps 'bytes' bytes of file 'file_name' starting at 'file_offset' at the
// requested address 'addr'. Returns the base address of the mapping, or NULL
// on failure. The 'fd' parameter is not used here; the file is reopened by
// name via CreateFile.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {
  HANDLE hFile;
  char* base;

  hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
                     OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
  if (hFile == INVALID_HANDLE_VALUE) {
    log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
    return NULL;
  }

  if (allow_exec) {
    // CreateFileMapping/MapViewOfFileEx can't map executable memory
    // unless it comes from a PE image (which the shared archive is not.)
    // Even VirtualProtect refuses to give execute access to mapped memory
    // that was not previously executable.
    //
    // Instead, stick the executable region in anonymous memory.  Yuck.
    // Penalty is that ~4 pages will not be shareable - in the future
    // we might consider DLLizing the shared archive with a proper PE
    // header so that mapping executable + sharing is possible.

    base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
                                PAGE_READWRITE);
    if (base == NULL) {
      log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
      CloseHandle(hFile);
      return NULL;
    }

    // Record virtual memory allocation
    MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);

    // Copy the file contents into the anonymous region by hand, using an
    // OVERLAPPED struct solely to specify the file offset.
    DWORD bytes_read;
    OVERLAPPED overlapped;
    overlapped.Offset = (DWORD)file_offset;
    overlapped.OffsetHigh = 0;
    overlapped.hEvent = NULL;
    // ReadFile guarantees that if the return value is true, the requested
    // number of bytes were read before returning.
    bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
    if (!res) {
      log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
      release_memory(base, bytes);
      CloseHandle(hFile);
      return NULL;
    }
  } else {
    // Non-executable mapping: use a copy-on-write (PAGE_WRITECOPY) file
    // mapping; read_only selects a read-only vs. copy-on-write view.
    HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
                                    NULL /* file_name */);
    if (hMap == NULL) {
      log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
      CloseHandle(hFile);
      return NULL;
    }

    DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
    base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
                                  (DWORD)bytes, addr);
    if (base == NULL) {
      log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
      CloseHandle(hMap);
      CloseHandle(hFile);
      return NULL;
    }

    // The mapping handle is no longer needed once the view exists; on
    // close failure we still return the (valid) view.
    if (CloseHandle(hMap) == 0) {
      log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
      CloseHandle(hFile);
      return base;
    }
  }

  if (allow_exec) {
    // Flip the anonymous region to an executable protection now that the
    // contents have been read in.
    DWORD old_protect;
    DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
    bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;

    if (!res) {
      log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
      // Don't consider this a hard error, on IA32 even if the
      // VirtualProtect fails, we should still be able to execute
      CloseHandle(hFile);
      return base;
    }
  }

  if (CloseHandle(hFile) == 0) {
    log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
    return base;
  }

  return base;
}
5046 
5047 
5048 // Remap a block of memory.
5049 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5050                           char *addr, size_t bytes, bool read_only,
5051                           bool allow_exec) {
5052   // This OS does not allow existing memory maps to be remapped so we
5053   // would have to unmap the memory before we remap it.
5054 
5055   // Because there is a small window between unmapping memory and mapping
5056   // it in again with different protections, CDS archives are mapped RW
5057   // on windows, so this function isn't called.
5058   ShouldNotReachHere();
5059   return NULL;
5060 }
5061 
5062 
// Unmap a block of memory.
// Returns true=success, otherwise false.
// Handles both mapping flavors produced by pd_map_memory(): views created
// with MapViewOfFileEx, and executable regions created with VirtualAlloc.
bool os::pd_unmap_memory(char* addr, size_t bytes) {
  MEMORY_BASIC_INFORMATION mem_info;
  // Query the region's current protection to decide which release path to use.
  if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
    log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
    return false;
  }

  // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
  // Instead, executable region was allocated using VirtualAlloc(). See
  // pd_map_memory() above.
  //
  // The following flags should match the 'exec_access' flags used for
  // VirtualProtect() in pd_map_memory().
  if (mem_info.Protect == PAGE_EXECUTE_READ ||
      mem_info.Protect == PAGE_EXECUTE_READWRITE) {
    return pd_release_memory(addr, bytes);
  }

  BOOL result = UnmapViewOfFile(addr);
  if (result == 0) {
    log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
    return false;
  }
  return true;
}
5091 
5092 void os::pause() {
5093   char filename[MAX_PATH];
5094   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5095     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5096   } else {
5097     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5098   }
5099 
5100   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5101   if (fd != -1) {
5102     struct stat buf;
5103     ::close(fd);
5104     while (::stat(filename, &buf) == 0) {
5105       Sleep(100);
5106     }
5107   } else {
5108     jio_fprintf(stderr,
5109                 "Could not open pause file '%s', continuing immediately.\n", filename);
5110   }
5111 }
5112 
// Thread currently running under crash protection (at most one at a time),
// the active protection object, and the mux that serializes protected
// regions. All three are written only while holding _crash_mux (see call()).
Thread* os::ThreadCrashProtection::_protected_thread = NULL;
os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;

// Nothing to initialize; state is set up in call().
os::ThreadCrashProtection::ThreadCrashProtection() {
}
5119 
// See the caveats for this class in os_windows.hpp
// Protects the callback call so that raised OS EXCEPTIONS causes a jump back
// into this method and returns false. If no OS EXCEPTION was raised, returns
// true.
// The callback is supposed to provide the method that should be protected.
//
bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {

  // Serialize: only one crash-protected region may be active at a time.
  Thread::muxAcquire(&_crash_mux, "CrashProtection");

  _protected_thread = Thread::current_or_null();
  assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");

  bool success = true;
  // Use structured exception handling (SEH) to catch any OS exception
  // raised inside the callback and unwind back here.
  __try {
    _crash_protection = this;
    cb.call();
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // only for protection, nothing to do
    success = false;
  }
  // Clear the protection state on both the success and exception paths
  // before releasing the mux.
  _crash_protection = NULL;
  _protected_thread = NULL;
  Thread::muxRelease(&_crash_mux);
  return success;
}
5146 
5147 
5148 class HighResolutionInterval : public CHeapObj<mtThread> {
5149   // The default timer resolution seems to be 10 milliseconds.
5150   // (Where is this written down?)
5151   // If someone wants to sleep for only a fraction of the default,
5152   // then we set the timer resolution down to 1 millisecond for
5153   // the duration of their interval.
5154   // We carefully set the resolution back, since otherwise we
5155   // seem to incur an overhead (3%?) that we don't need.
5156   // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
5157   // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
5158   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
5159   // timeBeginPeriod() if the relative error exceeded some threshold.
5160   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
5161   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
5162   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
5163   // resolution timers running.
5164  private:
5165   jlong resolution;
5166  public:
5167   HighResolutionInterval(jlong ms) {
5168     resolution = ms % 10L;
5169     if (resolution != 0) {
5170       MMRESULT result = timeBeginPeriod(1L);
5171     }
5172   }
5173   ~HighResolutionInterval() {
5174     if (resolution != 0) {
5175       MMRESULT result = timeEndPeriod(1L);
5176     }
5177     resolution = 0L;
5178   }
5179 };
5180 
5181 // An Event wraps a win32 "CreateEvent" kernel handle.
5182 //
5183 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5184 //
5185 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5186 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5187 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5188 //     In addition, an unpark() operation might fetch the handle field, but the
5189 //     event could recycle between the fetch and the SetEvent() operation.
5190 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5191 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
//     on a stale but recycled handle would be harmless, but in practice this might
5193 //     confuse other non-Sun code, so it's not a viable approach.
5194 //
5195 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5196 //     with the Event.  The event handle is never closed.  This could be construed
5197 //     as handle leakage, but only up to the maximum # of threads that have been extant
5198 //     at any one time.  This shouldn't be an issue, as windows platforms typically
5199 //     permit a process to have hundreds of thousands of open handles.
5200 //
5201 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5202 //     and release unused handles.
5203 //
5204 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5205 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5206 //
5207 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5208 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5209 //
5210 // We use (2).
5211 //
5212 // TODO-FIXME:
5213 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5214 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5215 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5216 // 3.  Collapse the JSR166 parker event, and the objectmonitor ParkEvent
5217 //     into a single win32 CreateEvent() handle.
5218 //
5219 // Assumption:
5220 //    Only one parker can exist on an event, which is why we allocate
5221 //    them per-thread. Multiple unparkers can coexist.
5222 //
5223 // _Event transitions in park()
5224 //   -1 => -1 : illegal
5225 //    1 =>  0 : pass - return immediately
5226 //    0 => -1 : block; then set _Event to 0 before returning
5227 //
5228 // _Event transitions in unpark()
5229 //    0 => 1 : just return
5230 //    1 => 1 : just return
5231 //   -1 => either 0 or 1; must signal target thread
5232 //         That is, we can safely transition _Event from -1 to either
5233 //         0 or 1.
5234 //
5235 // _Event serves as a restricted-range semaphore.
5236 //   -1 : thread is blocked, i.e. there is a waiter
5237 //    0 : neutral: thread is running or ready,
5238 //        could have been signaled after a wait started
5239 //    1 : signaled - thread is running or ready
5240 //
5241 // Another possible encoding of _Event would be with
5242 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5243 //
5244 
// Timed park. Returns OS_OK if the thread was unparked (or the event was
// already signaled), OS_TIMEOUT if the full interval elapsed.
int os::PlatformEvent::park(jlong Millis) {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL , "Invariant");
  guarantee(Millis > 0          , "Invariant");

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.
  // Consider: use atomic decrement instead of CAS-loop

  // Atomically decrement _Event (CAS loop); v holds the pre-decrement value.
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return OS_OK;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    // Request 1ms timer resolution for short waits unless the VM already
    // forces high resolution globally.
    HighResolutionInterval *phri = NULL;
    if (!ForceTimeHighResolution) {
      phri = new HighResolutionInterval(prd);
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
    delete phri; // if it is NULL, harmless
  }
  v = _Event;
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultaneous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}
5309 
// Untimed park: blocks until unpark() signals the event.
void os::PlatformEvent::park() {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Consider: use atomic decrement instead of CAS-loop
  // Atomically decrement _Event; v holds the pre-decrement value.
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  // Loop to tolerate spurious wakeups: only exit once _Event has been
  // raised by unpark().
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case multiple
  // unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}
5343 
// Signal the event; wakes a parked thread if there is one (_Event == -1),
// otherwise just leaves the event in the signaled (1) state.
void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // xchg returns the previous value; only a waiter (-1) needs a kernel signal.
  if (Atomic::xchg(&_Event, 1) >= 0) return;

  ::SetEvent(_ParkHandle);
}
5365 
5366 
5367 // JSR166
5368 // -------------------------------------------------------
5369 
5370 // The Windows implementation of Park is very straightforward: Basic
5371 // operations on Win32 Events turn out to have the right semantics to
// use them directly. We opportunistically reuse the event inherited
5373 // from Monitor.
5374 
// JSR166 park: block the current JavaThread until unpark(), interrupt, or
// timeout. 'time' is an absolute deadline in millis when isAbsolute, else a
// relative timeout in nanos (0 == wait indefinitely).
void Parker::park(bool isAbsolute, jlong time) {
  guarantee(_ParkEvent != NULL, "invariant");
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  } else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  } else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) {  // already elapsed
      return;
    }
  } else { // relative
    time /= 1000000;  // Must coarsen from nanos to millis
    if (time == 0) {  // Wait for the minimal time unit if zero
      time = 1;
    }
  }

  JavaThread* thread = JavaThread::current();

  // Don't wait if interrupted or already triggered
  // (a zero-timeout wait consumes a pending permit if present).
  if (thread->is_interrupted(false) ||
      WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
    ResetEvent(_ParkEvent);
    return;
  } else {
    // Transition to blocked state so safepoints/suspension see this thread
    // as waiting while it sleeps on the event.
    ThreadBlockInVM tbivm(thread);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
    thread->set_suspend_equivalent();

    WaitForSingleObject(_ParkEvent, time);
    // Clear the event so the permit is consumed regardless of how we woke.
    ResetEvent(_ParkEvent);

    // If externally suspended while waiting, re-suspend
    if (thread->handle_special_suspend_equivalent_condition()) {
      thread->java_suspend_self();
    }
  }
}
5415 
// JSR166 unpark: make the permit available by signaling the per-thread
// event. Safe to call from multiple threads concurrently.
void Parker::unpark() {
  guarantee(_ParkEvent != NULL, "invariant");
  SetEvent(_ParkEvent);
}
5420 
5421 // Platform Monitor implementation
5422 
5423 // Must already be locked
5424 int os::PlatformMonitor::wait(jlong millis) {
5425   assert(millis >= 0, "negative timeout");
5426   int ret = OS_TIMEOUT;
5427   int status = SleepConditionVariableCS(&_cond, &_mutex,
5428                                         millis == 0 ? INFINITE : millis);
5429   if (status != 0) {
5430     ret = OS_OK;
5431   }
5432   #ifndef PRODUCT
5433   else {
5434     DWORD err = GetLastError();
5435     assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
5436   }
5437   #endif
5438   return ret;
5439 }
5440 
5441 // Run the specified command in a separate process. Return its exit value,
5442 // or -1 on failure (e.g. can't create a new process).
5443 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
5444   STARTUPINFO si;
5445   PROCESS_INFORMATION pi;
5446   DWORD exit_code;
5447 
5448   char * cmd_string;
5449   const char * cmd_prefix = "cmd /C ";
5450   size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
5451   cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
5452   if (cmd_string == NULL) {
5453     return -1;
5454   }
5455   cmd_string[0] = '\0';
5456   strcat(cmd_string, cmd_prefix);
5457   strcat(cmd_string, cmd);
5458 
5459   // now replace all '\n' with '&'
5460   char * substring = cmd_string;
5461   while ((substring = strchr(substring, '\n')) != NULL) {
5462     substring[0] = '&';
5463     substring++;
5464   }
5465   memset(&si, 0, sizeof(si));
5466   si.cb = sizeof(si);
5467   memset(&pi, 0, sizeof(pi));
5468   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5469                             cmd_string,    // command line
5470                             NULL,   // process security attribute
5471                             NULL,   // thread security attribute
5472                             TRUE,   // inherits system handles
5473                             0,      // no creation flags
5474                             NULL,   // use parent's environment block
5475                             NULL,   // use parent's starting directory
5476                             &si,    // (in) startup information
5477                             &pi);   // (out) process information
5478 
5479   if (rslt) {
5480     // Wait until child process exits.
5481     WaitForSingleObject(pi.hProcess, INFINITE);
5482 
5483     GetExitCodeProcess(pi.hProcess, &exit_code);
5484 
5485     // Close process and thread handles.
5486     CloseHandle(pi.hProcess);
5487     CloseHandle(pi.hThread);
5488   } else {
5489     exit_code = -1;
5490   }
5491 
5492   FREE_C_HEAP_ARRAY(char, cmd_string);
5493   return (int)exit_code;
5494 }
5495 
// Print, to 'st', the library (basename only) and, when resolvable, the
// function name + offset containing 'addr'. Returns true if the address was
// located in some loaded library, false otherwise.
bool os::find(address addr, outputStream* st) {
  int offset = -1;
  bool result = false;
  char buf[256];
  if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
    st->print(PTR_FORMAT " ", addr);
    if (strlen(buf) < sizeof(buf) - 1) {
      // Strip the directory part of the path, keeping only the DLL name.
      char* p = strrchr(buf, '\\');
      if (p) {
        st->print("%s", p + 1);
      } else {
        st->print("%s", buf);
      }
    } else {
        // The library name is probably truncated. Let's omit the library name.
        // See also JDK-8147512.
    }
    if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
      st->print("::%s + 0x%x", buf, offset);
    }
    st->cr();
    result = true;
  }
  return result;
}
5521 
5522 static jint initSock() {
5523   WSADATA wsadata;
5524 
5525   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5526     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5527                 ::GetLastError());
5528     return JNI_ERR;
5529   }
5530   return JNI_OK;
5531 }
5532 
// Thin wrapper over Winsock gethostbyname().
struct hostent* os::get_host_by_name(char* name) {
  return (struct hostent*)gethostbyname(name);
}
5536 
// Close a socket via Winsock closesocket() (sockets are not CRT fds here).
int os::socket_close(int fd) {
  return ::closesocket(fd);
}
5540 
// Thin wrapper over Winsock socket().
int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}
5544 
// Thin wrapper over Winsock connect().
int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  return ::connect(fd, him, len);
}
5548 
// Thin wrapper over Winsock recv(); nBytes is narrowed to int as the
// Winsock API requires.
int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  return ::recv(fd, buf, (int)nBytes, flags);
}
5552 
// Thin wrapper over Winsock send(); nBytes is narrowed to int as the
// Winsock API requires.
int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}
5556 
// Same as os::send() on Windows: forwards directly to Winsock send().
int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}
5560 
5561 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5562 #if defined(IA32)
5563   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5564 #elif defined (AMD64)
5565   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5566 #endif
5567 
5568 // returns true if thread could be suspended,
5569 // false otherwise
5570 static bool do_suspend(HANDLE* h) {
5571   if (h != NULL) {
5572     if (SuspendThread(*h) != ~0) {
5573       return true;
5574     }
5575   }
5576   return false;
5577 }
5578 
5579 // resume the thread
5580 // calling resume on an active thread is a no-op
5581 static void do_resume(HANDLE* h) {
5582   if (h != NULL) {
5583     ResumeThread(*h);
5584   }
5585 }
5586 
// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value.
// On failure *h receives the NULL handle returned by OpenThread.
void get_thread_handle_for_extended_context(HANDLE* h,
                                            OSThread::thread_id_t tid) {
  if (h != NULL) {
    // Request the minimum access rights needed for suspend/resume and
    // context retrieval.
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}
5595 
// Thread sampling implementation
//
// Suspend the target thread, capture its register context, run the task's
// do_task() on that context, then resume the thread. Silently returns if a
// suitable thread handle cannot be obtained.
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT    ctxt;
  HANDLE     h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    // sampling_context_flags selects which register sets GetThreadContext
    // fills in (platform-dependent; see the #defines above).
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}
5625 
5626 bool os::start_debugging(char *buf, int buflen) {
5627   int len = (int)strlen(buf);
5628   char *p = &buf[len];
5629 
5630   jio_snprintf(p, buflen-len,
5631              "\n\n"
5632              "Do you want to debug the problem?\n\n"
5633              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5634              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5635              "Otherwise, select 'No' to abort...",
5636              os::current_process_id(), os::current_thread_id());
5637 
5638   bool yes = os::message_box("Unexpected Error", buf);
5639 
5640   if (yes) {
5641     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5642     // exception. If VM is running inside a debugger, the debugger will
5643     // catch the exception. Otherwise, the breakpoint exception will reach
5644     // the default windows exception handler, which can spawn a debugger and
5645     // automatically attach to the dying VM.
5646     os::breakpoint();
5647     yes = false;
5648   }
5649   return yes;
5650 }
5651 
// Returns the module handle of the current process's executable
// (GetModuleHandle(NULL)), used as the default handle for symbol lookup.
void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}
5655 
// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//            == false if only the base name of the library is passed in
//               such as "L"
// Returns a newly C-heap-allocated string (caller frees), or NULL on
// allocation failure or if the stripped library name would be empty.
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      // NOTE(review): 'len' is still the full path length here, not the
      // length after stripping the directory part — confirm intended.
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      // Skip the platform library prefix and drop the suffix from the count.
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // +2: one for the '_' separator, one for the terminating NUL.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    // Handle __stdcall-decorated symbols of the form _Agent_OnLoad@XX by
    // inserting the library name before the '@' suffix.
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}
5720 
5721 #ifndef PRODUCT
5722 
5723 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5724 // contiguous memory block at a particular address.
5725 // The test first tries to find a good approximate address to allocate at by using the same
5726 // method to allocate some memory at any address. The test then tries to allocate memory in
5727 // the vicinity (not directly after it to avoid possible by-chance use of that location)
5728 // This is of course only some dodgy assumption, there is no guarantee that the vicinity of
5729 // the previously allocated memory is available for allocation. The only actual failure
5730 // that is reported is when the test tries to allocate at a particular location but gets a
5731 // different valid one. A NULL return value at this point is not considered an error but may
5732 // be legitimate.
5733 void TestReserveMemorySpecial_test() {
5734   if (!UseLargePages) {
5735     return;
5736   }
5737   // save current value of globals
5738   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5739   bool old_use_numa_interleaving = UseNUMAInterleaving;
5740 
5741   // set globals to make sure we hit the correct code path
5742   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5743 
5744   // do an allocation at an address selected by the OS to get a good one.
5745   const size_t large_allocation_size = os::large_page_size() * 4;
5746   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5747   if (result == NULL) {
5748   } else {
5749     os::release_memory_special(result, large_allocation_size);
5750 
5751     // allocate another page within the recently allocated memory area which seems to be a good location. At least
5752     // we managed to get it once.
5753     const size_t expected_allocation_size = os::large_page_size();
5754     char* expected_location = result + os::large_page_size();
5755     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5756     if (actual_location == NULL) {
5757     } else {
5758       // release memory
5759       os::release_memory_special(actual_location, expected_allocation_size);
5760       // only now check, after releasing any memory to avoid any leaks.
5761       assert(actual_location == expected_location,
5762              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5763              expected_location, expected_allocation_size, actual_location);
5764     }
5765   }
5766 
5767   // restore globals
5768   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5769   UseNUMAInterleaving = old_use_numa_interleaving;
5770 }
5771 #endif // PRODUCT
5772 
5773 /*
5774   All the defined signal names for Windows.
5775 
5776   NOTE that not all of these names are accepted by FindSignal!
5777 
5778   For various reasons some of these may be rejected at runtime.
5779 
5780   Here are the names currently accepted by a user of sun.misc.Signal with
5781   1.4.1 (ignoring potential interaction with use of chaining, etc):
5782 
5783      (LIST TBD)
5784 
5785 */
5786 int os::get_signal_number(const char* name) {
5787   static const struct {
5788     const char* name;
5789     int         number;
5790   } siglabels [] =
5791     // derived from version 6.0 VC98/include/signal.h
5792   {"ABRT",      SIGABRT,        // abnormal termination triggered by abort cl
5793   "FPE",        SIGFPE,         // floating point exception
5794   "SEGV",       SIGSEGV,        // segment violation
5795   "INT",        SIGINT,         // interrupt
5796   "TERM",       SIGTERM,        // software term signal from kill
5797   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5798   "ILL",        SIGILL};        // illegal instruction
5799   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5800     if (strcmp(name, siglabels[i].name) == 0) {
5801       return siglabels[i].number;
5802     }
5803   }
5804   return -1;
5805 }
5806 
// Fast current thread access

// Offset (from FS of the thread pointer, per initialize_thread_ptr_offset()
// below) used for fast current-thread lookup; 0 until initialized.
int os::win32::_thread_ptr_offset = 0;
5810 
// Intentionally empty java_call_t target: passed to os_exception_wrapper()
// below purely so the wrapper runs and records the thread pointer offset.
static void call_wrapper_dummy() {}
5812 
// We need to call the os_exception_wrapper once so that it sets
// up the offset from FS of the thread pointer (stored in
// _thread_ptr_offset above). The dummy callback and NULL/empty
// arguments mean no actual Java call is made.
void os::win32::initialize_thread_ptr_offset() {
  os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
                           NULL, methodHandle(), NULL, NULL);
}
5819 
// Synchronous file mappings (map_sync) are not supported on this platform.
bool os::supports_map_sync() {
  return false;
}
--- EOF ---