1 /*
   2  * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "jvm.h"
  30 #include "classfile/classLoader.hpp"
  31 #include "classfile/systemDictionary.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "code/codeCache.hpp"
  34 #include "code/icBuffer.hpp"
  35 #include "code/vtableStubs.hpp"
  36 #include "compiler/compileBroker.hpp"
  37 #include "compiler/disassembler.hpp"
  38 #include "interpreter/interpreter.hpp"
  39 #include "logging/log.hpp"
  40 #include "logging/logStream.hpp"
  41 #include "memory/allocation.inline.hpp"
  42 #include "memory/filemap.hpp"
  43 #include "oops/oop.inline.hpp"
  44 #include "os_share_windows.hpp"
  45 #include "os_windows.inline.hpp"
  46 #include "prims/jniFastGetField.hpp"
  47 #include "prims/jvm_misc.hpp"
  48 #include "runtime/arguments.hpp"
  49 #include "runtime/atomic.hpp"
  50 #include "runtime/globals.hpp"
  51 #include "runtime/interfaceSupport.inline.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/objectMonitor.hpp"
  56 #include "runtime/orderAccess.hpp"
  57 #include "runtime/osThread.hpp"
  58 #include "runtime/perfMemory.hpp"
  59 #include "runtime/safepointMechanism.hpp"
  60 #include "runtime/sharedRuntime.hpp"
  61 #include "runtime/statSampler.hpp"
  62 #include "runtime/stubRoutines.hpp"
  63 #include "runtime/thread.inline.hpp"
  64 #include "runtime/threadCritical.hpp"
  65 #include "runtime/timer.hpp"
  66 #include "runtime/vm_version.hpp"
  67 #include "services/attachListener.hpp"
  68 #include "services/memTracker.hpp"
  69 #include "services/runtimeService.hpp"
  70 #include "utilities/align.hpp"
  71 #include "utilities/decoder.hpp"
  72 #include "utilities/defaultStream.hpp"
  73 #include "utilities/events.hpp"
  74 #include "utilities/macros.hpp"
  75 #include "utilities/vmError.hpp"
  76 #include "symbolengine.hpp"
  77 #include "windbghelp.hpp"
  78 
  79 #ifdef _DEBUG
  80 #include <crtdbg.h>
  81 #endif
  82 
  83 #include <windows.h>
  84 #include <sys/types.h>
  85 #include <sys/stat.h>
  86 #include <sys/timeb.h>
  87 #include <objidl.h>
  88 #include <shlobj.h>
  89 
  90 #include <malloc.h>
  91 #include <signal.h>
  92 #include <direct.h>
  93 #include <errno.h>
  94 #include <fcntl.h>
  95 #include <io.h>
  96 #include <process.h>              // For _beginthreadex(), _endthreadex()
  97 #include <imagehlp.h>             // For os::dll_address_to_function_name
  98 // for enumerating dll libraries
  99 #include <vdmdbg.h>
 100 #include <psapi.h>
 101 #include <mmsystem.h>
 102 #include <winsock2.h>
 103 
 104 // for timer info max values which include all bits
 105 #define ALL_64_BITS CONST64(-1)
 106 
 107 // For DLL loading/load error detection
 108 // Values of PE COFF
 109 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 110 #define IMAGE_FILE_SIGNATURE_LENGTH 4
 111 
 112 static HANDLE main_process;
 113 static HANDLE main_thread;
 114 static int    main_thread_id;
 115 
 116 static FILETIME process_creation_time;
 117 static FILETIME process_exit_time;
 118 static FILETIME process_user_time;
 119 static FILETIME process_kernel_time;
 120 
 121 #ifdef _M_AMD64
 122   #define __CPU__ amd64
 123 #else
 124   #define __CPU__ i486
 125 #endif
 126 
 127 #if INCLUDE_AOT
 128 PVOID  topLevelVectoredExceptionHandler = NULL;
 129 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 130 #endif
 131 
 132 // save DLL module handle, used by GetModuleFileName
 133 
 134 HINSTANCE vm_lib_handle;
 135 
 136 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 137   switch (reason) {
 138   case DLL_PROCESS_ATTACH:
 139     vm_lib_handle = hinst;
 140     if (ForceTimeHighResolution) {
 141       timeBeginPeriod(1L);
 142     }
 143     WindowsDbgHelp::pre_initialize();
 144     SymbolEngine::pre_initialize();
 145     break;
 146   case DLL_PROCESS_DETACH:
 147     if (ForceTimeHighResolution) {
 148       timeEndPeriod(1L);
 149     }
 150 #if INCLUDE_AOT
 151     if (topLevelVectoredExceptionHandler != NULL) {
 152       RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
 153       topLevelVectoredExceptionHandler = NULL;
 154     }
 155 #endif
 156     break;
 157   default:
 158     break;
 159   }
 160   return true;
 161 }
 162 
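     // Converts a FILETIME value (a 64-bit count of 100-nanosecond intervals,
     // split into two 32-bit halves) into seconds, expressed as a double.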
 163 static inline double fileTimeAsDouble(FILETIME* time) {
 164   const double high  = (double) ((unsigned int) ~0);
 165   const double split = 10000000.0;
 166   double result = (time->dwLowDateTime / split) +
 167                    time->dwHighDateTime * (high/split);
 168   return result;
 169 }
 170 
 171 // Implementation of os
 172 
 173 bool os::unsetenv(const char* name) {
 174   assert(name != NULL, "Null pointer");
 175   return (SetEnvironmentVariable(name, NULL) == TRUE);
 176 }
 177 
 178 // No setuid programs under Windows.
 179 bool os::have_special_privileges() {
 180   return false;
 181 }
 182 
 183 
 184 // This method is a periodic task to check for misbehaving JNI applications
 185 // under CheckJNI; we can add any periodic checks here.
 186 // On Windows it currently does nothing.
 187 void os::run_periodic_checks() {
 188   return;
 189 }
 190 
 191 // previous UnhandledExceptionFilter, if there is one
 192 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 193 
 194 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 195 
 196 void os::init_system_properties_values() {
 197   // sysclasspath, java_home, dll_dir
 198   {
 199     char *home_path;
 200     char *dll_path;
 201     char *pslash;
 202     const char *bin = "\\bin";
 203     char home_dir[MAX_PATH + 1];
 204     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 205 
 206     if (alt_home_dir != NULL)  {
 207       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 208       home_dir[MAX_PATH] = '\0';
 209     } else {
 210       os::jvm_path(home_dir, sizeof(home_dir));
 211       // Found the full path to jvm.dll.
 212       // Now cut the path to <java_home>/jre if we can.
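           // (Illustrative, hypothetical path: C:\jdk\bin\server\jvm.dll
           //  -> C:\jdk\bin\server -> C:\jdk\bin -> C:\jdk)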
 213       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 214       pslash = strrchr(home_dir, '\\');
 215       if (pslash != NULL) {
 216         *pslash = '\0';                   // get rid of \{client|server}
 217         pslash = strrchr(home_dir, '\\');
 218         if (pslash != NULL) {
 219           *pslash = '\0';                 // get rid of \bin
 220         }
 221       }
 222     }
 223 
 224     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 225     strcpy(home_path, home_dir);
 226     Arguments::set_java_home(home_path);
 227     FREE_C_HEAP_ARRAY(char, home_path);
 228 
 229     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 230                                 mtInternal);
 231     strcpy(dll_path, home_dir);
 232     strcat(dll_path, bin);
 233     Arguments::set_dll_dir(dll_path);
 234     FREE_C_HEAP_ARRAY(char, dll_path);
 235 
 236     if (!set_boot_path('\\', ';')) {
 237       vm_exit_during_initialization("Failed setting boot class path.", NULL);
 238     }
 239   }
 240 
 241 // library_path
 242 #define EXT_DIR "\\lib\\ext"
 243 #define BIN_DIR "\\bin"
 244 #define PACKAGE_DIR "\\Sun\\Java"
 245   {
 246     // Win32 library search order (See the documentation for LoadLibrary):
 247     //
 248     // 1. The directory from which the application is loaded.
 249     // 2. The system wide Java Extensions directory (Java only)
 250     // 3. System directory (GetSystemDirectory)
 251     // 4. Windows directory (GetWindowsDirectory)
 252     // 5. The PATH environment variable
 253     // 6. The current directory
 254 
 255     char *library_path;
 256     char tmp[MAX_PATH];
 257     char *path_str = ::getenv("PATH");
 258 
 259     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 260                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 261 
 262     library_path[0] = '\0';
 263 
 264     GetModuleFileName(NULL, tmp, sizeof(tmp));
 265     *(strrchr(tmp, '\\')) = '\0';
 266     strcat(library_path, tmp);
 267 
 268     GetWindowsDirectory(tmp, sizeof(tmp));
 269     strcat(library_path, ";");
 270     strcat(library_path, tmp);
 271     strcat(library_path, PACKAGE_DIR BIN_DIR);
 272 
 273     GetSystemDirectory(tmp, sizeof(tmp));
 274     strcat(library_path, ";");
 275     strcat(library_path, tmp);
 276 
 277     GetWindowsDirectory(tmp, sizeof(tmp));
 278     strcat(library_path, ";");
 279     strcat(library_path, tmp);
 280 
 281     if (path_str) {
 282       strcat(library_path, ";");
 283       strcat(library_path, path_str);
 284     }
 285 
 286     strcat(library_path, ";.");
 287 
 288     Arguments::set_library_path(library_path);
 289     FREE_C_HEAP_ARRAY(char, library_path);
 290   }
 291 
 292   // Default extensions directory
 293   {
 294     char path[MAX_PATH];
 295     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 296     GetWindowsDirectory(path, MAX_PATH);
 297     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 298             path, PACKAGE_DIR, EXT_DIR);
 299     Arguments::set_ext_dirs(buf);
 300   }
 301   #undef EXT_DIR
 302   #undef BIN_DIR
 303   #undef PACKAGE_DIR
 304 
 305 #ifndef _WIN64
 306   // set our UnhandledExceptionFilter and save any previous one
 307   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 308 #endif
 309 
 310   // Done
 311   return;
 312 }
 313 
 314 void os::breakpoint() {
 315   DebugBreak();
 316 }
 317 
 318 // Invoked from the BREAKPOINT Macro
 319 extern "C" void breakpoint() {
 320   os::breakpoint();
 321 }
 322 
 323 // The RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 324 // So far, this method is only used by Native Memory Tracking, which is
 325 // only supported on Windows XP or later.
 326 //
 327 int os::get_native_stack(address* stack, int frames, int toSkip) {
 328   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 329   for (int index = captured; index < frames; index ++) {
 330     stack[index] = NULL;
 331   }
 332   return captured;
 333 }
 334 
 335 
 336 // os::current_stack_base()
 337 //
 338 //   Returns the base of the stack, which is the stack's
 339 //   starting address.  This function must be called
 340 //   while running on the stack of the thread being queried.
 341 
 342 address os::current_stack_base() {
 343   MEMORY_BASIC_INFORMATION minfo;
 344   address stack_bottom;
 345   size_t stack_size;
 346 
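       // VirtualQuery on the address of 'minfo' (a local variable, hence on the
       // current thread's stack) returns information about the memory region
       // that contains the current stack.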
 347   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 348   stack_bottom =  (address)minfo.AllocationBase;
 349   stack_size = minfo.RegionSize;
 350 
 351   // Add up the sizes of all the regions with the same
 352   // AllocationBase.
 353   while (1) {
 354     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 355     if (stack_bottom == (address)minfo.AllocationBase) {
 356       stack_size += minfo.RegionSize;
 357     } else {
 358       break;
 359     }
 360   }
 361   return stack_bottom + stack_size;
 362 }
 363 
 364 size_t os::current_stack_size() {
 365   size_t sz;
 366   MEMORY_BASIC_INFORMATION minfo;
 367   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 368   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 369   return sz;
 370 }
 371 
 372 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
 373   MEMORY_BASIC_INFORMATION minfo;
 374   committed_start = NULL;
 375   committed_size = 0;
 376   address top = start + size;
 377   const address start_addr = start;
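       // Walk the range [start, top) one VirtualQuery region at a time,
       // accumulating the first contiguous run of committed memory and
       // stopping at the first uncommitted region that follows it.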
 378   while (start < top) {
 379     VirtualQuery(start, &minfo, sizeof(minfo));
 380     if ((minfo.State & MEM_COMMIT) == 0) {  // not committed
 381       if (committed_start != NULL) {
 382         break;
 383       }
 384     } else {  // committed
 385       if (committed_start == NULL) {
 386         committed_start = start;
 387       }
 388       size_t offset = start - (address)minfo.BaseAddress;
 389       committed_size += minfo.RegionSize - offset;
 390     }
 391     start = (address)minfo.BaseAddress + minfo.RegionSize;
 392   }
 393 
 394   if (committed_start == NULL) {
 395     assert(committed_size == 0, "Sanity");
 396     return false;
 397   } else {
 398     assert(committed_start >= start_addr && committed_start < top, "Out of range");
 399     // current region may go beyond the limit, trim to the limit
 400     committed_size = MIN2(committed_size, size_t(top - committed_start));
 401     return true;
 402   }
 403 }
 404 
 405 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 406   const struct tm* time_struct_ptr = localtime(clock);
 407   if (time_struct_ptr != NULL) {
 408     *res = *time_struct_ptr;
 409     return res;
 410   }
 411   return NULL;
 412 }
 413 
 414 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 415   const struct tm* time_struct_ptr = gmtime(clock);
 416   if (time_struct_ptr != NULL) {
 417     *res = *time_struct_ptr;
 418     return res;
 419   }
 420   return NULL;
 421 }
 422 
 423 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 424 
 425 // Thread start routine for all newly created threads
 426 static unsigned __stdcall thread_native_entry(Thread* thread) {
 427 
 428   thread->record_stack_base_and_size();
 429 
 430   // Try to randomize the cache line index of hot stack frames.
 431   // This helps when threads with the same stack traces evict each other's
 432   // cache lines. The threads can be either from the same JVM instance or
 433   // from different JVM instances. The benefit is especially noticeable on
 434   // processors with hyperthreading technology.
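       // ((pid ^ counter) & 7) yields a value in [0, 7], so successive threads
       // offset their initial stack pointer by 0..896 bytes in 128-byte steps.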
 435   static int counter = 0;
 436   int pid = os::current_process_id();
 437   _alloca(((pid ^ counter++) & 7) * 128);
 438 
 439   thread->initialize_thread_current();
 440 
 441   OSThread* osthr = thread->osthread();
 442   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 443 
 444   if (UseNUMA) {
 445     int lgrp_id = os::numa_get_group_id();
 446     if (lgrp_id != -1) {
 447       thread->set_lgrp_id(lgrp_id);
 448     }
 449   }
 450 
 451   // Diagnostic code to investigate JDK-6573254
 452   int res = 30115;  // non-java thread
 453   if (thread->is_Java_thread()) {
 454     res = 20115;    // java thread
 455   }
 456 
 457   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 458 
 459   // Install a win32 structured exception handler around every thread created
 460   // by the VM, so the VM can generate an error dump when an exception occurs
 461   // in a non-Java thread (e.g. the VM thread).
 462   __try {
 463     thread->call_run();
 464   } __except(topLevelExceptionFilter(
 465                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 466     // Nothing to do.
 467   }
 468 
 469   // Note: at this point the thread object may already have deleted itself.
 470   // Do not dereference it from here on out.
 471 
 472   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 473 
 474   // One less thread is executing
 475   // When the VMThread gets here, the main thread may have already exited
 476   // which frees the CodeHeap containing the Atomic::add code
 477   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 478     Atomic::dec(&os::win32::_os_thread_count);
 479   }
 480 
 481   // Thread must not return from exit_process_or_thread(), but if it does,
 482   // let it proceed to exit normally
 483   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 484 }
 485 
 486 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 487                                   int thread_id) {
 488   // Allocate the OSThread object
 489   OSThread* osthread = new OSThread(NULL, NULL);
 490   if (osthread == NULL) return NULL;
 491 
 492   // Initialize the JDK library's interrupt event.
 493   // This should really be done when OSThread is constructed,
 494   // but there is no way for a constructor to report failure to
 495   // allocate the event.
 496   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 497   if (interrupt_event == NULL) {
 498     delete osthread;
 499     return NULL;
 500   }
 501   osthread->set_interrupt_event(interrupt_event);
 502 
 503   // Store info on the Win32 thread into the OSThread
 504   osthread->set_thread_handle(thread_handle);
 505   osthread->set_thread_id(thread_id);
 506 
 507   if (UseNUMA) {
 508     int lgrp_id = os::numa_get_group_id();
 509     if (lgrp_id != -1) {
 510       thread->set_lgrp_id(lgrp_id);
 511     }
 512   }
 513 
 514   // Initial thread state is INITIALIZED, not SUSPENDED
 515   osthread->set_state(INITIALIZED);
 516 
 517   return osthread;
 518 }
 519 
 520 
 521 bool os::create_attached_thread(JavaThread* thread) {
 522 #ifdef ASSERT
 523   thread->verify_not_published();
 524 #endif
 525   HANDLE thread_h;
 526   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 527                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 528     fatal("DuplicateHandle failed\n");
 529   }
 530   OSThread* osthread = create_os_thread(thread, thread_h,
 531                                         (int)current_thread_id());
 532   if (osthread == NULL) {
 533     return false;
 534   }
 535 
 536   // Initial thread state is RUNNABLE
 537   osthread->set_state(RUNNABLE);
 538 
 539   thread->set_osthread(osthread);
 540 
 541   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 542     os::current_thread_id());
 543 
 544   return true;
 545 }
 546 
 547 bool os::create_main_thread(JavaThread* thread) {
 548 #ifdef ASSERT
 549   thread->verify_not_published();
 550 #endif
 551   if (_starting_thread == NULL) {
 552     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 553     if (_starting_thread == NULL) {
 554       return false;
 555     }
 556   }
 557 
 558   // The primordial thread is runnable from the start.
 559   _starting_thread->set_state(RUNNABLE);
 560 
 561   thread->set_osthread(_starting_thread);
 562   return true;
 563 }
 564 
 565 // Helper function to trace _beginthreadex attributes,
 566 //  similar to os::Posix::describe_pthread_attr()
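     // Example output (illustrative): "stacksize: 512k, flags: CREATE_SUSPENDED STACK_SIZE_PARAM_IS_A_RESERVATION "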
 567 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 568                                                size_t stacksize, unsigned initflag) {
 569   stringStream ss(buf, buflen);
 570   if (stacksize == 0) {
 571     ss.print("stacksize: default, ");
 572   } else {
 573     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 574   }
 575   ss.print("flags: ");
 576   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 577   #define ALL(X) \
 578     X(CREATE_SUSPENDED) \
 579     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 580   ALL(PRINT_FLAG)
 581   #undef ALL
 582   #undef PRINT_FLAG
 583   return buf;
 584 }
 585 
 586 // Allocate and initialize a new OSThread
 587 bool os::create_thread(Thread* thread, ThreadType thr_type,
 588                        size_t stack_size) {
 589   unsigned thread_id;
 590 
 591   // Allocate the OSThread object
 592   OSThread* osthread = new OSThread(NULL, NULL);
 593   if (osthread == NULL) {
 594     return false;
 595   }
 596 
 597   // Initialize the JDK library's interrupt event.
 598   // This should really be done when OSThread is constructed,
 599   // but there is no way for a constructor to report failure to
 600   // allocate the event.
 601   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 602   if (interrupt_event == NULL) {
 603     delete osthread;
 604     return false;
 605   }
 606   osthread->set_interrupt_event(interrupt_event);
 607   // We don't call set_interrupted(false) as it will trip the assert in there
 608   // as we are not operating on the current thread. We don't need to call it
 609   // because the initial state is already correct.
 610 
 611   thread->set_osthread(osthread);
 612 
 613   if (stack_size == 0) {
 614     switch (thr_type) {
 615     case os::java_thread:
 616       // Java threads use ThreadStackSize, whose default value can be changed with the flag -Xss
 617       if (JavaThread::stack_size_at_create() > 0) {
 618         stack_size = JavaThread::stack_size_at_create();
 619       }
 620       break;
 621     case os::compiler_thread:
 622       if (CompilerThreadStackSize > 0) {
 623         stack_size = (size_t)(CompilerThreadStackSize * K);
 624         break;
 625       } // else fall through:
 626         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 627     case os::vm_thread:
 628     case os::pgc_thread:
 629     case os::cgc_thread:
 630     case os::watcher_thread:
 631       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 632       break;
 633     }
 634   }
 635 
 636   // Create the Win32 thread
 637   //
 638   // Contrary to what the MSDN documentation says, "stack_size" in _beginthreadex()
 639   // does not specify the stack size. Instead, it specifies the size of the
 640   // initially committed space. The stack size is determined by the
 641   // PE header in the executable. If the committed "stack_size" is larger
 642   // than the default value in the PE header, the stack is rounded up to the
 643   // nearest multiple of 1MB. For example, if the launcher has a default
 644   // stack size of 320k, specifying any size less than 320k does not
 645   // affect the actual stack size at all; it only affects the initial
 646   // commitment. On the other hand, specifying a 'stack_size' larger than the
 647   // default value may cause a significant increase in memory usage, because
 648   // not only is the stack size rounded up to a multiple of 1MB, but the
 649   // entire space is also committed upfront.
 650   //
 651   // Finally, Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
 652   // for CreateThread() that treats 'stack_size' as the stack reservation size.
 653   // However, we are not supposed to call CreateThread() directly according to
 654   // the MSDN documentation, because the JVM uses the C runtime library. The
 655   // good news is that the flag appears to work with _beginthreadex() as well.
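       //
       // Illustrative example (hypothetical numbers): with a 320k default in the
       // PE header, requesting 64k here still yields a 320k stack; requesting 2M
       // without the flag reserves and commits the full 2M upfront, while
       // requesting 2M with STACK_SIZE_PARAM_IS_A_RESERVATION reserves 2M and
       // commits pages only as the stack grows.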
 656 
 657   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 658   HANDLE thread_handle =
 659     (HANDLE)_beginthreadex(NULL,
 660                            (unsigned)stack_size,
 661                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 662                            thread,
 663                            initflag,
 664                            &thread_id);
 665 
 666   char buf[64];
 667   if (thread_handle != NULL) {
 668     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 669       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 670   } else {
 671     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 672       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 673     // Log some OS information which might explain why creating the thread failed.
 674     log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
 675     LogStream st(Log(os, thread)::info());
 676     os::print_memory_info(&st);
 677   }
 678 
 679   if (thread_handle == NULL) {
 680     // Need to clean up stuff we've allocated so far
 681     thread->set_osthread(NULL);
 682     delete osthread;
 683     return false;
 684   }
 685 
 686   Atomic::inc(&os::win32::_os_thread_count);
 687 
 688   // Store info on the Win32 thread into the OSThread
 689   osthread->set_thread_handle(thread_handle);
 690   osthread->set_thread_id(thread_id);
 691 
 692   // Initial thread state is INITIALIZED, not SUSPENDED
 693   osthread->set_state(INITIALIZED);
 694 
 695   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 696   return true;
 697 }
 698 
 699 
 700 // Free Win32 resources related to the OSThread
 701 void os::free_thread(OSThread* osthread) {
 702   assert(osthread != NULL, "osthread not set");
 703 
 704   // We are told to free resources of the argument thread,
 705   // but we can only really operate on the current thread.
 706   assert(Thread::current()->osthread() == osthread,
 707          "os::free_thread but not current thread");
 708 
 709   CloseHandle(osthread->thread_handle());
 710   delete osthread;
 711 }
 712 
 713 static jlong first_filetime;
 714 static jlong initial_performance_count;
 715 static jlong performance_frequency;
 716 
 717 
 718 jlong as_long(LARGE_INTEGER x) {
 719   jlong result = 0; // initialization to avoid warning
 720   set_high(&result, x.HighPart);
 721   set_low(&result, x.LowPart);
 722   return result;
 723 }
 724 
 725 
 726 jlong os::elapsed_counter() {
 727   LARGE_INTEGER count;
 728   QueryPerformanceCounter(&count);
 729   return as_long(count) - initial_performance_count;
 730 }
 731 
 732 
 733 jlong os::elapsed_frequency() {
 734   return performance_frequency;
 735 }
 736 
 737 
 738 julong os::available_memory() {
 739   return win32::available_memory();
 740 }
 741 
 742 julong os::win32::available_memory() {
 743   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an incorrect
 744   // value if total memory is larger than 4GB
 745   MEMORYSTATUSEX ms;
 746   ms.dwLength = sizeof(ms);
 747   GlobalMemoryStatusEx(&ms);
 748 
 749   return (julong)ms.ullAvailPhys;
 750 }
 751 
 752 julong os::physical_memory() {
 753   return win32::physical_memory();
 754 }
 755 
 756 bool os::has_allocatable_memory_limit(julong* limit) {
 757   MEMORYSTATUSEX ms;
 758   ms.dwLength = sizeof(ms);
 759   GlobalMemoryStatusEx(&ms);
 760 #ifdef _LP64
 761   *limit = (julong)ms.ullAvailVirtual;
 762   return true;
 763 #else
 764   // Limit to 1400m because of the 2gb address space wall
 765   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 766   return true;
 767 #endif
 768 }
 769 
 770 int os::active_processor_count() {
 771   // User has overridden the number of active processors
 772   if (ActiveProcessorCount > 0) {
 773     log_trace(os)("active_processor_count: "
 774                   "active processor count set by user : %d",
 775                   ActiveProcessorCount);
 776     return ActiveProcessorCount;
 777   }
 778 
 779   DWORD_PTR lpProcessAffinityMask = 0;
 780   DWORD_PTR lpSystemAffinityMask = 0;
 781   int proc_count = processor_count();
 782   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 783       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
 784     // The number of active processors is the number of set bits in the process affinity mask.
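         // (e.g. an affinity mask of 0x0F corresponds to 4 active processors).
         // The loop below clears the lowest set bit on each iteration.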
 785     int bitcount = 0;
 786     while (lpProcessAffinityMask != 0) {
 787       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 788       bitcount++;
 789     }
 790     return bitcount;
 791   } else {
 792     return proc_count;
 793   }
 794 }
 795 
 796 uint os::processor_id() {
 797   return (uint)GetCurrentProcessorNumber();
 798 }
 799 
 800 void os::set_native_thread_name(const char *name) {
 801 
 802   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 803   //
 804   // Note that unfortunately this only works if the process
 805   // is already attached to a debugger; the debugger must observe
 806   // the exception below to show the correct name.
 807 
 808   // If there is no debugger attached skip raising the exception
 809   if (!IsDebuggerPresent()) {
 810     return;
 811   }
 812 
 813   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 814   struct {
 815     DWORD dwType;     // must be 0x1000
 816     LPCSTR szName;    // pointer to name (in user addr space)
 817     DWORD dwThreadID; // thread ID (-1=caller thread)
 818     DWORD dwFlags;    // reserved for future use, must be zero
 819   } info;
 820 
 821   info.dwType = 0x1000;
 822   info.szName = name;
 823   info.dwThreadID = -1;
 824   info.dwFlags = 0;
 825 
 826   __try {
 827     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 828   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 829 }
 830 
 831 bool os::bind_to_processor(uint processor_id) {
 832   // Not yet implemented.
 833   return false;
 834 }
 835 
 836 void os::win32::initialize_performance_counter() {
 837   LARGE_INTEGER count;
 838   QueryPerformanceFrequency(&count);
 839   performance_frequency = as_long(count);
 840   QueryPerformanceCounter(&count);
 841   initial_performance_count = as_long(count);
 842 }
 843 
 844 
 845 double os::elapsedTime() {
 846   return (double) elapsed_counter() / (double) elapsed_frequency();
 847 }
 848 
 849 
 850 // Windows format:
 851 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 852 // Java format:
 853 //   Java standards require the number of milliseconds since 1/1/1970
 854 
 855 // Constant offset - calculated using offset()
 856 static jlong  _offset   = 116444736000000000;
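     // (116444736000000000 = 11,644,473,600 seconds between 1601-01-01 and
     // 1970-01-01, expressed in 100-nanosecond FILETIME units.)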
 857 // Fake time counter for reproducible results when debugging
 858 static jlong  fake_time = 0;
 859 
 860 #ifdef ASSERT
 861 // Just to be safe, recalculate the offset in debug mode
 862 static jlong _calculated_offset = 0;
 863 static int   _has_calculated_offset = 0;
 864 
 865 jlong offset() {
 866   if (_has_calculated_offset) return _calculated_offset;
 867   SYSTEMTIME java_origin;
 868   java_origin.wYear          = 1970;
 869   java_origin.wMonth         = 1;
 870   java_origin.wDayOfWeek     = 0; // ignored
 871   java_origin.wDay           = 1;
 872   java_origin.wHour          = 0;
 873   java_origin.wMinute        = 0;
 874   java_origin.wSecond        = 0;
 875   java_origin.wMilliseconds  = 0;
 876   FILETIME jot;
 877   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 878     fatal("Error = %d\nWindows error", GetLastError());
 879   }
 880   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 881   _has_calculated_offset = 1;
 882   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 883   return _calculated_offset;
 884 }
 885 #else
 886 jlong offset() {
 887   return _offset;
 888 }
 889 #endif
 890 
 891 jlong windows_to_java_time(FILETIME wt) {
 892   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 893   return (a - offset()) / 10000;
 894 }
 895 
 896 // Returns time ticks in tenths of microseconds (i.e. 100-nanosecond units)
 897 jlong windows_to_time_ticks(FILETIME wt) {
 898   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 899   return (a - offset());
 900 }
 901 
 902 FILETIME java_to_windows_time(jlong l) {
 903   jlong a = (l * 10000) + offset();
 904   FILETIME result;
 905   result.dwHighDateTime = high(a);
 906   result.dwLowDateTime  = low(a);
 907   return result;
 908 }
 909 
 910 bool os::supports_vtime() { return true; }
 911 
 912 double os::elapsedVTime() {
 913   FILETIME created;
 914   FILETIME exited;
 915   FILETIME kernel;
 916   FILETIME user;
 917   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 918     // the resolution of windows_to_java_time() should be sufficient (ms)
 919     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 920   } else {
 921     return elapsedTime();
 922   }
 923 }
 924 
 925 jlong os::javaTimeMillis() {
 926   FILETIME wt;
 927   GetSystemTimeAsFileTime(&wt);
 928   return windows_to_java_time(wt);
 929 }
 930 
 931 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 932   FILETIME wt;
 933   GetSystemTimeAsFileTime(&wt);
 934   jlong ticks = windows_to_time_ticks(wt); // 10th of micros
 935   jlong secs = jlong(ticks / 10000000); // 10,000 ticks/ms * 1,000 ms/s
 936   seconds = secs;
 937   nanos = jlong(ticks - (secs*10000000)) * 100;
 938 }
 939 
 940 jlong os::javaTimeNanos() {
 941   LARGE_INTEGER current_count;
 942   QueryPerformanceCounter(&current_count);
 943   double current = as_long(current_count);
 944   double freq = performance_frequency;
 945   jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 946   return time;
 947 }
 948 
 949 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 950   jlong freq = performance_frequency;
 951   if (freq < NANOSECS_PER_SEC) {
 952     // the performance counter is 64 bits and we will
 953     // be multiplying it -- so no wrap in 64 bits
 954     info_ptr->max_value = ALL_64_BITS;
 955   } else if (freq > NANOSECS_PER_SEC) {
 956     // use the max value the counter can reach to
 957     // determine the max value which could be returned
 958     julong max_counter = (julong)ALL_64_BITS;
 959     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 960   } else {
 961     // the performance counter is 64 bits and we will
 962     // be using it directly -- so no wrap in 64 bits
 963     info_ptr->max_value = ALL_64_BITS;
 964   }
 965 
 966   // using a counter, so no skipping
 967   info_ptr->may_skip_backward = false;
 968   info_ptr->may_skip_forward = false;
 969 
 970   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 971 }
 972 
 973 char* os::local_time_string(char *buf, size_t buflen) {
 974   SYSTEMTIME st;
 975   GetLocalTime(&st);
 976   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 977                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 978   return buf;
 979 }
 980 
 981 bool os::getTimesSecs(double* process_real_time,
 982                       double* process_user_time,
 983                       double* process_system_time) {
 984   HANDLE h_process = GetCurrentProcess();
 985   FILETIME create_time, exit_time, kernel_time, user_time;
 986   BOOL result = GetProcessTimes(h_process,
 987                                 &create_time,
 988                                 &exit_time,
 989                                 &kernel_time,
 990                                 &user_time);
 991   if (result != 0) {
 992     FILETIME wt;
 993     GetSystemTimeAsFileTime(&wt);
 994     jlong rtc_millis = windows_to_java_time(wt);
 995     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
 996     *process_user_time =
 997       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
 998     *process_system_time =
 999       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
1000     return true;
1001   } else {
1002     return false;
1003   }
1004 }
1005 
1006 void os::shutdown() {
1007   // allow PerfMemory to attempt cleanup of any persistent resources
1008   perfMemory_exit();
1009 
1010   // flush buffered output, finish log files
1011   ostream_abort();
1012 
1013   // Check for abort hook
1014   abort_hook_t abort_hook = Arguments::abort_hook();
1015   if (abort_hook != NULL) {
1016     abort_hook();
1017   }
1018 }
1019 
1020 
1021 static HANDLE dumpFile = NULL;
1022 
1023 // Check if dump file can be created.
1024 void os::check_dump_limit(char* buffer, size_t buffsz) {
1025   bool status = true;
1026   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1027     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1028     status = false;
1029   }
1030 
1031 #ifndef ASSERT
1032   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1033     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1034     status = false;
1035   }
1036 #endif
1037 
1038   if (status) {
1039     const char* cwd = get_current_directory(NULL, 0);
1040     int pid = current_process_id();
1041     if (cwd != NULL) {
1042       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1043     } else {
1044       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1045     }
1046 
1047     if (dumpFile == NULL &&
1048        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1049                  == INVALID_HANDLE_VALUE) {
1050       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1051       status = false;
1052     }
1053   }
1054   VMError::record_coredump_status(buffer, status);
1055 }
1056 
1057 void os::abort(bool dump_core, void* siginfo, const void* context) {
1058   EXCEPTION_POINTERS ep;
1059   MINIDUMP_EXCEPTION_INFORMATION mei;
1060   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1061 
1062   HANDLE hProcess = GetCurrentProcess();
1063   DWORD processId = GetCurrentProcessId();
1064   MINIDUMP_TYPE dumpType;
1065 
1066   shutdown();
1067   if (!dump_core || dumpFile == NULL) {
1068     if (dumpFile != NULL) {
1069       CloseHandle(dumpFile);
1070     }
1071     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1072   }
1073 
1074   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1075     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1076 
1077   if (siginfo != NULL && context != NULL) {
1078     ep.ContextRecord = (PCONTEXT) context;
1079     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1080 
1081     mei.ThreadId = GetCurrentThreadId();
1082     mei.ExceptionPointers = &ep;
1083     pmei = &mei;
1084   } else {
1085     pmei = NULL;
1086   }
1087 
1088   // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
1089   // the dump types we really want. If the first call fails, fall back to just using MiniDumpWithFullMemory.
1090   if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
1091       !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
1092     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1093   }
1094   CloseHandle(dumpFile);
1095   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1096 }
1097 
1098 // Die immediately, no exit hook, no abort hook, no cleanup.
1099 void os::die() {
1100   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1101 }
1102 
1103 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1104 //  * dirent_md.c       1.15 00/02/02
1105 //
1106 // The declarations for DIR and struct dirent are in jvm_win32.h.
1107 
1108 // Caller must have already run dirname through JVM_NativePath, which removes
1109 // duplicate slashes and converts all instances of '/' into '\\'.
1110 
1111 DIR * os::opendir(const char *dirname) {
1112   assert(dirname != NULL, "just checking");   // hotspot change
1113   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1114   DWORD fattr;                                // hotspot change
1115   char alt_dirname[4] = { 0, 0, 0, 0 };
1116 
1117   if (dirp == 0) {
1118     errno = ENOMEM;
1119     return 0;
1120   }
1121 
1122   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1123   // as a directory in FindFirstFile().  We detect this case here and
1124   // prepend the current drive name.
1125   //
1126   if (dirname[1] == '\0' && dirname[0] == '\\') {
1127     alt_dirname[0] = _getdrive() + 'A' - 1;
1128     alt_dirname[1] = ':';
1129     alt_dirname[2] = '\\';
1130     alt_dirname[3] = '\0';
1131     dirname = alt_dirname;
1132   }
1133 
1134   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1135   if (dirp->path == 0) {
1136     free(dirp);
1137     errno = ENOMEM;
1138     return 0;
1139   }
1140   strcpy(dirp->path, dirname);
1141 
1142   fattr = GetFileAttributes(dirp->path);
1143   if (fattr == 0xffffffff) {
1144     free(dirp->path);
1145     free(dirp);
1146     errno = ENOENT;
1147     return 0;
1148   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1149     free(dirp->path);
1150     free(dirp);
1151     errno = ENOTDIR;
1152     return 0;
1153   }
1154 
1155   // Append "*.*", or possibly "\\*.*", to path
1156   if (dirp->path[1] == ':' &&
1157       (dirp->path[2] == '\0' ||
1158       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1159     // No '\\' needed for cases like "Z:" or "Z:\"
1160     strcat(dirp->path, "*.*");
1161   } else {
1162     strcat(dirp->path, "\\*.*");
1163   }
1164 
1165   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1166   if (dirp->handle == INVALID_HANDLE_VALUE) {
1167     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1168       free(dirp->path);
1169       free(dirp);
1170       errno = EACCES;
1171       return 0;
1172     }
1173   }
1174   return dirp;
1175 }
1176 
1177 struct dirent * os::readdir(DIR *dirp) {
1178   assert(dirp != NULL, "just checking");      // hotspot change
1179   if (dirp->handle == INVALID_HANDLE_VALUE) {
1180     return NULL;
1181   }
1182 
1183   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1184 
1185   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1186     if (GetLastError() == ERROR_INVALID_HANDLE) {
1187       errno = EBADF;
1188       return NULL;
1189     }
1190     FindClose(dirp->handle);
1191     dirp->handle = INVALID_HANDLE_VALUE;
1192   }
1193 
1194   return &dirp->dirent;
1195 }
1196 
1197 int os::closedir(DIR *dirp) {
1198   assert(dirp != NULL, "just checking");      // hotspot change
1199   if (dirp->handle != INVALID_HANDLE_VALUE) {
1200     if (!FindClose(dirp->handle)) {
1201       errno = EBADF;
1202       return -1;
1203     }
1204     dirp->handle = INVALID_HANDLE_VALUE;
1205   }
1206   free(dirp->path);
1207   free(dirp);
1208   return 0;
1209 }
1210 
1211 // This must be hard coded because it's the system's temporary
1212 // directory, not the Java application's temp directory (java.io.tmpdir).
1213 const char* os::get_temp_directory() {
1214   static char path_buf[MAX_PATH];
1215   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1216     return path_buf;
1217   } else {
1218     path_buf[0] = '\0';
1219     return path_buf;
1220   }
1221 }
1222 
1223 // Needs to be in the OS-specific directory because Windows requires another
1224 // header file, <direct.h>
1225 const char* os::get_current_directory(char *buf, size_t buflen) {
1226   int n = static_cast<int>(buflen);
1227   if (buflen > INT_MAX)  n = INT_MAX;
1228   return _getcwd(buf, n);
1229 }
1230 
1231 //-----------------------------------------------------------
1232 // Helper functions for fatal error handler
1233 #ifdef _WIN64
1234 // Helper routine which returns true if the address is
1235 // within the NTDLL address space.
1236 //
1237 static bool _addr_in_ntdll(address addr) {
1238   HMODULE hmod;
1239   MODULEINFO minfo;
1240 
1241   hmod = GetModuleHandle("NTDLL.DLL");
1242   if (hmod == NULL) return false;
1243   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1244                                           &minfo, sizeof(MODULEINFO))) {
1245     return false;
1246   }
1247 
1248   if ((addr >= minfo.lpBaseOfDll) &&
1249       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1250     return true;
1251   } else {
1252     return false;
1253   }
1254 }
1255 #endif
1256 
1257 struct _modinfo {
1258   address addr;
1259   char*   full_path;   // point to a char buffer
1260   int     buflen;      // size of the buffer
1261   address base_addr;
1262 };
1263 
1264 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1265                                   address top_address, void * param) {
1266   struct _modinfo *pmod = (struct _modinfo *)param;
1267   if (!pmod) return -1;
1268 
1269   if (base_addr   <= pmod->addr &&
1270       top_address > pmod->addr) {
1271     // if a buffer is provided, copy path name to the buffer
1272     if (pmod->full_path) {
1273       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1274     }
1275     pmod->base_addr = base_addr;
1276     return 1;
1277   }
1278   return 0;
1279 }
1280 
1281 bool os::dll_address_to_library_name(address addr, char* buf,
1282                                      int buflen, int* offset) {
1283   // buf is not optional, but offset is optional
1284   assert(buf != NULL, "sanity check");
1285 
1286 // NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
1287 //       return the full path to the DLL file; sometimes it returns the path
1288 //       to the corresponding PDB file (debug info), and sometimes it only
1289 //       returns a partial path, which makes life painful.
1290 
1291   struct _modinfo mi;
1292   mi.addr      = addr;
1293   mi.full_path = buf;
1294   mi.buflen    = buflen;
1295   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1296     // buf already contains path name
1297     if (offset) *offset = addr - mi.base_addr;
1298     return true;
1299   }
1300 
1301   buf[0] = '\0';
1302   if (offset) *offset = -1;
1303   return false;
1304 }
1305 
1306 bool os::dll_address_to_function_name(address addr, char *buf,
1307                                       int buflen, int *offset,
1308                                       bool demangle) {
1309   // buf is not optional, but offset is optional
1310   assert(buf != NULL, "sanity check");
1311 
1312   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1313     return true;
1314   }
1315   if (offset != NULL)  *offset  = -1;
1316   buf[0] = '\0';
1317   return false;
1318 }
1319 
1320 // save the start and end addresses of jvm.dll into param[0] and param[1]
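     // (The jvm.dll module is identified by checking whether the address of this
     // function itself, which lives in jvm.dll, falls inside the module.)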
1321 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1322                            address top_address, void * param) {
1323   if (!param) return -1;
1324 
1325   if (base_addr   <= (address)_locate_jvm_dll &&
1326       top_address > (address)_locate_jvm_dll) {
1327     ((address*)param)[0] = base_addr;
1328     ((address*)param)[1] = top_address;
1329     return 1;
1330   }
1331   return 0;
1332 }
1333 
1334 address vm_lib_location[2];    // start and end address of jvm.dll
1335 
1336 // check if addr is inside jvm.dll
1337 bool os::address_is_in_vm(address addr) {
1338   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1339     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1340       assert(false, "Can't find jvm module.");
1341       return false;
1342     }
1343   }
1344 
1345   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1346 }
1347 
1348 // print module info; param is outputStream*
1349 static int _print_module(const char* fname, address base_address,
1350                          address top_address, void* param) {
1351   if (!param) return -1;
1352 
1353   outputStream* st = (outputStream*)param;
1354 
1355   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1356   return 0;
1357 }
1358 
1359 // Loads a .dll/.so and,
1360 // in case of error, checks whether the .dll/.so was built for the
1361 // same architecture as Hotspot is running on.
1362 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1363   log_info(os)("attempting shared library load of %s", name);
1364 
1365   void * result = LoadLibrary(name);
1366   if (result != NULL) {
1367     Events::log(NULL, "Loaded shared library %s", name);
1368     // Recalculate pdb search path if a DLL was loaded successfully.
1369     SymbolEngine::recalc_search_path();
1370     log_info(os)("shared library load of %s was successful", name);
1371     return result;
1372   }
1373   DWORD errcode = GetLastError();
1374   // Read system error message into ebuf
1375   // It may or may not be overwritten below (in the for loop and just above)
1376   lasterror(ebuf, (size_t) ebuflen);
1377   ebuf[ebuflen - 1] = '\0';
1378   Events::log(NULL, "Loading shared library %s failed, error code %lu", name, errcode);
1379   log_info(os)("shared library load of %s failed, error code %lu", name, errcode);
1380 
1381   if (errcode == ERROR_MOD_NOT_FOUND) {
1382     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1383     ebuf[ebuflen - 1] = '\0';
1384     return NULL;
1385   }
1386 
1387   // Parsing the dll below:
1388   // if we can read the dll info and find that the dll was built
1389   // for an architecture other than the one Hotspot is running on,
1390   // then print "DLL was built for a different architecture" to the buffer;
1391   // else call os::lasterror to obtain the system error message.
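       //
       // PE/COFF layout relied on here: the 4-byte value at file offset 0x3c
       // (IMAGE_FILE_PTR_TO_SIGNATURE) gives the offset of the "PE\0\0" signature;
       // the COFF file header follows the 4-byte signature, and its first 2-byte
       // field is the machine code (e.g. 0x014c = IMAGE_FILE_MACHINE_I386,
       // 0x8664 = IMAGE_FILE_MACHINE_AMD64).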
1392   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1393   if (fd < 0) {
1394     return NULL;
1395   }
1396 
1397   uint32_t signature_offset;
1398   uint16_t lib_arch = 0;
1399   bool failed_to_get_lib_arch =
1400     ( // Seek to position 0x3c in the dll
1401      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1402      ||
1403      // Read location of signature
1404      (sizeof(signature_offset) !=
1405      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1406      ||
1407      // Go to COFF File Header in dll
1408      // that is located after "signature" (4 bytes long)
1409      (os::seek_to_file_offset(fd,
1410      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1411      ||
1412      // Read field that contains code of architecture
1413      // that dll was built for
1414      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1415     );
1416 
1417   ::close(fd);
1418   if (failed_to_get_lib_arch) {
1419     // file i/o error - report os::lasterror(...) msg
1420     return NULL;
1421   }
1422 
1423   typedef struct {
1424     uint16_t arch_code;
1425     char* arch_name;
1426   } arch_t;
1427 
1428   static const arch_t arch_array[] = {
1429     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1430     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"}
1431   };
1432 #if (defined _M_AMD64)
1433   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1434 #elif (defined _M_IX86)
1435   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1436 #else
1437   #error Method os::dll_load requires that one of the following \
1438          is defined: _M_AMD64 or _M_IX86
1439 #endif
1440 
1441 
1442   // Obtain strings for the printf operation below:
1443   // lib_arch_str shall contain the platform this .dll was built for,
1444   // running_arch_str shall contain the platform Hotspot was built for.
1445   char *running_arch_str = NULL, *lib_arch_str = NULL;
1446   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1447     if (lib_arch == arch_array[i].arch_code) {
1448       lib_arch_str = arch_array[i].arch_name;
1449     }
1450     if (running_arch == arch_array[i].arch_code) {
1451       running_arch_str = arch_array[i].arch_name;
1452     }
1453   }
1454 
1455   assert(running_arch_str,
1456          "Didn't find running architecture code in arch_array");
1457 
1458   // If the architecture is right
1459   // but some other error took place - report os::lasterror(...) msg
1460   if (lib_arch == running_arch) {
1461     return NULL;
1462   }
1463 
1464   if (lib_arch_str != NULL) {
1465     ::_snprintf(ebuf, ebuflen - 1,
1466                 "Can't load %s-bit .dll on a %s-bit platform",
1467                 lib_arch_str, running_arch_str);
1468   } else {
1469     // don't know what architecture this dll was built for
1470     ::_snprintf(ebuf, ebuflen - 1,
1471                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1472                 lib_arch, running_arch_str);
1473   }
1474 
1475   return NULL;
1476 }
1477 
1478 void os::print_dll_info(outputStream *st) {
1479   st->print_cr("Dynamic libraries:");
1480   get_loaded_modules_info(_print_module, (void *)st);
1481 }
1482 
1483 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1484   HANDLE   hProcess;
1485 
1486 # define MAX_NUM_MODULES 128
1487   HMODULE     modules[MAX_NUM_MODULES];
1488   static char filename[MAX_PATH];
1489   int         result = 0;
1490 
1491   int pid = os::current_process_id();
1492   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1493                          FALSE, pid);
1494   if (hProcess == NULL) return 0;
1495 
1496   DWORD size_needed;
1497   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1498     CloseHandle(hProcess);
1499     return 0;
1500   }
1501 
1502   // number of modules that are currently loaded
1503   int num_modules = size_needed / sizeof(HMODULE);
1504 
1505   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1506     // Get Full pathname:
1507     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1508       filename[0] = '\0';
1509     }
1510 
1511     MODULEINFO modinfo;
1512     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1513       modinfo.lpBaseOfDll = NULL;
1514       modinfo.SizeOfImage = 0;
1515     }
1516 
1517     // Invoke callback function
1518     result = callback(filename, (address)modinfo.lpBaseOfDll,
1519                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1520     if (result) break;
1521   }
1522 
1523   CloseHandle(hProcess);
1524   return result;
1525 }
1526 
1527 bool os::get_host_name(char* buf, size_t buflen) {
1528   DWORD size = (DWORD)buflen;
1529   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1530 }
1531 
1532 void os::get_summary_os_info(char* buf, size_t buflen) {
1533   stringStream sst(buf, buflen);
1534   os::win32::print_windows_version(&sst);
1535   // chop off newline character
1536   char* nl = strchr(buf, '\n');
1537   if (nl != NULL) *nl = '\0';
1538 }
1539 
1540 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1541 #if _MSC_VER >= 1900
1542   // Starting with Visual Studio 2015, vsnprintf is C99 compliant.
1543   int result = ::vsnprintf(buf, len, fmt, args);
1544   // If an encoding error occurred (result < 0) then it's not clear
1545   // whether the buffer is NUL terminated, so ensure it is.
1546   if ((result < 0) && (len > 0)) {
1547     buf[len - 1] = '\0';
1548   }
1549   return result;
1550 #else
1551   // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
1552   // _vsnprintf, whose behavior seems to be *mostly* consistent across
1553   // versions.  However, when len == 0, avoid _vsnprintf too, and just
1554   // go straight to _vscprintf.  The output is going to be truncated in
1555   // that case, except in the unusual case of empty output.  More
1556   // importantly, the documentation for various versions of Visual Studio
1557   // are inconsistent about the behavior of _vsnprintf when len == 0,
1558   // including it possibly being an error.
1559   int result = -1;
1560   if (len > 0) {
1561     result = _vsnprintf(buf, len, fmt, args);
1562     // If output (including NUL terminator) is truncated, the buffer
1563     // won't be NUL terminated.  Add the trailing NUL specified by C99.
1564     if ((result < 0) || ((size_t)result >= len)) {
1565       buf[len - 1] = '\0';
1566     }
1567   }
1568   if (result < 0) {
1569     result = _vscprintf(fmt, args);
1570   }
1571   return result;
1572 #endif // _MSC_VER dispatch
1573 }
1574 
1575 static inline time_t get_mtime(const char* filename) {
1576   struct stat st;
1577   int ret = os::stat(filename, &st);
1578   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
1579   return st.st_mtime;
1580 }
1581 
1582 int os::compare_file_modified_times(const char* file1, const char* file2) {
1583   time_t t1 = get_mtime(file1);
1584   time_t t2 = get_mtime(file2);
1585   return t1 - t2;
1586 }
1587 
1588 void os::print_os_info_brief(outputStream* st) {
1589   os::print_os_info(st);
1590 }
1591 
1592 void os::win32::print_uptime_info(outputStream* st) {
1593   unsigned long long ticks = GetTickCount64();
1594   os::print_dhm(st, "OS uptime:", ticks/1000);
1595 }
1596 
1597 void os::print_os_info(outputStream* st) {
1598 #ifdef ASSERT
1599   char buffer[1024];
1600   st->print("HostName: ");
1601   if (get_host_name(buffer, sizeof(buffer))) {
1602     st->print("%s ", buffer);
1603   } else {
1604     st->print("N/A ");
1605   }
1606 #endif
1607   st->print_cr("OS:");
1608   os::win32::print_windows_version(st);
1609 
1610   os::win32::print_uptime_info(st);
1611 
1612 #ifdef _LP64
1613   VM_Version::print_platform_virtualization_info(st);
1614 #endif
1615 }
1616 
1617 void os::win32::print_windows_version(outputStream* st) {
1618   OSVERSIONINFOEX osvi;
1619   VS_FIXEDFILEINFO *file_info;
1620   TCHAR kernel32_path[MAX_PATH];
1621   UINT len, ret;
1622 
1623   // Use the GetVersionEx information to see if we're on a server or
1624   // workstation edition of Windows. Starting with Windows 8.1 we can't
1625   // trust the OS version information returned by this API.
1626   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1627   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1628   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1629     st->print_cr("Call to GetVersionEx failed");
1630     return;
1631   }
1632   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1633 
1634   // Get the full path to \Windows\System32\kernel32.dll and use that for
1635   // determining what version of Windows we're running on.
1636   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1637   ret = GetSystemDirectory(kernel32_path, len);
1638   if (ret == 0 || ret > len) {
1639     st->print_cr("Call to GetSystemDirectory failed");
1640     return;
1641   }
1642   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1643 
1644   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1645   if (version_size == 0) {
1646     st->print_cr("Call to GetFileVersionInfoSize failed");
1647     return;
1648   }
1649 
1650   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1651   if (version_info == NULL) {
1652     st->print_cr("Failed to allocate version_info");
1653     return;
1654   }
1655 
1656   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1657     os::free(version_info);
1658     st->print_cr("Call to GetFileVersionInfo failed");
1659     return;
1660   }
1661 
1662   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1663     os::free(version_info);
1664     st->print_cr("Call to VerQueryValue failed");
1665     return;
1666   }
1667 
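       // VS_FIXEDFILEINFO packs the product version as major.minor in the high/low
       // words of dwProductVersionMS and build.revision in dwProductVersionLS.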
1668   int major_version = HIWORD(file_info->dwProductVersionMS);
1669   int minor_version = LOWORD(file_info->dwProductVersionMS);
1670   int build_number = HIWORD(file_info->dwProductVersionLS);
1671   int build_minor = LOWORD(file_info->dwProductVersionLS);
1672   int os_vers = major_version * 1000 + minor_version;
1673   os::free(version_info);
1674 
1675   st->print(" Windows ");
1676   switch (os_vers) {
1677 
1678   case 6000:
1679     if (is_workstation) {
1680       st->print("Vista");
1681     } else {
1682       st->print("Server 2008");
1683     }
1684     break;
1685 
1686   case 6001:
1687     if (is_workstation) {
1688       st->print("7");
1689     } else {
1690       st->print("Server 2008 R2");
1691     }
1692     break;
1693 
1694   case 6002:
1695     if (is_workstation) {
1696       st->print("8");
1697     } else {
1698       st->print("Server 2012");
1699     }
1700     break;
1701 
1702   case 6003:
1703     if (is_workstation) {
1704       st->print("8.1");
1705     } else {
1706       st->print("Server 2012 R2");
1707     }
1708     break;
1709 
1710   case 10000:
1711     if (is_workstation) {
1712       st->print("10");
1713     } else {
1714       // Distinguish Windows Server 2016 from 2019 by build number;
1715       // Windows Server 2019 (GA 10/2018) has build number 17763.
1716       if (build_number > 17762) {
1717         st->print("Server 2019");
1718       } else {
1719         st->print("Server 2016");
1720       }
1721     }
1722     break;
1723 
1724   default:
1725     // Unrecognized Windows version; print out its major and minor versions.
1726     st->print("%d.%d", major_version, minor_version);
1727     break;
1728   }
1729 
1730   // Retrieve SYSTEM_INFO via GetNativeSystemInfo so that we can find out
1731   // whether we are running on a 64-bit processor.
1732   SYSTEM_INFO si;
1733   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1734   GetNativeSystemInfo(&si);
1735   if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1736     st->print(" , 64 bit");
1737   }
1738 
1739   st->print(" Build %d", build_number);
1740   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1741   st->cr();
1742 }
1743 
1744 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1745   // Nothing to do for now.
1746 }
1747 
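     // Read the processor name string from the registry; fall back to a generic
     // placeholder if the key or value is unavailable.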
1748 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1749   HKEY key;
1750   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1751                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1752   if (status == ERROR_SUCCESS) {
1753     DWORD size = (DWORD)buflen;
1754     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1755     if (status != ERROR_SUCCESS) {
1756         strncpy(buf, "## __CPU__", buflen);
1757     }
1758     RegCloseKey(key);
1759   } else {
1760     // Return generic CPU info.
1761     strncpy(buf, "## __CPU__", buflen);
1762   }
1763 }
1764 
1765 void os::print_memory_info(outputStream* st) {
1766   st->print("Memory:");
1767   st->print(" %dk page", os::vm_page_size()>>10);
1768 
1769   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
1770   // incorrect value if total memory is larger than 4GB.
1771   MEMORYSTATUSEX ms;
1772   ms.dwLength = sizeof(ms);
1773   int r1 = GlobalMemoryStatusEx(&ms);
1774 
1775   if (r1 != 0) {
1776     st->print(", system-wide physical " INT64_FORMAT "M ",
1777              (int64_t) ms.ullTotalPhys >> 20);
1778     st->print("(" INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20);
1779 
1780     st->print("TotalPageFile size " INT64_FORMAT "M ",
1781              (int64_t) ms.ullTotalPageFile >> 20);
1782     st->print("(AvailPageFile size " INT64_FORMAT "M)",
1783              (int64_t) ms.ullAvailPageFile >> 20);
1784 
1785     // On 32-bit, Total/AvailVirtual are interesting (they show how close we get to the 2-4 GB per-process limit)
1786 #if defined(_M_IX86)
1787     st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ",
1788              (int64_t) ms.ullTotalVirtual >> 20);
1789     st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
1790 #endif
1791   } else {
1792     st->print(", GlobalMemoryStatusEx did not succeed so we miss some memory values.");
1793   }
1794 
1795   // extended memory statistics for a process
1796   PROCESS_MEMORY_COUNTERS_EX pmex;
1797   ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
1798   pmex.cb = sizeof(pmex);
1799   int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));
1800 
1801   if (r2 != 0) {
1802     st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ",
1803              (int64_t) pmex.WorkingSetSize >> 20);
1804     st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);
1805 
1806     st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ",
1807              (int64_t) pmex.PrivateUsage >> 20);
1808     st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);
1809   } else {
1810     st->print("\nGetProcessMemoryInfo did not succeed so we miss some memory values.");
1811   }
1812 
1813   st->cr();
1814 }
1815 
1816 bool os::signal_sent_by_kill(const void* siginfo) {
1817   // TODO: Is this possible?
1818   return false;
1819 }
1820 
1821 void os::print_siginfo(outputStream *st, const void* siginfo) {
1822   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1823   st->print("siginfo:");
1824 
1825   char tmp[64];
1826   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1827     strcpy(tmp, "EXCEPTION_??");
1828   }
1829   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1830 
1831   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1832        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1833        er->NumberParameters >= 2) {
1834     switch (er->ExceptionInformation[0]) {
1835     case 0: st->print(", reading address"); break;
1836     case 1: st->print(", writing address"); break;
1837     case 8: st->print(", data execution prevention violation at address"); break;
1838     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1839                        er->ExceptionInformation[0]);
1840     }
1841     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1842   } else {
1843     int num = er->NumberParameters;
1844     if (num > 0) {
1845       st->print(", ExceptionInformation=");
1846       for (int i = 0; i < num; i++) {
1847         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1848       }
1849     }
1850   }
1851   st->cr();
1852 }
1853 
1854 bool os::signal_thread(Thread* thread, int sig, const char* reason) {
1855   // TODO: Can we kill thread?
1856   return false;
1857 }
1858 
1859 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1860   // do nothing
1861 }
1862 
1863 static char saved_jvm_path[MAX_PATH] = {0};
1864 
1865 // Find the full path to the current module, jvm.dll
1866 void os::jvm_path(char *buf, jint buflen) {
1867   // Error checking.
1868   if (buflen < MAX_PATH) {
1869     assert(false, "must use a large-enough buffer");
1870     buf[0] = '\0';
1871     return;
1872   }
1873   // Lazy resolve the path to current module.
1874   if (saved_jvm_path[0] != 0) {
1875     strcpy(buf, saved_jvm_path);
1876     return;
1877   }
1878 
1879   buf[0] = '\0';
1880   if (Arguments::sun_java_launcher_is_altjvm()) {
1881     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1882     // for a JAVA_HOME environment variable and fix up the path so it
1883     // looks like jvm.dll is installed there (append a fake suffix
1884     // hotspot/jvm.dll).
1885     char* java_home_var = ::getenv("JAVA_HOME");
1886     if (java_home_var != NULL && java_home_var[0] != 0 &&
1887         strlen(java_home_var) < (size_t)buflen) {
1888       strncpy(buf, java_home_var, buflen);
1889 
1890       // Determine if this is a legacy image or a modules image;
1891       // a modules image doesn't have a "jre" subdirectory.
1892       size_t len = strlen(buf);
1893       char* jrebin_p = buf + len;
1894       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1895       if (0 != _access(buf, 0)) {
1896         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1897       }
1898       len = strlen(buf);
1899       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1900     }
1901   }
1902 
1903   if (buf[0] == '\0') {
1904     GetModuleFileName(vm_lib_handle, buf, buflen);
1905   }
1906   strncpy(saved_jvm_path, buf, MAX_PATH);
1907   saved_jvm_path[MAX_PATH - 1] = '\0';
1908 }
1909 
1910 
1911 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1912 #ifndef _WIN64
1913   st->print("_");
1914 #endif
1915 }
1916 
1917 
1918 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1919 #ifndef _WIN64
1920   st->print("@%d", args_size  * sizeof(int));
1921 #endif
1922 }
1923 
1924 // This method is a copy of JDK's sysGetLastErrorString
1925 // from src/windows/hpi/src/system_md.c
1926 
1927 size_t os::lasterror(char* buf, size_t len) {
1928   DWORD errval;
1929 
1930   if ((errval = GetLastError()) != 0) {
1931     // DOS error
1932     size_t n = (size_t)FormatMessage(
1933                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1934                                      NULL,
1935                                      errval,
1936                                      0,
1937                                      buf,
1938                                      (DWORD)len,
1939                                      NULL);
1940     if (n > 3) {
1941       // Drop final '.', CR, LF
1942       if (buf[n - 1] == '\n') n--;
1943       if (buf[n - 1] == '\r') n--;
1944       if (buf[n - 1] == '.') n--;
1945       buf[n] = '\0';
1946     }
1947     return n;
1948   }
1949 
1950   if (errno != 0) {
1951     // C runtime error that has no corresponding DOS error code
1952     const char* s = os::strerror(errno);
1953     size_t n = strlen(s);
1954     if (n >= len) n = len - 1;
1955     strncpy(buf, s, n);
1956     buf[n] = '\0';
1957     return n;
1958   }
1959 
1960   return 0;
1961 }
1962 
1963 int os::get_last_error() {
1964   DWORD error = GetLastError();
1965   if (error == 0) {
1966     error = errno;
1967   }
1968   return (int)error;
1969 }
1970 
1971 // sun.misc.Signal
1972 // NOTE that this is a workaround for an apparent kernel bug where if
1973 // a signal handler for SIGBREAK is installed then that signal handler
1974 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1975 // See bug 4416763.
1976 static void (*sigbreakHandler)(int) = NULL;
1977 
1978 static void UserHandler(int sig, void *siginfo, void *context) {
1979   os::signal_notify(sig);
1980   // We need to reinstate the signal handler each time...
1981   os::signal(sig, (void*)UserHandler);
1982 }
1983 
1984 void* os::user_handler() {
1985   return (void*) UserHandler;
1986 }
1987 
1988 void* os::signal(int signal_number, void* handler) {
1989   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1990     void (*oldHandler)(int) = sigbreakHandler;
1991     sigbreakHandler = (void (*)(int)) handler;
1992     return (void*) oldHandler;
1993   } else {
1994     return (void*)::signal(signal_number, (void (*)(int))handler);
1995   }
1996 }
1997 
1998 void os::signal_raise(int signal_number) {
1999   raise(signal_number);
2000 }
2001 
2002 // The Win32 C runtime library maps all console control events other than ^C
2003 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2004 // logoff, and shutdown events.  We therefore install our own console handler
2005 // that raises SIGTERM for the latter cases.
2006 //
2007 static BOOL WINAPI consoleHandler(DWORD event) {
2008   switch (event) {
2009   case CTRL_C_EVENT:
2010     if (VMError::is_error_reported()) {
2011       // Ctrl-C is pressed during error reporting, likely because the error
2012       // handler fails to abort. Let VM die immediately.
2013       os::die();
2014     }
2015 
2016     os::signal_raise(SIGINT);
2017     return TRUE;
2018     break;
2019   case CTRL_BREAK_EVENT:
2020     if (sigbreakHandler != NULL) {
2021       (*sigbreakHandler)(SIGBREAK);
2022     }
2023     return TRUE;
2024     break;
2025   case CTRL_LOGOFF_EVENT: {
2026     // Don't terminate JVM if it is running in a non-interactive session,
2027     // such as a service process.
2028     USEROBJECTFLAGS flags;
2029     HANDLE handle = GetProcessWindowStation();
2030     if (handle != NULL &&
2031         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2032         sizeof(USEROBJECTFLAGS), NULL)) {
2033       // If it is a non-interactive session, let the next handler
2034       // deal with it.
2035       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2036         return FALSE;
2037       }
2038     }
2039   }
2040   case CTRL_CLOSE_EVENT:
2041   case CTRL_SHUTDOWN_EVENT:
2042     os::signal_raise(SIGTERM);
2043     return TRUE;
2044     break;
2045   default:
2046     break;
2047   }
2048   return FALSE;
2049 }
2050 
2051 // The following code was moved from os.cpp to make this
2052 // code platform specific, which it is by its very nature.
2053 
2054 // Return maximum OS signal used + 1 for internal use only
2055 // Used as exit signal for signal_thread
2056 int os::sigexitnum_pd() {
2057   return NSIG;
2058 }
2059 
2060 // a counter for each possible signal value, including signal_thread exit signal
2061 static volatile jint pending_signals[NSIG+1] = { 0 };
2062 static Semaphore* sig_sem = NULL;
2063 
2064 static void jdk_misc_signal_init() {
2065   // Initialize signal structures
2066   memset((void*)pending_signals, 0, sizeof(pending_signals));
2067 
2068   // Initialize signal semaphore
2069   sig_sem = new Semaphore();
2070 
2071   // Programs embedding the VM do not want it to attempt to receive
2072   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2073   // shutdown hooks mechanism introduced in 1.3.  For example, when
2074   // the VM is run as part of a Windows NT service (i.e., a servlet
2075   // engine in a web server), the correct behavior is for any console
2076   // control handler to return FALSE, not TRUE, because the OS's
2077   // "final" handler for such events allows the process to continue if
2078   // it is a service (while terminating it if it is not a service).
2079   // To make this behavior uniform and the mechanism simpler, we
2080   // completely disable the VM's usage of these console events if -Xrs
2081   // (=ReduceSignalUsage) is specified.  This means, for example, that
2082   // the CTRL-BREAK thread dump mechanism is also disabled in this
2083   // case.  See bugs 4323062, 4345157, and related bugs.
2084 
2085   // Add a CTRL-C handler
2086   SetConsoleCtrlHandler(consoleHandler, TRUE);
2087 }
2088 
2089 void os::signal_notify(int sig) {
2090   if (sig_sem != NULL) {
2091     Atomic::inc(&pending_signals[sig]);
2092     sig_sem->signal();
2093   } else {
2094     // Signal thread is not created with ReduceSignalUsage and jdk_misc_signal_init
2095     // initialization isn't called.
2096     assert(ReduceSignalUsage, "signal semaphore should be created");
2097   }
2098 }
2099 
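     // Wait (if necessary) until a signal is pending, then atomically consume one
     // occurrence of it (decrement its counter) and return the signal number.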
2100 static int check_pending_signals() {
2101   while (true) {
2102     for (int i = 0; i < NSIG + 1; i++) {
2103       jint n = pending_signals[i];
2104       if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
2105         return i;
2106       }
2107     }
2108     JavaThread *thread = JavaThread::current();
2109 
2110     ThreadBlockInVM tbivm(thread);
2111 
2112     bool threadIsSuspended;
2113     do {
2114       thread->set_suspend_equivalent();
2115       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2116       sig_sem->wait();
2117 
2118       // were we externally suspended while we were waiting?
2119       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2120       if (threadIsSuspended) {
2121         // The semaphore has been incremented, but while we were waiting
2122         // another thread suspended us. We don't want to continue running
2123         // while suspended because that would surprise the thread that
2124         // suspended us.
2125         sig_sem->signal();
2126 
2127         thread->java_suspend_self();
2128       }
2129     } while (threadIsSuspended);
2130   }
2131 }
2132 
2133 int os::signal_wait() {
2134   return check_pending_signals();
2135 }
2136 
2137 // Implicit OS exception handling
2138 
2139 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2140                       address handler) {
2141   JavaThread* thread = (JavaThread*) Thread::current_or_null();
2142   // Save pc in thread
2143 #ifdef _M_AMD64
2144   // Do not blow up if no thread info available.
2145   if (thread) {
2146     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2147   }
2148   // Set pc to handler
2149   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2150 #else
2151   // Do not blow up if no thread info available.
2152   if (thread) {
2153     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2154   }
2155   // Set pc to handler
2156   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2157 #endif
2158 
2159   // Continue the execution
2160   return EXCEPTION_CONTINUE_EXECUTION;
2161 }
2162 
2163 
2164 // Used for PostMortemDump
2165 extern "C" void safepoints();
2166 extern "C" void find(int x);
2167 extern "C" void events();
2168 
2169 // According to Windows API documentation, an illegal instruction sequence should generate
2170 // the 0xC000001C exception code. However, real-world experience shows that occasionally
2171 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2172 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2173 
2174 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2175 
2176 // From "Execution Protection in the Windows Operating System" draft 0.35
2177 // Once a system header becomes available, the "real" define should be
2178 // included or copied here.
2179 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2180 
2181 // Windows Vista/2008 heap corruption check
2182 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2183 
2184 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2185 // C++ compiler contain this error code. Because this is a compiler-generated
2186 // error, the code is not listed in the Win32 API header files.
2187 // The code is actually a cryptic mnemonic device, with the initial "E"
2188 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2189 // ASCII values of "msc".
2190 
2191 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2192 
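     // def_excpt(val) expands to a { "val", val } entry, pairing the symbolic name
     // of an exception code with its numeric value for the table below.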
2193 #define def_excpt(val) { #val, (val) }
2194 
2195 static const struct { const char* name; uint number; } exceptlabels[] = {
2196     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2197     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2198     def_excpt(EXCEPTION_BREAKPOINT),
2199     def_excpt(EXCEPTION_SINGLE_STEP),
2200     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2201     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2202     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2203     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2204     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2205     def_excpt(EXCEPTION_FLT_OVERFLOW),
2206     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2207     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2208     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2209     def_excpt(EXCEPTION_INT_OVERFLOW),
2210     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2211     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2212     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2213     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2214     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2215     def_excpt(EXCEPTION_STACK_OVERFLOW),
2216     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2217     def_excpt(EXCEPTION_GUARD_PAGE),
2218     def_excpt(EXCEPTION_INVALID_HANDLE),
2219     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2220     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2221 };
2222 
2223 #undef def_excpt
2224 
2225 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2226   uint code = static_cast<uint>(exception_code);
2227   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2228     if (exceptlabels[i].number == code) {
2229       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2230       return buf;
2231     }
2232   }
2233 
2234   return NULL;
2235 }
2236 
2237 //-----------------------------------------------------------------------------
2238 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2239   // handle exception caused by idiv; should only happen for -MinInt/-1
2240   // (division by zero is handled explicitly)
2241 #ifdef  _M_AMD64
2242   PCONTEXT ctx = exceptionInfo->ContextRecord;
2243   address pc = (address)ctx->Rip;
2244   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2245   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2246   if (pc[0] == 0xF7) {
2247     // set correct result values and continue after idiv instruction
2248     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2249   } else {
2250     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2251   }
2252   // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
2253   // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
2254   // idiv opcode (0xF7).
2255   ctx->Rdx = (DWORD)0;             // remainder
2256   // Continue the execution
2257 #else
2258   PCONTEXT ctx = exceptionInfo->ContextRecord;
2259   address pc = (address)ctx->Eip;
2260   assert(pc[0] == 0xF7, "not an idiv opcode");
2261   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2262   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2263   // set correct result values and continue after idiv instruction
2264   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2265   ctx->Eax = (DWORD)min_jint;      // result
2266   ctx->Edx = (DWORD)0;             // remainder
2267   // Continue the execution
2268 #endif
2269   return EXCEPTION_CONTINUE_EXECUTION;
2270 }
2271 
2272 //-----------------------------------------------------------------------------
2273 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2274   PCONTEXT ctx = exceptionInfo->ContextRecord;
2275 #ifndef  _WIN64
2276   // handle exception caused by native method modifying control word
2277   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2278 
2279   switch (exception_code) {
2280   case EXCEPTION_FLT_DENORMAL_OPERAND:
2281   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2282   case EXCEPTION_FLT_INEXACT_RESULT:
2283   case EXCEPTION_FLT_INVALID_OPERATION:
2284   case EXCEPTION_FLT_OVERFLOW:
2285   case EXCEPTION_FLT_STACK_CHECK:
2286   case EXCEPTION_FLT_UNDERFLOW:
2287     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2288     if (fp_control_word != ctx->FloatSave.ControlWord) {
2289       // Restore FPCW and mask out FLT exceptions
2290       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2291       // Mask out pending FLT exceptions
2292       ctx->FloatSave.StatusWord &=  0xffffff00;
2293       return EXCEPTION_CONTINUE_EXECUTION;
2294     }
2295   }
2296 
2297   if (prev_uef_handler != NULL) {
2298     // We didn't handle this exception so pass it to the previous
2299     // UnhandledExceptionFilter.
2300     return (prev_uef_handler)(exceptionInfo);
2301   }
2302 #else // !_WIN64
2303   // On Windows, the mxcsr control bits are non-volatile across calls
2304   // See also CR 6192333
2305   //
2306   jint MxCsr = INITIAL_MXCSR;
2307   // we can't use StubRoutines::addr_mxcsr_std()
2308   // because in Win64 mxcsr is not saved there
2309   if (MxCsr != ctx->MxCsr) {
2310     ctx->MxCsr = MxCsr;
2311     return EXCEPTION_CONTINUE_EXECUTION;
2312   }
2313 #endif // !_WIN64
2314 
2315   return EXCEPTION_CONTINUE_SEARCH;
2316 }
2317 
2318 static inline void report_error(Thread* t, DWORD exception_code,
2319                                 address addr, void* siginfo, void* context) {
2320   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2321 
2322   // If UseOsErrorReporting, this will return here and save the error file
2323   // somewhere where we can find it in the minidump.
2324 }
2325 
2326 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2327         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2328   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2329   address addr = (address) exceptionRecord->ExceptionInformation[1];
2330   if (Interpreter::contains(pc)) {
2331     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2332     if (!fr->is_first_java_frame()) {
2333       // get_frame_at_stack_banging_point() is only called when we
2334       // have well defined stacks so java_sender() calls do not need
2335       // to assert safe_for_sender() first.
2336       *fr = fr->java_sender();
2337     }
2338   } else {
2339     // more complex code with compiled code
2340     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2341     CodeBlob* cb = CodeCache::find_blob(pc);
2342     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2343       // Not sure where the pc points to, fallback to default
2344       // stack overflow handling
2345       return false;
2346     } else {
2347       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2348       // in compiled code, the stack banging is performed just after the return pc
2349       // has been pushed on the stack
2350       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2351       if (!fr->is_java_frame()) {
2352         // See java_sender() comment above.
2353         *fr = fr->java_sender();
2354       }
2355     }
2356   }
2357   assert(fr->is_java_frame(), "Safety check");
2358   return true;
2359 }
2360 
2361 #if INCLUDE_AOT
2362 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2363   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2364   address addr = (address) exceptionRecord->ExceptionInformation[1];
2365   address pc = (address) exceptionInfo->ContextRecord->Rip;
2366 
2367   // Handle the case where we get an implicit exception in AOT generated
2368   // code.  AOT DLL's loaded are not registered for structured exceptions.
2369   // If the exception occurred in the codeCache or AOT code, pass control
2370   // to our normal exception handler.
2371   CodeBlob* cb = CodeCache::find_blob(pc);
2372   if (cb != NULL) {
2373     return topLevelExceptionFilter(exceptionInfo);
2374   }
2375 
2376   return EXCEPTION_CONTINUE_SEARCH;
2377 }
2378 #endif
2379 
2380 //-----------------------------------------------------------------------------
2381 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2382   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2383   PEXCEPTION_RECORD exception_record = exceptionInfo->ExceptionRecord;
2384   DWORD exception_code = exception_record->ExceptionCode;
2385 #ifdef _M_AMD64
2386   address pc = (address) exceptionInfo->ContextRecord->Rip;
2387 #else
2388   address pc = (address) exceptionInfo->ContextRecord->Eip;
2389 #endif
2390   Thread* t = Thread::current_or_null_safe();
2391 
2392   // Handle SafeFetch32 and SafeFetchN exceptions.
2393   if (StubRoutines::is_safefetch_fault(pc)) {
2394     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2395   }
2396 
2397 #ifndef _WIN64
2398   // Execution protection violation - win32 running on AMD64 only
2399   // Handled first to avoid misdiagnosis as a "normal" access violation;
2400   // This is safe to do because we have a new/unique ExceptionInformation
2401   // code for this condition.
2402   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2403     int exception_subcode = (int) exception_record->ExceptionInformation[0];
2404     address addr = (address) exception_record->ExceptionInformation[1];
2405 
2406     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2407       int page_size = os::vm_page_size();
2408 
2409       // Make sure the pc and the faulting address are sane.
2410       //
2411       // If an instruction spans a page boundary, and the page containing
2412       // the beginning of the instruction is executable but the following
2413       // page is not, the pc and the faulting address might be slightly
2414       // different - we still want to unguard the 2nd page in this case.
2415       //
2416       // 15 bytes seems to be a (very) safe value for max instruction size.
2417       bool pc_is_near_addr =
2418         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2419       bool instr_spans_page_boundary =
2420         (align_down((intptr_t) pc ^ (intptr_t) addr,
2421                          (intptr_t) page_size) > 0);
2422 
2423       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2424         static volatile address last_addr =
2425           (address) os::non_memory_address_word();
2426 
2427         // In conservative mode, don't unguard unless the address is in the VM
2428         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2429             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2430 
2431           // Set memory to RWX and retry
2432           address page_start = align_down(addr, page_size);
2433           bool res = os::protect_memory((char*) page_start, page_size,
2434                                         os::MEM_PROT_RWX);
2435 
2436           log_debug(os)("Execution protection violation "
2437                         "at " INTPTR_FORMAT
2438                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2439                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2440 
2441           // Set last_addr so if we fault again at the same address, we don't
2442           // end up in an endless loop.
2443           //
2444           // There are two potential complications here.  Two threads trapping
2445           // at the same address at the same time could cause one of the
2446           // threads to think it already unguarded, and abort the VM.  Likely
2447           // very rare.
2448           //
2449           // The other race involves two threads alternately trapping at
2450           // different addresses and failing to unguard the page, resulting in
2451           // an endless loop.  This condition is probably even more unlikely
2452           // than the first.
2453           //
2454           // Although both cases could be avoided by using locks or thread
2455           // local last_addr, these solutions are unnecessary complication:
2456           // this handler is a best-effort safety net, not a complete solution.
2457           // It is disabled by default and should only be used as a workaround
2458           // in case we missed any no-execute-unsafe VM code.
2459 
2460           last_addr = addr;
2461 
2462           return EXCEPTION_CONTINUE_EXECUTION;
2463         }
2464       }
2465 
2466       // Last unguard failed or not unguarding
2467       tty->print_raw_cr("Execution protection violation");
2468       report_error(t, exception_code, addr, exception_record,
2469                    exceptionInfo->ContextRecord);
2470       return EXCEPTION_CONTINUE_SEARCH;
2471     }
2472   }
2473 #endif // _WIN64
2474 
2475   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2476       VM_Version::is_cpuinfo_segv_addr(pc)) {
2477     // Verify that the OS saves/restores AVX registers.
2478     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2479   }
2480 
2481   if (t != NULL && t->is_Java_thread()) {
2482     JavaThread* thread = (JavaThread*) t;
2483     bool in_java = thread->thread_state() == _thread_in_Java;
2484     bool in_native = thread->thread_state() == _thread_in_native;
2485     bool in_vm = thread->thread_state() == _thread_in_vm;
2486 
2487     // Handle potential stack overflows up front.
2488     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2489       if (thread->stack_guards_enabled()) {
2490         if (in_java) {
2491           frame fr;
2492           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2493             assert(fr.is_java_frame(), "Must be a Java frame");
2494             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2495           }
2496         }
2497         // Yellow zone violation.  The o/s has unprotected the first yellow
2498         // zone page for us.  Note: must call disable_stack_yellow_reserved_zone
2499         // to update the enabled status, even if the zone contains only one page.
2500         assert(!in_vm, "Undersized StackShadowPages");
2501         thread->disable_stack_yellow_reserved_zone();
2502         // If not in java code, return and hope for the best.
2503         return in_java
2504             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2505             :  EXCEPTION_CONTINUE_EXECUTION;
2506       } else {
2507         // Fatal red zone violation.
2508         thread->disable_stack_red_zone();
2509         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2510         report_error(t, exception_code, pc, exception_record,
2511                       exceptionInfo->ContextRecord);
2512         return EXCEPTION_CONTINUE_SEARCH;
2513       }
2514     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2515       if (in_java) {
2516         // Either stack overflow or null pointer exception.
2517         address addr = (address) exception_record->ExceptionInformation[1];
2518         address stack_end = thread->stack_end();
2519         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2520           // Stack overflow.
2521           assert(!os::uses_stack_guard_pages(),
2522                  "should be caught by red zone code above.");
2523           return Handle_Exception(exceptionInfo,
2524                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2525         }
2526         // Check for safepoint polling and implicit null.
2527         // We only expect null pointers in the stubs (vtable);
2528         // the rest are checked explicitly now.
2529         CodeBlob* cb = CodeCache::find_blob(pc);
2530         if (cb != NULL) {
2531           if (SafepointMechanism::is_poll_address(addr)) {
2532             address stub = SharedRuntime::get_poll_stub(pc);
2533             return Handle_Exception(exceptionInfo, stub);
2534           }
2535         }
2536 #ifdef _WIN64
2537         // If it's a legal stack address, map the entire region in.
2538         if (thread->is_in_usable_stack(addr)) {
2539           addr = (address)((uintptr_t)addr &
2540                             (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2541           os::commit_memory((char *)addr, thread->stack_base() - addr,
2542                             !ExecMem);
2543           return EXCEPTION_CONTINUE_EXECUTION;
2544         }
2545 #endif
2546         // Null pointer exception.
2547         if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
2548           address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2549           if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2550         }
2551         report_error(t, exception_code, pc, exception_record,
2552                       exceptionInfo->ContextRecord);
2553         return EXCEPTION_CONTINUE_SEARCH;
2554       }
2555 
2556 #ifdef _WIN64
2557       // Special care for fast JNI field accessors.
2558       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2559       // in and the heap gets shrunk before the field access.
2560       address slowcase_pc = JNI_FastGetField::find_slowcase_pc(pc);
2561       if (slowcase_pc != (address)-1) {
2562         return Handle_Exception(exceptionInfo, slowcase_pc);
2563       }
2564 #endif
2565 
2566       // Stack overflow or null pointer exception in native code.
2567       report_error(t, exception_code, pc, exception_record,
2568                    exceptionInfo->ContextRecord);
2569       return EXCEPTION_CONTINUE_SEARCH;
2570     } // /EXCEPTION_ACCESS_VIOLATION
2571     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2572 
2573     if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
2574       CompiledMethod* nm = NULL;
2575       JavaThread* thread = (JavaThread*)t;
2576       if (in_java) {
2577         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2578         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
2579       }
2580 
2581       bool is_unsafe_arraycopy = (in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
2582       if (((in_vm || in_native || is_unsafe_arraycopy) && thread->doing_unsafe_access()) ||
2583           (nm != NULL && nm->has_unsafe_access())) {
2584         address next_pc =  Assembler::locate_next_instruction(pc);
2585         if (is_unsafe_arraycopy) {
2586           next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
2587         }
2588         return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
2589       }
2590     }
2591 
2592     if (in_java) {
2593       switch (exception_code) {
2594       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2595         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2596 
2597       case EXCEPTION_INT_OVERFLOW:
2598         return Handle_IDiv_Exception(exceptionInfo);
2599 
2600       } // switch
2601     }
2602     if ((in_java || in_native) && exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2603       LONG result=Handle_FLT_Exception(exceptionInfo);
2604       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2605     }
2606   }
2607 
2608   if (exception_code != EXCEPTION_BREAKPOINT) {
2609     report_error(t, exception_code, pc, exception_record,
2610                  exceptionInfo->ContextRecord);
2611   }
2612   return EXCEPTION_CONTINUE_SEARCH;
2613 }
2614 
2615 #ifndef _WIN64
2616 // Special care for fast JNI accessors.
2617 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2618 // the heap gets shrunk before the field access.
2619 // Need to install our own structured exception handler since native code may
2620 // install its own.
2621 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2622   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2623   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2624     address pc = (address) exceptionInfo->ContextRecord->Eip;
2625     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2626     if (addr != (address)-1) {
2627       return Handle_Exception(exceptionInfo, addr);
2628     }
2629   }
2630   return EXCEPTION_CONTINUE_SEARCH;
2631 }
2632 
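     // Generate a wrapper for each primitive type that runs the fast accessor inside
     // an SEH __try/__except block, so that a fault is redirected to the slow-case pc
     // by fastJNIAccessorExceptionFilter above.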
2633 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2634   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2635                                                      jobject obj,           \
2636                                                      jfieldID fieldID) {    \
2637     __try {                                                                 \
2638       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2639                                                                  obj,       \
2640                                                                  fieldID);  \
2641     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2642                                               _exception_info())) {         \
2643     }                                                                       \
2644     return 0;                                                               \
2645   }
2646 
2647 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2648 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2649 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2650 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2651 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2652 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2653 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2654 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2655 
2656 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2657   switch (type) {
2658   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2659   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2660   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2661   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2662   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2663   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2664   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2665   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2666   default:        ShouldNotReachHere();
2667   }
2668   return (address)-1;
2669 }
2670 #endif
2671 
2672 // Virtual Memory
2673 
2674 int os::vm_page_size() { return os::win32::vm_page_size(); }
2675 int os::vm_allocation_granularity() {
2676   return os::win32::vm_allocation_granularity();
2677 }
2678 
2679 // Windows large page support is available on Windows 2003. In order to use
2680 // large page memory, the administrator must first assign additional privilege
2681 // to the user:
2682 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2683 //   + select Local Policies -> User Rights Assignment
2684 //   + double click "Lock pages in memory", add users and/or groups
2685 //   + reboot
2686 // Note the above steps are needed for administrator as well, as administrators
2687 // by default do not have the privilege to lock pages in memory.
2688 //
2689 // Note about Windows 2003: although the API supports committing large page
2690 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2691 // scenario, I found through experiment that it only uses large pages if the entire
2692 // memory region is reserved and committed in a single VirtualAlloc() call.
2693 // This makes Windows large page support more or less like Solaris ISM, in
2694 // that the entire heap must be committed upfront. This will probably change
2695 // in the future; if so, the code below needs to be revisited.
2696 
2697 #ifndef MEM_LARGE_PAGES
2698   #define MEM_LARGE_PAGES 0x20000000
2699 #endif
2700 
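     // VirtualFree wrapper that asserts on failure instead of silently ignoring it.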
2701 #define VirtualFreeChecked(mem, size, type)                       \
2702   do {                                                            \
2703     bool ret = VirtualFree(mem, size, type);                      \
2704     assert(ret, "Failed to free memory: " PTR_FORMAT, p2i(mem));  \
2705   } while (false)
2706 
2707 // The number of bytes is set up to match 1 pixel at 32 bits per pixel.
2708 static const int gdi_tiny_bitmap_width_bytes = 4;
2709 
2710 static HBITMAP gdi_create_tiny_bitmap(void* mem) {
2711   // The documentation for CreateBitmap states a word-alignment requirement.
2712   STATIC_ASSERT(is_aligned_(gdi_tiny_bitmap_width_bytes, sizeof(WORD)));
2713 
2714   // Some callers use this function to test if memory crossing separate memory
2715   // reservations can be used. Create a height of 2 to make sure that one pixel
2716   // ends up in the first reservation and the other in the second.
2717   int nHeight = 2;
2718 
2719   assert(is_aligned(mem, gdi_tiny_bitmap_width_bytes), "Incorrect alignment");
2720 
2721   // Width is one pixel and correlates with gdi_tiny_bitmap_width_bytes.
2722   int nWidth = 1;
2723 
2724   // Calculate bit count - will be 32.
2725   UINT nBitCount = gdi_tiny_bitmap_width_bytes / nWidth * BitsPerByte;
2726 
2727   return CreateBitmap(
2728       nWidth,
2729       nHeight,
2730       1,         // nPlanes
2731       nBitCount,
2732       mem);      // lpBits
2733 }
2734 
2735 // It has been found that some of the GDI functions fail under these two situations:
2736 //  1) When used with large pages
2737 //  2) When mem crosses the boundary between two separate memory reservations.
2738 //
2739 // This is a small test used to see if the current GDI implementation is
2740 // susceptible to any of these problems.
2741 static bool gdi_can_use_memory(void* mem) {
2742   HBITMAP bitmap = gdi_create_tiny_bitmap(mem);
2743   if (bitmap != NULL) {
2744     DeleteObject(bitmap);
2745     return true;
2746   }
2747 
2748   // Verify that the bitmap could be created with a normal page.
2749   // If this fails, the testing method above isn't reliable.
2750 #ifdef ASSERT
2751   void* verify_mem = ::malloc(4 * 1024);
2752   HBITMAP verify_bitmap = gdi_create_tiny_bitmap(verify_mem);
2753   if (verify_bitmap == NULL) {
2754     fatal("Couldn't create test bitmap with malloced memory");
2755   } else {
2756     DeleteObject(verify_bitmap);
2757   }
2758   ::free(verify_mem);
2759 #endif
2760 
2761   return false;
2762 }
2763 
2764 // Test if GDI functions work when memory spans
2765 // two adjacent memory reservations.
2766 static bool gdi_can_use_split_reservation_memory(bool use_large_pages, size_t granule) {
2767   DWORD mem_large_pages = use_large_pages ? MEM_LARGE_PAGES : 0;
2768 
2769   // Find virtual memory range. Two granules for regions and one for alignment.
2770   void* reserved = VirtualAlloc(NULL,
2771                                 granule * 3,
2772                                 MEM_RESERVE,
2773                                 PAGE_NOACCESS);
2774   if (reserved == NULL) {
2775     // Can't proceed with test - pessimistically report false
2776     return false;
2777   }
2778   VirtualFreeChecked(reserved, 0, MEM_RELEASE);
2779 
2780   // Ensure proper alignment
2781   void* res0 = align_up(reserved, granule);
2782   void* res1 = (char*)res0 + granule;
2783 
2784   // Reserve and commit the first part
2785   void* mem0 = VirtualAlloc(res0,
2786                             granule,
2787                             MEM_RESERVE|MEM_COMMIT|mem_large_pages,
2788                             PAGE_READWRITE);
2789   if (mem0 != res0) {
2790     // Can't proceed with test - pessimistically report false
2791     return false;
2792   }
2793 
2794   // Reserve and commit the second part
2795   void* mem1 = VirtualAlloc(res1,
2796                             granule,
2797                             MEM_RESERVE|MEM_COMMIT|mem_large_pages,
2798                             PAGE_READWRITE);
2799   if (mem1 != res1) {
2800     VirtualFreeChecked(mem0, 0, MEM_RELEASE);
2801     // Can't proceed with test - pessimistically report false
2802     return false;
2803   }
2804 
2805   // Set the bitmap's bits to point one "width" worth of bytes before mem1, so
2806   // that the bitmap extends across the reservation boundary.
2807   void* bitmapBits = (char*)mem1 - gdi_tiny_bitmap_width_bytes;
2808 
2809   bool success = gdi_can_use_memory(bitmapBits);
2810 
2811   VirtualFreeChecked(mem1, 0, MEM_RELEASE);
2812   VirtualFreeChecked(mem0, 0, MEM_RELEASE);
2813 
2814   return success;
2815 }
2816 
2817 // Container for NUMA node list info
2818 class NUMANodeListHolder {
2819  private:
2820   int *_numa_used_node_list;  // allocated below
2821   int _numa_used_node_count;
2822 
2823   void free_node_list() {
2824     FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2825   }
2826 
2827  public:
2828   NUMANodeListHolder() {
2829     _numa_used_node_count = 0;
2830     _numa_used_node_list = NULL;
2831     // do rest of initialization in build routine (after function pointers are set up)
2832   }
2833 
2834   ~NUMANodeListHolder() {
2835     free_node_list();
2836   }
2837 
2838   bool build() {
2839     DWORD_PTR proc_aff_mask;
2840     DWORD_PTR sys_aff_mask;
2841     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2842     ULONG highest_node_number;
2843     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2844     free_node_list();
2845     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2846     for (unsigned int i = 0; i <= highest_node_number; i++) {
2847       ULONGLONG proc_mask_numa_node;
2848       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2849       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2850         _numa_used_node_list[_numa_used_node_count++] = i;
2851       }
2852     }
2853     return (_numa_used_node_count > 1);
2854   }
2855 
2856   int get_count() { return _numa_used_node_count; }
2857   int get_node_list_entry(int n) {
2858     // for indexes out of range, returns -1
2859     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2860   }
2861 
2862 } numa_node_list_holder;
2863 
2864 static size_t _large_page_size = 0;
2865 
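     // Enable SeLockMemoryPrivilege ("Lock pages in memory") on the current process
     // token; Windows requires this privilege before VirtualAlloc may be called with
     // MEM_LARGE_PAGES.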
2866 static bool request_lock_memory_privilege() {
2867   HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2868                                 os::current_process_id());
2869 
2870   bool success = false;
2871   HANDLE hToken = NULL;
2872   LUID luid;
2873   if (hProcess != NULL &&
2874       OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &hToken) &&
2875       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2876 
2877     TOKEN_PRIVILEGES tp;
2878     tp.PrivilegeCount = 1;
2879     tp.Privileges[0].Luid = luid;
2880     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2881 
2882     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2883     // privilege. Check GetLastError() too. See the MSDN documentation.
2884     if (AdjustTokenPrivileges(hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2885         (GetLastError() == ERROR_SUCCESS)) {
2886       success = true;
2887     }
2888   }
2889 
2890   // Cleanup
2891   if (hProcess != NULL) {
2892     CloseHandle(hProcess);
2893   }
2894   if (hToken != NULL) {
2895     CloseHandle(hToken);
2896   }
2897 
2898   return success;
2899 }
2900 
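     // Build the list of NUMA nodes covered by the process affinity mask and verify
     // that GDI can cope with memory spanning separate reservations; returns true
     // only if NUMA interleaving is usable.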
2901 static bool numa_interleaving_init() {
2902   bool success = false;
2903 
2904   // Print a warning if the UseNUMAInterleaving flag is specified on the command line.
2905   bool warn_on_failure = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2906 
2907 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2908 
2909   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2910   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2911   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2912 
2913   if (!numa_node_list_holder.build()) {
2914     WARN("Process does not cover multiple NUMA nodes.");
2915     WARN("...Ignoring UseNUMAInterleaving flag.");
2916     return false;
2917   }
2918 
2919   if (!gdi_can_use_split_reservation_memory(UseLargePages, min_interleave_granularity)) {
2920     WARN("Windows GDI cannot handle split reservations.");
2921     WARN("...Ignoring UseNUMAInterleaving flag.");
2922     return false;
2923   }
2924 
2925   if (log_is_enabled(Debug, os, cpu)) {
2926     Log(os, cpu) log;
2927     log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2928     for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2929       log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2930     }
2931   }
2932 
2933 #undef WARN
2934 
2935   return true;
2936 }
2937 
2938 // This routine is used whenever we need to reserve a contiguous VA range
2939 // but must make separate VirtualAlloc calls for each piece of the range.
2940 // Reasons for doing this:
2941 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
2942 //  * UseNUMAInterleaving requires a separate node for each piece
2943 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2944                                          DWORD prot,
2945                                          bool should_inject_error = false) {
2946   char * p_buf;
2947   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2948   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2949   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2950 
2951   // First reserve enough address space in advance, since we want to be
2952   // able to break a single contiguous virtual address range into multiple
2953   // large page commits, but WS2003 does not allow reserving large page space.
2954   // So we just use 4K pages for the reserve; this gives us a legal contiguous
2955   // address space. Then we deallocate that reservation and re-allocate
2956   // using large pages.
2957   const size_t size_of_reserve = bytes + chunk_size;
2958   if (bytes > size_of_reserve) {
2959     // Overflowed.
2960     return NULL;
2961   }
2962   p_buf = (char *) VirtualAlloc(addr,
2963                                 size_of_reserve,  // size of Reserve
2964                                 MEM_RESERVE,
2965                                 PAGE_READWRITE);
2966   // If reservation failed, return NULL
2967   if (p_buf == NULL) return NULL;
2968   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2969   os::release_memory(p_buf, bytes + chunk_size);
2970 
2971   // We still need to round up to a page boundary (in case we are using large pages),
2972   // but not to a chunk boundary (in case NUMAInterleaveGranularity doesn't align with the page size);
2973   // instead we handle this in the bytes_to_rq computation below.
2974   p_buf = align_up(p_buf, page_size);
2975 
2976   // now go through and allocate one chunk at a time until all bytes are
2977   // allocated
2978   size_t  bytes_remaining = bytes;
2979   // An overflow of align_up() would have been caught above
2980   // in the calculation of size_of_reserve.
2981   char * next_alloc_addr = p_buf;
2982   HANDLE hProc = GetCurrentProcess();
2983 
2984 #ifdef ASSERT
2985   // Variable for the failure injection
2986   int ran_num = os::random();
2987   size_t fail_after = ran_num % bytes;
2988 #endif
2989 
2990   int count=0;
2991   while (bytes_remaining) {
2992     // select bytes_to_rq to get to the next chunk_size boundary
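    // For example (illustrative values): with chunk_size = 2M and next_alloc_addr
    // 512K past a chunk boundary, the first request is 1.5M to reach the next
    // boundary; subsequent requests are whole 2M chunks, and the final request
    // covers whatever remains of 'bytes'.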
2993 
2994     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
2995     // Note allocate and commit
2996     char * p_new;
2997 
2998 #ifdef ASSERT
2999     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
3000 #else
3001     const bool inject_error_now = false;
3002 #endif
3003 
3004     if (inject_error_now) {
3005       p_new = NULL;
3006     } else {
3007       if (!UseNUMAInterleaving) {
3008         p_new = (char *) VirtualAlloc(next_alloc_addr,
3009                                       bytes_to_rq,
3010                                       flags,
3011                                       prot);
3012       } else {
3013         // get the next node to use from the used_node_list
3014         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
3015         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
3016         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
3017       }
3018     }
3019 
3020     if (p_new == NULL) {
3021       // Free any allocated pages
3022       if (next_alloc_addr > p_buf) {
3023         // Some memory was committed so release it.
3024         size_t bytes_to_release = bytes - bytes_remaining;
3025         // NMT has yet to record any individual blocks, so we
3026         // need to create a dummy 'reserve' record to match
3027         // the release.
3028         MemTracker::record_virtual_memory_reserve((address)p_buf,
3029                                                   bytes_to_release, CALLER_PC);
3030         os::release_memory(p_buf, bytes_to_release);
3031       }
3032 #ifdef ASSERT
3033       if (should_inject_error) {
3034         log_develop_debug(pagesize)("Reserving pages individually failed.");
3035       }
3036 #endif
3037       return NULL;
3038     }
3039 
3040     bytes_remaining -= bytes_to_rq;
3041     next_alloc_addr += bytes_to_rq;
3042     count++;
3043   }
3044   // Although the memory is allocated individually, it is returned as one.
3045   // NMT records it as one block.
3046   if ((flags & MEM_COMMIT) != 0) {
3047     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
3048   } else {
3049     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
3050   }
3051 
3052   // made it this far, success
3053   return p_buf;
3054 }
3055 
3056 static size_t large_page_init_decide_size() {
3057   // Print a warning if any large-page-related flag was specified on the command line.
3058   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
3059                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
3060 
3061 #define WARN(msg) if (warn_on_failure) { warning(msg); }
3062 
3063   if (!request_lock_memory_privilege()) {
3064     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
3065     return 0;
3066   }
3067 
3068   size_t size = GetLargePageMinimum();
3069   if (size == 0) {
3070     WARN("Large page is not supported by the processor.");
3071     return 0;
3072   }
3073 
3074 #if defined(IA32) || defined(AMD64)
3075   if (size > 4*M || LargePageSizeInBytes > 4*M) {
3076     WARN("JVM cannot use large pages bigger than 4mb.");
3077     return 0;
3078   }
3079 #endif
3080 
3081   if (LargePageSizeInBytes > 0 && LargePageSizeInBytes % size == 0) {
3082     size = LargePageSizeInBytes;
3083   }
3084 
3085   // Now test allocating a page
3086   void* large_page = VirtualAlloc(NULL,
3087                                   size,
3088                                   MEM_RESERVE|MEM_COMMIT|MEM_LARGE_PAGES,
3089                                   PAGE_READWRITE);
3090   if (large_page == NULL) {
3091     WARN("JVM cannot allocate one single large page.");
3092     return 0;
3093   }
3094 
3095   // Detect if GDI can use memory backed by large pages
3096   if (!gdi_can_use_memory(large_page)) {
3097     WARN("JVM cannot use large pages because of bug in Windows GDI.");
       // Also release the test page on this failure path so it is not leaked.
       VirtualFreeChecked(large_page, 0, MEM_RELEASE);
3098     return 0;
3099   }
3100 
3101   // Release test page
3102   VirtualFreeChecked(large_page, 0, MEM_RELEASE);
3103 
3104 #undef WARN
3105 
3106   return size;
3107 }
3108 
3109 void os::large_page_init() {
3110   if (!UseLargePages) {
3111     return;
3112   }
3113 
3114   _large_page_size = large_page_init_decide_size();
3115 
3116   const size_t default_page_size = (size_t) vm_page_size();
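  // Publish the large page size ahead of the default page size; the
  // _page_sizes list is kept largest-first and 0-terminated.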
3117   if (_large_page_size > default_page_size) {
3118     _page_sizes[0] = _large_page_size;
3119     _page_sizes[1] = default_page_size;
3120     _page_sizes[2] = 0;
3121   }
3122 
3123   UseLargePages = _large_page_size != 0;
3124 
3125   if (UseLargePages && UseLargePagesIndividualAllocation) {
3126     if (!gdi_can_use_split_reservation_memory(true /* use_large_pages */, _large_page_size)) {
3127       if (FLAG_IS_CMDLINE(UseLargePagesIndividualAllocation)) {
3128         warning("Windows GDI cannot handle split reservations.");
3129         warning("...Ignoring UseLargePagesIndividualAllocation flag.");
3130       }
3131       UseLargePagesIndividualAllocation = false;
3132     }
3133   }
3134 }
3135 
3136 int os::create_file_for_heap(const char* dir) {
3137 
3138   const char name_template[] = "/jvmheap.XXXXXX";
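  // The trailing "XXXXXX" is replaced by _mktemp() below with a unique suffix;
  // e.g. (hypothetical) dir "D:\tmp" yields a path like "D:\tmp\jvmheap.a01234".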
3139 
3140   size_t fullname_len = strlen(dir) + strlen(name_template);
3141   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
3142   if (fullname == NULL) {
3143     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
3144     return -1;
3145   }
3146   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
3147   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
3148 
3149   os::native_path(fullname);
3150 
3151   char *path = _mktemp(fullname);
3152   if (path == NULL) {
3153     warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
3154     os::free(fullname);
3155     return -1;
3156   }
3157 
3158   int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
3159 
3160   os::free(fullname);
3161   if (fd < 0) {
3162     warning("Problem opening file for heap (%s)", os::strerror(errno));
3163     return -1;
3164   }
3165   return fd;
3166 }
3167 
3168 // If 'base' is not NULL, the function will return NULL if it cannot map the memory at 'base'.
3169 char* os::map_memory_to_file(char* base, size_t size, int fd) {
3170   assert(fd != -1, "File descriptor is not valid");
3171 
3172   HANDLE fh = (HANDLE)_get_osfhandle(fd);
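  // CreateFileMapping() takes the maximum mapping size as two 32-bit halves,
  // so on 64-bit builds 'size' is passed as separate high and low DWORDs.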
3173 #ifdef _LP64
3174   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3175     (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3176 #else
3177   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3178     0, (DWORD)size, NULL);
3179 #endif
3180   if (fileMapping == NULL) {
3181     if (GetLastError() == ERROR_DISK_FULL) {
3182       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
3183     }
3184     else {
3185       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3186     }
3187 
3188     return NULL;
3189   }
3190 
3191   LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
3192 
3193   CloseHandle(fileMapping);
3194 
3195   return (char*)addr;
3196 }
3197 
3198 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
3199   assert(fd != -1, "File descriptor is not valid");
3200   assert(base != NULL, "Base address cannot be NULL");
3201 
3202   release_memory(base, size);
3203   return map_memory_to_file(base, size, fd);
3204 }
3205 
3206 // On win32, one cannot release just a part of reserved memory, it's an
3207 // all or nothing deal.  When we split a reservation, we must break the
3208 // reservation into two reservations.
3209 void os::split_reserved_memory(char *base, size_t size, size_t split) {
3210 
3211   char* const split_address = base + split;
3212   assert(size > 0, "Sanity");
3213   assert(size > split, "Sanity");
3214   assert(split > 0, "Sanity");
3215   assert(is_aligned(base, os::vm_allocation_granularity()), "Sanity");
3216   assert(is_aligned(split_address, os::vm_allocation_granularity()), "Sanity");
3217 
3218   release_memory(base, size);
3219   reserve_memory(split, base);
3220   reserve_memory(size - split, split_address);
3221 
3222   // NMT: nothing to do here. Since Windows implements the split by
3223   //  releasing and re-reserving memory, the parts are already registered
3224   //  as individual mappings with NMT.
3225 
3226 }
3227 
3228 // Multiple threads can race in this code, but it's not possible to unmap small sections of
3229 // virtual space to get the requested alignment, as can be done on POSIX-like OSes.
3230 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3231 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3232   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3233          "Alignment must be a multiple of allocation granularity (page size)");
3234   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3235 
3236   size_t extra_size = size + alignment;
3237   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3238 
3239   char* aligned_base = NULL;
3240 
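  // Over-reserve by 'alignment' bytes, compute an aligned address within that
  // range, release the whole reservation, and then re-reserve exactly 'size'
  // bytes at the aligned address. If another thread grabs that address in the
  // meantime, the re-reservation fails and the loop retries.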
3241   do {
3242     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3243     if (extra_base == NULL) {
3244       return NULL;
3245     }
3246     // Do manual alignment
3247     aligned_base = align_up(extra_base, alignment);
3248 
3249     if (file_desc != -1) {
3250       os::unmap_memory(extra_base, extra_size);
3251     } else {
3252       os::release_memory(extra_base, extra_size);
3253     }
3254 
3255     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3256 
3257   } while (aligned_base == NULL);
3258 
3259   return aligned_base;
3260 }
3261 
3262 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3263   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3264          "reserve alignment");
3265   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3266   char* res;
3267   // Note that if UseLargePages is on, all the areas that require interleaving
3268   // will go through reserve_memory_special rather than through here.
3269   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3270   if (!use_individual) {
3271     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3272   } else {
3273     elapsedTimer reserveTimer;
3274     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3275     // in numa interleaving, we have to allocate pages individually
3276     // (well really chunks of NUMAInterleaveGranularity size)
3277     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3278     if (res == NULL) {
3279       warning("NUMA page allocation failed");
3280     }
3281     if (Verbose && PrintMiscellaneous) {
3282       reserveTimer.stop();
3283       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3284                     reserveTimer.milliseconds(), reserveTimer.ticks());
3285     }
3286   }
3287   assert(res == NULL || addr == NULL || addr == res,
3288          "Unexpected address from reserve.");
3289 
3290   return res;
3291 }
3292 
3293 // Reserve memory at an arbitrary address, only if that area is
3294 // available (and not reserved for something else).
3295 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3296   // Windows os::reserve_memory() fails if the requested address range is
3297   // not available.
3298   return reserve_memory(bytes, requested_addr);
3299 }
3300 
3301 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
3302   assert(file_desc >= 0, "file_desc is not valid");
3303   return map_memory_to_file(requested_addr, bytes, file_desc);
3304 }
3305 
3306 size_t os::large_page_size() {
3307   return _large_page_size;
3308 }
3309 
3310 bool os::can_commit_large_page_memory() {
3311   // Windows only uses large page memory when the entire region is reserved
3312   // and committed in a single VirtualAlloc() call. This may change in the
3313   // future, but with Windows 2003 it's not possible to commit on demand.
3314   return false;
3315 }
3316 
3317 bool os::can_execute_large_page_memory() {
3318   return true;
3319 }
3320 
3321 char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3322                                     bool exec) {
3323   assert(UseLargePages, "only for large pages");
3324 
3325   if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3326     return NULL; // Fallback to small pages.
3327   }
3328 
3329   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3330   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3331 
3332   // with large pages, there are two cases where we need to use Individual Allocation
3333   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3334   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3335   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3336     log_debug(pagesize)("Reserving large pages individually.");
3337 
3338     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3339     if (p_buf == NULL) {
3340       // give an appropriate warning message
3341       if (UseNUMAInterleaving) {
3342         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3343       }
3344       if (UseLargePagesIndividualAllocation) {
3345         warning("Individually allocated large pages failed, "
3346                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3347       }
3348       return NULL;
3349     }
3350 
3351     return p_buf;
3352 
3353   } else {
3354     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3355 
3356     // Normal policy: just allocate it all at once.
3357     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3358     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3359 
3360     return res;
3361   }
3362 }
3363 
3364 bool os::pd_release_memory_special(char* base, size_t bytes) {
3365   assert(base != NULL, "Sanity check");
3366   return pd_release_memory(base, bytes);
3367 }
3368 
3369 void os::print_statistics() {
3370 }
3371 
3372 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3373   int err = os::get_last_error();
3374   char buf[256];
3375   size_t buf_len = os::lasterror(buf, sizeof(buf));
3376   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3377           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3378           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3379 }
3380 
3381 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3382   if (bytes == 0) {
3383     // Don't bother the OS with noops.
3384     return true;
3385   }
3386   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3387   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3388   // Don't attempt to print anything if the OS call fails. We're
3389   // probably low on resources, so the print itself may cause crashes.
3390 
3391   // Unless we have NUMAInterleaving enabled, the range of a commit
3392   // is always within a reserve covered by a single VirtualAlloc;
3393   // in that case we can just do a single commit for the requested size.
3394   if (!UseNUMAInterleaving) {
3395     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3396       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3397       return false;
3398     }
3399     if (exec) {
3400       DWORD oldprot;
3401       // Windows doc says to use VirtualProtect to get execute permissions
3402       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3403         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3404         return false;
3405       }
3406     }
3407     return true;
3408   } else {
3409 
3410     // when NUMAInterleaving is enabled, the commit might cover a range that
3411     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3412     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3413     // returns represents the number of bytes that can be committed in one step.
3414     size_t bytes_remaining = bytes;
3415     char * next_alloc_addr = addr;
3416     while (bytes_remaining > 0) {
3417       MEMORY_BASIC_INFORMATION alloc_info;
3418       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3419       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3420       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3421                        PAGE_READWRITE) == NULL) {
3422         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3423                                             exec);)
3424         return false;
3425       }
3426       if (exec) {
3427         DWORD oldprot;
3428         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3429                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3430           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3431                                               exec);)
3432           return false;
3433         }
3434       }
3435       bytes_remaining -= bytes_to_rq;
3436       next_alloc_addr += bytes_to_rq;
3437     }
3438   }
3439   // if we made it this far, return true
3440   return true;
3441 }
3442 
3443 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3444                           bool exec) {
3445   // alignment_hint is ignored on this OS
3446   return pd_commit_memory(addr, size, exec);
3447 }
3448 
3449 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3450                                   const char* mesg) {
3451   assert(mesg != NULL, "mesg must be specified");
3452   if (!pd_commit_memory(addr, size, exec)) {
3453     warn_fail_commit_memory(addr, size, exec);
3454     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3455   }
3456 }
3457 
3458 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3459                                   size_t alignment_hint, bool exec,
3460                                   const char* mesg) {
3461   // alignment_hint is ignored on this OS
3462   pd_commit_memory_or_exit(addr, size, exec, mesg);
3463 }
3464 
3465 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3466   if (bytes == 0) {
3467     // Don't bother the OS with noops.
3468     return true;
3469   }
3470   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3471   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3472   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3473 }
3474 
3475 bool os::pd_release_memory(char* addr, size_t bytes) {
3476   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3477 }
3478 
3479 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3480   return os::commit_memory(addr, size, !ExecMem);
3481 }
3482 
3483 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3484   return os::uncommit_memory(addr, size);
3485 }
3486 
3487 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3488   uint count = 0;
3489   bool ret = false;
3490   size_t bytes_remaining = bytes;
3491   char * next_protect_addr = addr;
3492 
3493   // Use VirtualQuery() to get the chunk size.
3494   while (bytes_remaining) {
3495     MEMORY_BASIC_INFORMATION alloc_info;
3496     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3497       return false;
3498     }
3499 
3500     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3501     // We used different API at allocate_pages_individually() based on UseNUMAInterleaving,
3502     // but we don't distinguish here as both cases are protected by same API.
3503     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3504     if (!ret) {
3505       warning("Failed protecting pages individually for chunk #%u", count);
3506       return false;
3507     }
3508 
3509     bytes_remaining -= bytes_to_protect;
3510     next_protect_addr += bytes_to_protect;
3511     count++;
3512   }
3513   return ret;
3514 }
3515 
3516 // Set protections specified
3517 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3518                         bool is_committed) {
3519   unsigned int p = 0;
3520   switch (prot) {
3521   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3522   case MEM_PROT_READ: p = PAGE_READONLY; break;
3523   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3524   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3525   default:
3526     ShouldNotReachHere();
3527   }
3528 
3529   DWORD old_status;
3530 
3531   // Strangely enough, on Win32 one can change protection only for committed
3532   // memory; not a big deal anyway, as 'bytes' is at most 64K here.
3533   if (!is_committed) {
3534     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3535                           "cannot commit protection page");
3536   }
3537   // One cannot use os::guard_memory() here, as on Win32 guard pages
3538   // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3539   //
3540   // Pages in the region become guard pages. Any attempt to access a guard page
3541   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3542   // the guard page status. Guard pages thus act as a one-time access alarm.
3543   bool ret;
3544   if (UseNUMAInterleaving) {
3545     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3546     // so we must protect the chunks individually.
3547     ret = protect_pages_individually(addr, bytes, p, &old_status);
3548   } else {
3549     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3550   }
3551 #ifdef ASSERT
3552   if (!ret) {
3553     int err = os::get_last_error();
3554     char buf[256];
3555     size_t buf_len = os::lasterror(buf, sizeof(buf));
3556     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3557           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3558           buf_len != 0 ? buf : "<no_error_string>", err);
3559   }
3560 #endif
3561   return ret;
3562 }
3563 
3564 bool os::guard_memory(char* addr, size_t bytes) {
3565   DWORD old_status;
3566   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3567 }
3568 
3569 bool os::unguard_memory(char* addr, size_t bytes) {
3570   DWORD old_status;
3571   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3572 }
3573 
3574 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3575 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3576 void os::numa_make_global(char *addr, size_t bytes)    { }
3577 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3578 bool os::numa_topology_changed()                       { return false; }
3579 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3580 int os::numa_get_group_id()                            { return 0; }
3581 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3582   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3583     // Provide an answer for UMA systems
3584     ids[0] = 0;
3585     return 1;
3586   } else {
3587     // check for size bigger than actual groups_num
3588     size = MIN2(size, numa_get_groups_num());
3589     for (int i = 0; i < (int)size; i++) {
3590       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3591     }
3592     return size;
3593   }
3594 }
3595 
3596 int os::numa_get_group_id_for_address(const void* address) {
3597   return 0;
3598 }
3599 
3600 bool os::get_page_info(char *start, page_info* info) {
3601   return false;
3602 }
3603 
3604 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3605                      page_info* page_found) {
3606   return end;
3607 }
3608 
3609 char* os::non_memory_address_word() {
3610   // Must never look like an address returned by reserve_memory,
3611   // even in its subfields (as defined by the CPU immediate fields,
3612   // if the CPU splits constants across multiple instructions).
3613   return (char*)-1;
3614 }
3615 
3616 #define MAX_ERROR_COUNT 100
3617 #define SYS_THREAD_ERROR 0xffffffffUL
3618 
3619 void os::pd_start_thread(Thread* thread) {
3620   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3621   // Returns previous suspend state:
3622   // 0:  Thread was not suspended
3623   // 1:  Thread is running now
3624   // >1: Thread is still suspended.
3625   assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3626 }
3627 
3628 
3629 // Short sleep, direct OS call.
3630 //
3631 // ms = 0, means allow others (if any) to run.
3632 //
3633 void os::naked_short_sleep(jlong ms) {
3634   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3635   Sleep(ms);
3636 }
3637 
3638 // Windows does not provide sleep functionality with nanosecond resolution, so we
3639 // try to approximate this with spinning combined with yielding if another thread
3640 // is ready to run on the current processor.
3641 void os::naked_short_nanosleep(jlong ns) {
3642   assert(ns > -1 && ns < NANOUNITS, "Un-interruptible sleep, short time use only");
3643 
3644   int64_t start = os::javaTimeNanos();
3645   do {
3646     if (SwitchToThread() == 0) {
3647       // Nothing else is ready to run on this cpu, spin a little
3648       SpinPause();
3649     }
3650   } while (os::javaTimeNanos() - start < ns);
3651 }
3652 
3653 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3654 void os::infinite_sleep() {
3655   while (true) {    // sleep forever ...
3656     Sleep(100000);  // ... 100 seconds at a time
3657   }
3658 }
3659 
3660 typedef BOOL (WINAPI * STTSignature)(void);
3661 
3662 void os::naked_yield() {
3663   // Consider passing back the return value from SwitchToThread().
3664   SwitchToThread();
3665 }
3666 
3667 // Win32 only gives you access to seven real priorities at a time,
3668 // so we compress Java's ten down to seven.  It would be better
3669 // if we dynamically adjusted relative priorities.
3670 
3671 int os::java_to_os_priority[CriticalPriority + 1] = {
3672   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3673   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3674   THREAD_PRIORITY_LOWEST,                       // 2
3675   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3676   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3677   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3678   THREAD_PRIORITY_NORMAL,                       // 6
3679   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3680   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3681   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3682   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3683   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3684 };
3685 
3686 int prio_policy1[CriticalPriority + 1] = {
3687   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3688   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3689   THREAD_PRIORITY_LOWEST,                       // 2
3690   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3691   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3692   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3693   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3694   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3695   THREAD_PRIORITY_HIGHEST,                      // 8
3696   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3697   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3698   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3699 };
3700 
3701 static int prio_init() {
3702   // If ThreadPriorityPolicy is 1, switch tables
3703   if (ThreadPriorityPolicy == 1) {
3704     int i;
3705     for (i = 0; i < CriticalPriority + 1; i++) {
3706       os::java_to_os_priority[i] = prio_policy1[i];
3707     }
3708   }
3709   if (UseCriticalJavaThreadPriority) {
3710     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3711   }
3712   return 0;
3713 }
3714 
3715 OSReturn os::set_native_priority(Thread* thread, int priority) {
3716   if (!UseThreadPriorities) return OS_OK;
3717   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3718   return ret ? OS_OK : OS_ERR;
3719 }
3720 
3721 OSReturn os::get_native_priority(const Thread* const thread,
3722                                  int* priority_ptr) {
3723   if (!UseThreadPriorities) {
3724     *priority_ptr = java_to_os_priority[NormPriority];
3725     return OS_OK;
3726   }
3727   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3728   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3729     assert(false, "GetThreadPriority failed");
3730     return OS_ERR;
3731   }
3732   *priority_ptr = os_prio;
3733   return OS_OK;
3734 }
3735 
3736 // GetCurrentThreadId() returns DWORD
3737 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3738 
3739 static int _initial_pid = 0;
3740 
3741 int os::current_process_id() {
3742   return (_initial_pid ? _initial_pid : _getpid());
3743 }
3744 
3745 int    os::win32::_vm_page_size              = 0;
3746 int    os::win32::_vm_allocation_granularity = 0;
3747 int    os::win32::_processor_type            = 0;
3748 // Processor level is not available on non-NT systems, use vm_version instead
3749 int    os::win32::_processor_level           = 0;
3750 julong os::win32::_physical_memory           = 0;
3751 size_t os::win32::_default_stack_size        = 0;
3752 
3753 intx          os::win32::_os_thread_limit    = 0;
3754 volatile intx os::win32::_os_thread_count    = 0;
3755 
3756 bool   os::win32::_is_windows_server         = false;
3757 
3758 // 6573254
3759 // Currently, the bug is observed across all the supported Windows releases,
3760 // including the latest one (as of this writing - Windows Server 2012 R2)
3761 bool   os::win32::_has_exit_bug              = true;
3762 
3763 void os::win32::initialize_system_info() {
3764   SYSTEM_INFO si;
3765   GetSystemInfo(&si);
3766   _vm_page_size    = si.dwPageSize;
3767   _vm_allocation_granularity = si.dwAllocationGranularity;
3768   _processor_type  = si.dwProcessorType;
3769   _processor_level = si.wProcessorLevel;
3770   set_processor_count(si.dwNumberOfProcessors);
3771 
3772   MEMORYSTATUSEX ms;
3773   ms.dwLength = sizeof(ms);
3774 
3775   // MEMORYSTATUSEX also returns ullAvailPhys (free physical memory bytes),
3776   // ullTotalVirtual, ullAvailVirtual, and dwMemoryLoad (% of memory in use).
3777   GlobalMemoryStatusEx(&ms);
3778   _physical_memory = ms.ullTotalPhys;
3779 
3780   if (FLAG_IS_DEFAULT(MaxRAM)) {
3781     // Adjust MaxRAM according to the maximum virtual address space available.
3782     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3783   }
3784 
3785   OSVERSIONINFOEX oi;
3786   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3787   GetVersionEx((OSVERSIONINFO*)&oi);
3788   switch (oi.dwPlatformId) {
3789   case VER_PLATFORM_WIN32_NT:
3790     {
3791       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3792       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3793           oi.wProductType == VER_NT_SERVER) {
3794         _is_windows_server = true;
3795       }
3796     }
3797     break;
3798   default: fatal("Unknown platform");
3799   }
3800 
3801   _default_stack_size = os::current_stack_size();
3802   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3803   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3804          "stack size not a multiple of page size");
3805 
3806   initialize_performance_counter();
3807 }
3808 
3809 
3810 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3811                                       int ebuflen) {
3812   char path[MAX_PATH];
3813   DWORD size;
3814   DWORD pathLen = (DWORD)sizeof(path);
3815   HINSTANCE result = NULL;
3816 
3817   // only allow library name without path component
3818   assert(strchr(name, '\\') == NULL, "path not allowed");
3819   assert(strchr(name, ':') == NULL, "path not allowed");
3820   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3821     jio_snprintf(ebuf, ebuflen,
3822                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3823     return NULL;
3824   }
3825 
3826   // search system directory
3827   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3828     if (size >= pathLen) {
3829       return NULL; // truncated
3830     }
3831     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3832       return NULL; // truncated
3833     }
3834     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3835       return result;
3836     }
3837   }
3838 
3839   // try Windows directory
3840   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3841     if (size >= pathLen) {
3842       return NULL; // truncated
3843     }
3844     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3845       return NULL; // truncated
3846     }
3847     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3848       return result;
3849     }
3850   }
3851 
3852   jio_snprintf(ebuf, ebuflen,
3853                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3854   return NULL;
3855 }
3856 
3857 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3858 #define EXIT_TIMEOUT 300000 /* 5 minutes */
3859 
3860 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3861   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3862   return TRUE;
3863 }
3864 
3865 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3866   // Basic approach:
3867   //  - Each exiting thread registers its intent to exit and then does so.
3868   //  - A thread trying to terminate the process must wait for all
3869   //    threads currently exiting to complete their exit.
3870 
3871   if (os::win32::has_exit_bug()) {
3872     // The array holds handles of the threads that have started exiting by calling
3873     // _endthreadex().
3874     // Should be large enough to avoid blocking the exiting thread due to lack of
3875     // a free slot.
3876     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3877     static int handle_count = 0;
3878 
3879     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3880     static CRITICAL_SECTION crit_sect;
3881     static volatile DWORD process_exiting = 0;
3882     int i, j;
3883     DWORD res;
3884     HANDLE hproc, hthr;
3885 
3886     // We only attempt to register threads until a process exiting
3887     // thread manages to set the process_exiting flag. Any threads
3888     // that come through here after the process_exiting flag is set
3889     // are unregistered and will be caught in the SuspendThread()
3890     // infinite loop below.
3891     bool registered = false;
3892 
3893     // The first thread that reaches this point initializes the critical section.
3894     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3895       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3896     } else if (Atomic::load_acquire(&process_exiting) == 0) {
3897       if (what != EPT_THREAD) {
3898         // Atomically set process_exiting before the critical section
3899         // to increase the visibility between racing threads.
3900         Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
3901       }
3902       EnterCriticalSection(&crit_sect);
3903 
3904       if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
3905         // Remove from the array those handles of the threads that have completed exiting.
3906         for (i = 0, j = 0; i < handle_count; ++i) {
3907           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3908           if (res == WAIT_TIMEOUT) {
3909             handles[j++] = handles[i];
3910           } else {
3911             if (res == WAIT_FAILED) {
3912               warning("WaitForSingleObject failed (%u) in %s: %d\n",
3913                       GetLastError(), __FILE__, __LINE__);
3914             }
3915             // Don't keep the handle, if we failed waiting for it.
3916             CloseHandle(handles[i]);
3917           }
3918         }
3919 
3920         // If there's no free slot in the array of the kept handles, we'll have to
3921         // wait until at least one thread completes exiting.
3922         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3923           // Raise the priority of the oldest exiting thread to increase its chances
3924           // to complete sooner.
3925           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3926           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3927           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3928             i = (res - WAIT_OBJECT_0);
3929             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3930             for (; i < handle_count; ++i) {
3931               handles[i] = handles[i + 1];
3932             }
3933           } else {
3934             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3935                     (res == WAIT_FAILED ? "failed" : "timed out"),
3936                     GetLastError(), __FILE__, __LINE__);
3937             // Don't keep handles, if we failed waiting for them.
3938             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3939               CloseHandle(handles[i]);
3940             }
3941             handle_count = 0;
3942           }
3943         }
3944 
3945         // Store a duplicate of the current thread handle in the array of handles.
3946         hproc = GetCurrentProcess();
3947         hthr = GetCurrentThread();
3948         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3949                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
3950           warning("DuplicateHandle failed (%u) in %s: %d\n",
3951                   GetLastError(), __FILE__, __LINE__);
3952 
3953           // We can't register this thread (no more handles) so this thread
3954           // may be racing with a thread that is calling exit(). If the thread
3955           // that is calling exit() has managed to set the process_exiting
3956           // flag, then this thread will be caught in the SuspendThread()
3957           // infinite loop below which closes that race. A small timing
3958           // window remains before the process_exiting flag is set, but it
3959           // is only exposed when we are out of handles.
3960         } else {
3961           ++handle_count;
3962           registered = true;
3963 
3964           // The current exiting thread has stored its handle in the array, and now
3965           // should leave the critical section before calling _endthreadex().
3966         }
3967 
3968       } else if (what != EPT_THREAD && handle_count > 0) {
3969         jlong start_time, finish_time, timeout_left;
3970         // Before ending the process, make sure all the threads that had called
3971         // _endthreadex() completed.
3972 
3973         // Set the priority level of the current thread to the same value as
3974         // the priority level of exiting threads.
3975         // This is to ensure it will be given a fair chance to execute if
3976         // the timeout expires.
3977         hthr = GetCurrentThread();
3978         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
3979         start_time = os::javaTimeNanos();
3980         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
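        // EXIT_TIMEOUT is in milliseconds while javaTimeNanos() is in
        // nanoseconds, hence the * 1000000L here and the / 1000000L below.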
3981         for (i = 0; ; ) {
3982           int portion_count = handle_count - i;
3983           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
3984             portion_count = MAXIMUM_WAIT_OBJECTS;
3985           }
3986           for (j = 0; j < portion_count; ++j) {
3987             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
3988           }
3989           timeout_left = (finish_time - start_time) / 1000000L;
3990           if (timeout_left < 0) {
3991             timeout_left = 0;
3992           }
3993           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
3994           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
3995             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3996                     (res == WAIT_FAILED ? "failed" : "timed out"),
3997                     GetLastError(), __FILE__, __LINE__);
3998             // Reset portion_count so we close the remaining
3999             // handles due to this error.
4000             portion_count = handle_count - i;
4001           }
4002           for (j = 0; j < portion_count; ++j) {
4003             CloseHandle(handles[i + j]);
4004           }
4005           if ((i += portion_count) >= handle_count) {
4006             break;
4007           }
4008           start_time = os::javaTimeNanos();
4009         }
4010         handle_count = 0;
4011       }
4012 
4013       LeaveCriticalSection(&crit_sect);
4014     }
4015 
4016     if (!registered &&
4017         Atomic::load_acquire(&process_exiting) != 0 &&
4018         process_exiting != GetCurrentThreadId()) {
4019       // Some other thread is about to call exit(), so we don't let
4020       // the current unregistered thread proceed to exit() or _endthreadex()
4021       while (true) {
4022         SuspendThread(GetCurrentThread());
4023         // Avoid busy-wait loop, if SuspendThread() failed.
4024         Sleep(EXIT_TIMEOUT);
4025       }
4026     }
4027   }
4028 
4029   // We are here if either
4030   // - there's no 'race at exit' bug on this OS release;
4031   // - initialization of the critical section failed (unlikely);
4032   // - the current thread has registered itself and left the critical section;
4033   // - the process-exiting thread has raised the flag and left the critical section.
4034   if (what == EPT_THREAD) {
4035     _endthreadex((unsigned)exit_code);
4036   } else if (what == EPT_PROCESS) {
4037     ::exit(exit_code);
4038   } else {
4039     _exit(exit_code);
4040   }
4041 
4042   // Should not reach here
4043   return exit_code;
4044 }
4045 
4046 #undef EXIT_TIMEOUT
4047 
4048 void os::win32::setmode_streams() {
4049   _setmode(_fileno(stdin), _O_BINARY);
4050   _setmode(_fileno(stdout), _O_BINARY);
4051   _setmode(_fileno(stderr), _O_BINARY);
4052 }
4053 
4054 void os::wait_for_keypress_at_exit(void) {
4055   if (PauseAtExit) {
4056     fprintf(stderr, "Press any key to continue...\n");
4057     fgetc(stdin);
4058   }
4059 }
4060 
4061 
4062 bool os::message_box(const char* title, const char* message) {
4063   int result = MessageBox(NULL, message, title,
4064                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4065   return result == IDYES;
4066 }
4067 
4068 #ifndef PRODUCT
4069 #ifndef _WIN64
4070 // Helpers to check whether NX protection is enabled
4071 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
4072   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
4073       pex->ExceptionRecord->NumberParameters > 0 &&
4074       pex->ExceptionRecord->ExceptionInformation[0] ==
4075       EXCEPTION_INFO_EXEC_VIOLATION) {
4076     return EXCEPTION_EXECUTE_HANDLER;
4077   }
4078   return EXCEPTION_CONTINUE_SEARCH;
4079 }
4080 
4081 void nx_check_protection() {
4082   // If NX is enabled we'll get an exception calling into code on the stack
4083   char code[] = { (char)0xC3 }; // ret
4084   void *code_ptr = (void *)code;
4085   __try {
4086     __asm call code_ptr
4087   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
4088     tty->print_raw_cr("NX protection detected.");
4089   }
4090 }
4091 #endif // _WIN64
4092 #endif // PRODUCT
4093 
4094 // This is called _before_ the global arguments have been parsed
4095 void os::init(void) {
4096   _initial_pid = _getpid();
4097 
4098   init_random(1234567);
4099 
4100   win32::initialize_system_info();
4101   win32::setmode_streams();
4102   init_page_sizes((size_t) win32::vm_page_size());
4103 
4104   // This may be overridden later when argument processing is done.
4105   FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);
4106 
4107   // Initialize main_process and main_thread
4108   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4109   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4110                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4111     fatal("DuplicateHandle failed\n");
4112   }
4113   main_thread_id = (int) GetCurrentThreadId();
4114 
4115   // initialize fast thread access - only used for 32-bit
4116   win32::initialize_thread_ptr_offset();
4117 }
4118 
4119 // To install functions for atexit processing
4120 extern "C" {
4121   static void perfMemory_exit_helper() {
4122     perfMemory_exit();
4123   }
4124 }
4125 
4126 static jint initSock();
4127 
4128 // this is called _after_ the global arguments have been parsed
4129 jint os::init_2(void) {
4130 
4131   // This could be set any time but all platforms
4132   // have to set it the same so we have to mirror Solaris.
4133   DEBUG_ONLY(os::set_mutex_init_done();)
4134 
4135   // Setup Windows Exceptions
4136 
4137 #if INCLUDE_AOT
4138   // If AOT is enabled we need to install a vectored exception handler
4139   // in order to forward implicit exceptions from code in AOT
4140   // generated DLLs.  This is necessary since these DLLs are not
4141   // registered for structured exceptions like codecache methods are.
4142   if (AOTLibrary != NULL && (UseAOT || FLAG_IS_DEFAULT(UseAOT))) {
4143     topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelVectoredExceptionFilter);
4144   }
4145 #endif
4146 
4147   // for debugging float code generation bugs
4148   if (ForceFloatExceptions) {
4149 #ifndef  _WIN64
4150     static long fp_control_word = 0;
4151     __asm { fstcw fp_control_word }
4152     // see Intel PPro Manual, Vol. 2, p 7-16
4153     const long precision = 0x20;
4154     const long underflow = 0x10;
4155     const long overflow  = 0x08;
4156     const long zero_div  = 0x04;
4157     const long denorm    = 0x02;
4158     const long invalid   = 0x01;
4159     fp_control_word |= invalid;
4160     __asm { fldcw fp_control_word }
4161 #endif
4162   }
4163 
4164   // If stack_commit_size is 0, Windows will reserve the default size,
4165   // but only commit a small portion of it.
4166   size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
4167   size_t default_reserve_size = os::win32::default_stack_size();
4168   size_t actual_reserve_size = stack_commit_size;
4169   if (stack_commit_size < default_reserve_size) {
4170     // If stack_commit_size == 0, we want this too
4171     actual_reserve_size = default_reserve_size;
4172   }
4173 
4174   // Check minimum allowable stack size for thread creation and to initialize
4175   // the java system classes, including StackOverflowError - depends on page
4176   // size.  Add two 4K pages for compiler2 recursion in main thread.
4177   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4178   // class initialization depending on 32 or 64 bit VM.
4179   size_t min_stack_allowed =
4180             (size_t)(JavaThread::stack_guard_zone_size() +
4181                      JavaThread::stack_shadow_zone_size() +
4182                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
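  // For example (illustrative): on a 64-bit build with C2 the last term is
  // (4 * 8 + 2) * 4K = 136K, on top of the guard and shadow zones.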
4183 
4184   min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
4185 
4186   if (actual_reserve_size < min_stack_allowed) {
4187     tty->print_cr("\nThe Java thread stack size specified is too small. "
4188                   "Specify at least %dk",
4189                   min_stack_allowed / K);
4190     return JNI_ERR;
4191   }
4192 
4193   JavaThread::set_stack_size_at_create(stack_commit_size);
4194 
4195   // Calculate the theoretical max. number of threads, to guard against artificial
4196   // out-of-memory situations where all available address space has been
4197   // reserved by thread stacks.
4198   assert(actual_reserve_size != 0, "Must have a stack");
4199 
4200   // Calculate the thread limit when we should start doing Virtual Memory
4201   // banging. Currently when the threads will have used all but 200Mb of space.
4202   //
4203   // TODO: consider performing a similar calculation for commit size instead
4204   // as reserve size, since on a 64-bit platform we'll run into that more
4205   // often than running out of virtual memory space.  We can use the
4206   // lower value of the two calculations as the os_thread_limit.
4207   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4208   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
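  // For example (illustrative): on a 32-bit VM this is roughly (2G - 200M)
  // divided by the per-thread reserve size; on 64-bit it is effectively unlimited.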
4209 
4210   // at exit methods are called in the reverse order of their registration.
4211   // there is no limit to the number of functions registered. atexit does
4212   // not set errno.
4213 
4214   if (PerfAllowAtExitRegistration) {
4215     // only register atexit functions if PerfAllowAtExitRegistration is set.
4216     // atexit functions can be delayed until process exit time, which
4217     // can be problematic for embedded VM situations. Embedded VMs should
4218     // call DestroyJavaVM() to assure that VM resources are released.
4219 
4220     // note: perfMemory_exit_helper atexit function may be removed in
4221     // the future if the appropriate cleanup code can be added to the
4222     // VM_Exit VMOperation's doit method.
4223     if (atexit(perfMemory_exit_helper) != 0) {
4224       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4225     }
4226   }
4227 
4228 #ifndef _WIN64
4229   // Print something if NX is enabled (win32 on AMD64)
4230   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4231 #endif
4232 
4233   // initialize thread priority policy
4234   prio_init();
4235 
4236   UseNUMA = false; // We don't fully support this yet
4237 
4238   if (UseNUMAInterleaving || (UseNUMA && FLAG_IS_DEFAULT(UseNUMAInterleaving))) {
4239     if (!numa_interleaving_init()) {
4240       FLAG_SET_ERGO(UseNUMAInterleaving, false);
4241     } else if (!UseNUMAInterleaving) {
4242       // When NUMA requested, not-NUMA-aware allocations default to interleaving.
4243       FLAG_SET_ERGO(UseNUMAInterleaving, true);
4244     }
4245   }
4246 
4247   if (initSock() != JNI_OK) {
4248     return JNI_ERR;
4249   }
4250 
4251   SymbolEngine::recalc_search_path();
4252 
4253   // Initialize data for jdk.internal.misc.Signal
4254   if (!ReduceSignalUsage) {
4255     jdk_misc_signal_init();
4256   }
4257 
4258   return JNI_OK;
4259 }
4260 
4261 // combine the high and low DWORD into a ULONGLONG
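// e.g. high_word = 0x00000001, low_word = 0x00000002 yields 0x0000000100000002.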
4262 static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
4263   ULONGLONG value = high_word;
4264   value <<= sizeof(high_word) * 8;
4265   value |= low_word;
4266   return value;
4267 }
4268 
4269 // Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
4270 static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
4271   ::memset((void*)sbuf, 0, sizeof(struct stat));
4272   sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
4273   sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
4274                                   file_data.ftLastWriteTime.dwLowDateTime);
4275   sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
4276                                   file_data.ftCreationTime.dwLowDateTime);
4277   sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
4278                                   file_data.ftLastAccessTime.dwLowDateTime);
4279   if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
4280     sbuf->st_mode |= S_IFDIR;
4281   } else {
4282     sbuf->st_mode |= S_IFREG;
4283   }
4284 }
4285 
4286 static errno_t convert_to_unicode(char const* char_path, LPWSTR* unicode_path) {
4287   // Get required buffer size to convert to Unicode
4288   int unicode_path_len = MultiByteToWideChar(CP_ACP,
4289                                              MB_ERR_INVALID_CHARS,
4290                                              char_path, -1,
4291                                              NULL, 0);
4292   if (unicode_path_len == 0) {
4293     return EINVAL;
4294   }
4295 
4296   *unicode_path = NEW_C_HEAP_ARRAY(WCHAR, unicode_path_len, mtInternal);
4297 
4298   int result = MultiByteToWideChar(CP_ACP,
4299                                    MB_ERR_INVALID_CHARS,
4300                                    char_path, -1,
4301                                    *unicode_path, unicode_path_len);
4302   assert(result == unicode_path_len, "length already checked above");
4303 
4304   return ERROR_SUCCESS;
4305 }
4306 
4307 static errno_t get_full_path(LPCWSTR unicode_path, LPWSTR* full_path) {
4308   // Get required buffer size to convert to full path. The return
4309   // value INCLUDES the terminating null character.
4310   DWORD full_path_len = GetFullPathNameW(unicode_path, 0, NULL, NULL);
4311   if (full_path_len == 0) {
4312     return EINVAL;
4313   }
4314 
4315   *full_path = NEW_C_HEAP_ARRAY(WCHAR, full_path_len, mtInternal);
4316 
4317   // When the buffer has sufficient size, the return value EXCLUDES the
4318   // terminating null character
4319   DWORD result = GetFullPathNameW(unicode_path, full_path_len, *full_path, NULL);
4320   assert(result <= full_path_len, "length already checked above");
4321 
4322   return ERROR_SUCCESS;
4323 }
4324 
4325 static void set_path_prefix(char* buf, LPWSTR* prefix, int* prefix_off, bool* needs_fullpath) {
4326   *prefix_off = 0;
4327   *needs_fullpath = true;
4328 
4329   if (::isalpha(buf[0]) && !::IsDBCSLeadByte(buf[0]) && buf[1] == ':' && buf[2] == '\\') {
4330     *prefix = L"\\\\?\\";
4331   } else if (buf[0] == '\\' && buf[1] == '\\') {
4332     if (buf[2] == '?' && buf[3] == '\\') {
4333       *prefix = L"";
4334       *needs_fullpath = false;
4335     } else {
4336       *prefix = L"\\\\?\\UNC";
4337       *prefix_off = 1; // Overwrite the first char with the prefix, so \\share\path becomes \\?\UNC\share\path
4338     }
4339   } else {
4340     *prefix = L"\\\\?\\";
4341   }
4342 }
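     // Illustrative examples of the prefix chosen by set_path_prefix():
     //   "C:\foo"         -> "\\?\"     (drive-letter path; the full-path expansion below still applies)
     //   "\\server\share" -> "\\?\UNC"  (UNC path; prefix_off skips one leading backslash)
     //   "\\?\C:\foo"     -> ""         (already prefixed; no full-path expansion needed)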
4343 
4344 // Returns the given path as an absolute wide path in UNC format. The returned path is NULL
4345 // on error (with err being set accordingly) and should be freed via os::free() otherwise.
4346 // additional_space is the number of extra wchar_t slots the function adds to the
4347 // allocation of the return buffer (such that the size of the returned buffer is at
4348 // least wcslen() of the returned path + 1 + additional_space).
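     // Example (illustrative): "C:\temp\x.txt" comes back as "\\?\C:\temp\x.txt",
     // and "\\server\share\" comes back as "\\?\UNC\server\share" (the trailing
     // separator is dropped).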
4349 static wchar_t* wide_abs_unc_path(char const* path, errno_t & err, int additional_space = 0) {
4350   if ((path == NULL) || (path[0] == '\0')) {
4351     err = ENOENT;
4352     return NULL;
4353   }
4354 
4355   // Need to allocate at least room for 3 characters, since os::native_path transforms "C:" to "C:.".
4356   size_t buf_len = 1 + MAX2((size_t)3, strlen(path));
4357   char* buf = NEW_C_HEAP_ARRAY(char, buf_len, mtInternal);
4358   strncpy(buf, path, buf_len);
4359   os::native_path(buf);
4360 
4361   LPWSTR prefix = NULL;
4362   int prefix_off = 0;
4363   bool needs_fullpath = true;
4364   set_path_prefix(buf, &prefix, &prefix_off, &needs_fullpath);
4365 
4366   LPWSTR unicode_path = NULL;
4367   err = convert_to_unicode(buf, &unicode_path);
4368   FREE_C_HEAP_ARRAY(char, buf);
4369   if (err != ERROR_SUCCESS) {
4370     return NULL;
4371   }
4372 
4373   LPWSTR converted_path = NULL;
4374   if (needs_fullpath) {
4375     err = get_full_path(unicode_path, &converted_path);
4376   } else {
4377     converted_path = unicode_path;
4378   }
4379 
4380   LPWSTR result = NULL;
4381   if (converted_path != NULL) {
4382     size_t prefix_len = wcslen(prefix);
4383     size_t result_len = prefix_len - prefix_off + wcslen(converted_path) + additional_space + 1;
4384     result = NEW_C_HEAP_ARRAY(WCHAR, result_len, mtInternal);
4385     _snwprintf(result, result_len, L"%s%s", prefix, &converted_path[prefix_off]);
4386 
4387     // Remove trailing pathsep (not for \\?\<DRIVE>:\, since it would make it relative)
4388     result_len = wcslen(result);
4389     if ((result[result_len - 1] == L'\\') &&
4390         !(::iswalpha(result[4]) && result[5] == L':' && result_len == 7)) {
4391       result[result_len - 1] = L'\0';
4392     }
4393   }
4394 
4395   if (converted_path != unicode_path) {
4396     FREE_C_HEAP_ARRAY(WCHAR, converted_path);
4397   }
4398   FREE_C_HEAP_ARRAY(WCHAR, unicode_path);
4399 
4400   return static_cast<wchar_t*>(result); // LPWSTR and wchar_t* are the same type on Windows.
4401 }
4402 
4403 int os::stat(const char *path, struct stat *sbuf) {
4404   errno_t err;
4405   wchar_t* wide_path = wide_abs_unc_path(path, err);
4406 
4407   if (wide_path == NULL) {
4408     errno = err;
4409     return -1;
4410   }
4411 
4412   WIN32_FILE_ATTRIBUTE_DATA file_data;
4413   BOOL bret = ::GetFileAttributesExW(wide_path, GetFileExInfoStandard, &file_data);
4414   os::free(wide_path);
4415 
4416   if (!bret) {
4417     errno = ::GetLastError();
4418     return -1;
4419   }
4420 
4421   file_attribute_data_to_stat(sbuf, file_data);
4422   return 0;
4423 }
4424 
4425 static HANDLE create_read_only_file_handle(const char* file) {
4426   errno_t err;
4427   wchar_t* wide_path = wide_abs_unc_path(file, err);
4428 
4429   if (wide_path == NULL) {
4430     errno = err;
4431     return INVALID_HANDLE_VALUE;
4432   }
4433 
4434   HANDLE handle = ::CreateFileW(wide_path, 0, FILE_SHARE_READ,
4435                                 NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4436   os::free(wide_path);
4437 
4438   return handle;
4439 }
4440 
4441 bool os::same_files(const char* file1, const char* file2) {
4442 
4443   if (file1 == NULL && file2 == NULL) {
4444     return true;
4445   }
4446 
4447   if (file1 == NULL || file2 == NULL) {
4448     return false;
4449   }
4450 
4451   if (strcmp(file1, file2) == 0) {
4452     return true;
4453   }
4454 
4455   HANDLE handle1 = create_read_only_file_handle(file1);
4456   HANDLE handle2 = create_read_only_file_handle(file2);
4457   bool result = false;
4458 
4459   // if we could open both paths...
4460   if (handle1 != INVALID_HANDLE_VALUE && handle2 != INVALID_HANDLE_VALUE) {
4461     BY_HANDLE_FILE_INFORMATION fileInfo1;
4462     BY_HANDLE_FILE_INFORMATION fileInfo2;
4463     if (::GetFileInformationByHandle(handle1, &fileInfo1) &&
4464       ::GetFileInformationByHandle(handle2, &fileInfo2)) {
4465       // the paths are the same if they refer to the same file (fileindex) on the same volume (volume serial number)
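           // (e.g. two hard links to the same file compare equal here, while two
           // separate copies with identical contents do not)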
4466       if (fileInfo1.dwVolumeSerialNumber == fileInfo2.dwVolumeSerialNumber &&
4467         fileInfo1.nFileIndexHigh == fileInfo2.nFileIndexHigh &&
4468         fileInfo1.nFileIndexLow == fileInfo2.nFileIndexLow) {
4469         result = true;
4470       }
4471     }
4472   }
4473 
4474   // free the handles
4475   if (handle1 != INVALID_HANDLE_VALUE) {
4476     ::CloseHandle(handle1);
4477   }
4478 
4479   if (handle2 != INVALID_HANDLE_VALUE) {
4480     ::CloseHandle(handle2);
4481   }
4482 
4483   return result;
4484 }
4485 
4486 #define FT2INT64(ft) \
4487   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
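     // e.g. a FILETIME of {dwHighDateTime = 0, dwLowDateTime = 10000} packs to
     // 10000 (100-nanosecond units), i.e. 1 millisecond  (illustrative)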
4488 
4489 
4490 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4491 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4492 // of a thread.
4493 //
4494 // current_thread_cpu_time() and thread_cpu_time(Thread*) return
4495 // the fast estimate available on the platform.
4496 
4497 // current_thread_cpu_time() is not optimized for Windows yet
4498 jlong os::current_thread_cpu_time() {
4499   // return user + sys since the cost is the same
4500   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4501 }
4502 
4503 jlong os::thread_cpu_time(Thread* thread) {
4504   // consistent with what current_thread_cpu_time() returns.
4505   return os::thread_cpu_time(thread, true /* user+sys */);
4506 }
4507 
4508 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4509   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4510 }
4511 
4512 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4513   // This code is a copy from the classic VM -> hpi::sysThreadCPUTime
4514   // If this function changes, os::is_thread_cpu_time_supported() should too
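       // GetThreadTimes reports user and kernel times in 100-nanosecond FILETIME
       // units; the "* 100" below converts them to nanoseconds.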
4515   FILETIME CreationTime;
4516   FILETIME ExitTime;
4517   FILETIME KernelTime;
4518   FILETIME UserTime;
4519 
4520   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4521                       &ExitTime, &KernelTime, &UserTime) == 0) {
4522     return -1;
4523   } else if (user_sys_cpu_time) {
4524     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4525   } else {
4526     return FT2INT64(UserTime) * 100;
4527   }
4528 }
4529 
4530 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4531   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4532   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4533   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4534   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4535 }
4536 
4537 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4538   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4539   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4540   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4541   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4542 }
4543 
4544 bool os::is_thread_cpu_time_supported() {
4545   // see os::thread_cpu_time
4546   FILETIME CreationTime;
4547   FILETIME ExitTime;
4548   FILETIME KernelTime;
4549   FILETIME UserTime;
4550 
4551   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4552                       &KernelTime, &UserTime) == 0) {
4553     return false;
4554   } else {
4555     return true;
4556   }
4557 }
4558 
4559 // Windows doesn't provide a loadavg primitive, so this is stubbed out for now.
4560 // It does have primitives (PDH API) to get CPU usage and run queue length.
4561 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4562 // If we wanted to implement loadavg on Windows, we have a few options:
4563 //
4564 // a) Query CPU usage and run queue length and "fake" an answer by
4565 //    returning the CPU usage if it's under 100%, and the run queue
4566 //    length otherwise.  It turns out that querying is pretty slow
4567 //    on Windows, on the order of 200 microseconds on a fast machine.
4568 //    Note that on Windows the CPU usage value is the % usage
4569 //    since the last time the API was called (and the first call
4570 //    returns 100%), so we'd have to deal with that as well.
4571 //
4572 // b) Sample the "fake" answer using a sampling thread and store
4573 //    the answer in a global variable.  The call to loadavg would
4574 //    just return the value of the global, avoiding the slow query.
4575 //
4576 // c) Sample a better answer using exponential decay to smooth the
4577 //    value.  This is basically the algorithm used by UNIX kernels.
4578 //
4579 // Note that sampling thread starvation could affect both (b) and (c).
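     //
     // A minimal sketch of option (a), kept here as a comment only (it is not
     // compiled as part of this file). It assumes <pdh.h>/<pdhmsg.h> and linking
     // against pdh.lib; query_cpu_usage_percent is a hypothetical helper name,
     // not an existing HotSpot function:
     //
     //   static double query_cpu_usage_percent() {
     //     PDH_HQUERY query;
     //     PDH_HCOUNTER counter;
     //     PDH_FMT_COUNTERVALUE value;
     //     if (PdhOpenQueryW(NULL, 0, &query) != ERROR_SUCCESS) return -1.0;
     //     if (PdhAddEnglishCounterW(query, L"\\Processor(_Total)\\% Processor Time",
     //                               0, &counter) != ERROR_SUCCESS) {
     //       PdhCloseQuery(query);
     //       return -1.0;
     //     }
     //     PdhCollectQueryData(query);   // first sample only establishes a baseline
     //     Sleep(100);
     //     PdhCollectQueryData(query);   // second sample yields a usage percentage
     //     double usage = -1.0;
     //     if (PdhGetFormattedCounterValue(counter, PDH_FMT_DOUBLE, NULL, &value) == ERROR_SUCCESS) {
     //       usage = value.doubleValue;
     //     }
     //     PdhCloseQuery(query);
     //     return usage;
     //   }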
4580 int os::loadavg(double loadavg[], int nelem) {
4581   return -1;
4582 }
4583 
4584 
4585 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4586 bool os::dont_yield() {
4587   return DontYieldALot;
4588 }
4589 
4590 int os::open(const char *path, int oflag, int mode) {
4591   errno_t err;
4592   wchar_t* wide_path = wide_abs_unc_path(path, err);
4593 
4594   if (wide_path == NULL) {
4595     errno = err;
4596     return -1;
4597   }
4598   int fd = ::_wopen(wide_path, oflag | O_BINARY | O_NOINHERIT, mode);
4599   os::free(wide_path);
4600 
4601   if (fd == -1) {
4602     errno = ::GetLastError();
4603   }
4604 
4605   return fd;
4606 }
4607 
4608 FILE* os::open(int fd, const char* mode) {
4609   return ::_fdopen(fd, mode);
4610 }
4611 
4612 // Is a (classpath) directory empty?
4613 bool os::dir_is_empty(const char* path) {
4614   errno_t err;
4615   wchar_t* wide_path = wide_abs_unc_path(path, err, 2);
4616 
4617   if (wide_path == NULL) {
4618     errno = err;
4619     return false;
4620   }
4621 
4622   // Make sure we end with "\\*"
4623   if (wide_path[wcslen(wide_path) - 1] == L'\\') {
4624     wcscat(wide_path, L"*");
4625   } else {
4626     wcscat(wide_path, L"\\*");
4627   }
4628 
4629   WIN32_FIND_DATAW fd;
4630   HANDLE f = ::FindFirstFileW(wide_path, &fd);
4631   os::free(wide_path);
4632   bool is_empty = true;
4633 
4634   if (f != INVALID_HANDLE_VALUE) {
4635     while (is_empty && ::FindNextFileW(f, &fd)) {
4636       // An empty directory contains only the current directory entry (".")
4637       // and the parent directory entry ("..").
4638       if ((wcscmp(fd.cFileName, L".") != 0) &&
4639           (wcscmp(fd.cFileName, L"..") != 0)) {
4640         is_empty = false;
4641       }
4642     }
4643     FindClose(f);
4644   } else {
4645     errno = ::GetLastError();
4646   }
4647 
4648   return is_empty;
4649 }
4650 
4651 // create binary file, rewriting existing file if required
4652 int os::create_binary_file(const char* path, bool rewrite_existing) {
4653   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4654   if (!rewrite_existing) {
4655     oflags |= _O_EXCL;
4656   }
4657   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4658 }
4659 
4660 // return current position of file pointer
4661 jlong os::current_file_offset(int fd) {
4662   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4663 }
4664 
4665 // move file pointer to the specified offset
4666 jlong os::seek_to_file_offset(int fd, jlong offset) {
4667   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4668 }
4669 
4670 
4671 jlong os::lseek(int fd, jlong offset, int whence) {
4672   return (jlong) ::_lseeki64(fd, offset, whence);
4673 }
4674 
4675 ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4676   OVERLAPPED ov;
4677   DWORD nread;
4678   BOOL result;
4679 
4680   ZeroMemory(&ov, sizeof(ov));
4681   ov.Offset = (DWORD)offset;
4682   ov.OffsetHigh = (DWORD)(offset >> 32);
4683 
4684   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4685 
4686   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4687 
4688   return result ? nread : 0;
4689 }
4690 
4691 
4692 // This method is a slightly reworked copy of JDK's sysNativePath
4693 // from src/windows/hpi/src/path_md.c
4694 
4695 // Convert a pathname to native format.  On win32, this involves forcing all
4696 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4697 // sometimes rejects '/') and removing redundant separators.  The input path is
4698 // assumed to have been converted into the character encoding used by the local
4699 // system.  Because this might be a double-byte encoding, care is taken to
4700 // treat double-byte lead characters correctly.
4701 //
4702 // This procedure modifies the given path in place, as the result is never
4703 // longer than the original.  There is no error return; this operation always
4704 // succeeds.
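     // Examples (illustrative):
     //   "C:/a//b/"         becomes "C:\a\b"
     //   "//server//share/" becomes "\\server\share"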
4705 char * os::native_path(char *path) {
4706   char *src = path, *dst = path, *end = path;
4707   char *colon = NULL;  // If a drive specifier is found, this will
4708                        // point to the colon following the drive letter
4709 
4710   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4711   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4712           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4713 
4714   // Check for leading separators
4715 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4716   while (isfilesep(*src)) {
4717     src++;
4718   }
4719 
4720   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4721     // Remove leading separators if followed by drive specifier.  This
4722     // hack is necessary to support file URLs containing drive
4723     // specifiers (e.g., "file://c:/path").  As a side effect,
4724     // "/c:/path" can be used as an alternative to "c:/path".
4725     *dst++ = *src++;
4726     colon = dst;
4727     *dst++ = ':';
4728     src++;
4729   } else {
4730     src = path;
4731     if (isfilesep(src[0]) && isfilesep(src[1])) {
4732       // UNC pathname: Retain first separator; leave src pointed at
4733       // second separator so that further separators will be collapsed
4734       // into the second separator.  The result will be a pathname
4735       // beginning with "\\\\" followed (most likely) by a host name.
4736       src = dst = path + 1;
4737       path[0] = '\\';     // Force first separator to '\\'
4738     }
4739   }
4740 
4741   end = dst;
4742 
4743   // Remove redundant separators from remainder of path, forcing all
4744   // separators to be '\\' rather than '/'. Also, single byte space
4745   // characters are removed from the end of the path because those
4746   // are not legal ending characters on this operating system.
4747   //
4748   while (*src != '\0') {
4749     if (isfilesep(*src)) {
4750       *dst++ = '\\'; src++;
4751       while (isfilesep(*src)) src++;
4752       if (*src == '\0') {
4753         // Check for trailing separator
4754         end = dst;
4755         if (colon == dst - 2) break;  // "z:\\"
4756         if (dst == path + 1) break;   // "\\"
4757         if (dst == path + 2 && isfilesep(path[0])) {
4758           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4759           // beginning of a UNC pathname.  Even though it is not, by
4760           // itself, a valid UNC pathname, we leave it as is in order
4761           // to be consistent with the path canonicalizer as well
4762           // as the win32 APIs, which treat this case as an invalid
4763           // UNC pathname rather than as an alias for the root
4764           // directory of the current drive.
4765           break;
4766         }
4767         end = --dst;  // Path does not denote a root directory, so
4768                       // remove trailing separator
4769         break;
4770       }
4771       end = dst;
4772     } else {
4773       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4774         *dst++ = *src++;
4775         if (*src) *dst++ = *src++;
4776         end = dst;
4777       } else {  // Copy a single-byte character
4778         char c = *src++;
4779         *dst++ = c;
4780         // Space is not a legal ending character
4781         if (c != ' ') end = dst;
4782       }
4783     }
4784   }
4785 
4786   *end = '\0';
4787 
4788   // For "z:", add "." to work around a bug in the C runtime library
4789   if (colon == dst - 1) {
4790     path[2] = '.';
4791     path[3] = '\0';
4792   }
4793 
4794   return path;
4795 }
4796 
4797 // This code is a copy of JDK's sysSetLength
4798 // from src/windows/hpi/src/sys_api_md.c
4799 
4800 int os::ftruncate(int fd, jlong length) {
4801   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4802   long high = (long)(length >> 32);
4803   DWORD ret;
4804 
4805   if (h == (HANDLE)(-1)) {
4806     return -1;
4807   }
4808 
4809   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4810   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4811     return -1;
4812   }
4813 
4814   if (::SetEndOfFile(h) == FALSE) {
4815     return -1;
4816   }
4817 
4818   return 0;
4819 }
4820 
4821 int os::get_fileno(FILE* fp) {
4822   return _fileno(fp);
4823 }
4824 
4825 // This code is a copy of JDK's sysSync
4826 // from src/windows/hpi/src/sys_api_md.c
4827 // except for the legacy workaround for a bug in Win 98
4828 
4829 int os::fsync(int fd) {
4830   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4831 
4832   if ((!::FlushFileBuffers(handle)) &&
4833       (GetLastError() != ERROR_ACCESS_DENIED)) {
4834     // from winerror.h
4835     return -1;
4836   }
4837   return 0;
4838 }
4839 
4840 static int nonSeekAvailable(int, long *);
4841 static int stdinAvailable(int, long *);
4842 
4843 // This code is a copy of JDK's sysAvailable
4844 // from src/windows/hpi/src/sys_api_md.c
4845 
4846 int os::available(int fd, jlong *bytes) {
4847   jlong cur, end;
4848   struct _stati64 stbuf64;
4849 
4850   if (::_fstati64(fd, &stbuf64) >= 0) {
4851     int mode = stbuf64.st_mode;
4852     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4853       int ret;
4854       long lpbytes;
4855       if (fd == 0) {
4856         ret = stdinAvailable(fd, &lpbytes);
4857       } else {
4858         ret = nonSeekAvailable(fd, &lpbytes);
4859       }
4860       (*bytes) = (jlong)(lpbytes);
4861       return ret;
4862     }
4863     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4864       return FALSE;
4865     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4866       return FALSE;
4867     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4868       return FALSE;
4869     }
4870     *bytes = end - cur;
4871     return TRUE;
4872   } else {
4873     return FALSE;
4874   }
4875 }
4876 
4877 void os::flockfile(FILE* fp) {
4878   _lock_file(fp);
4879 }
4880 
4881 void os::funlockfile(FILE* fp) {
4882   _unlock_file(fp);
4883 }
4884 
4885 // This code is a copy of JDK's nonSeekAvailable
4886 // from src/windows/hpi/src/sys_api_md.c
4887 
4888 static int nonSeekAvailable(int fd, long *pbytes) {
4889   // This is used for available on non-seekable devices
4890   // (like both named and anonymous pipes, such as pipes
4891   //  connected to an exec'd process).
4892   // Standard Input is a special case.
4893   HANDLE han;
4894 
4895   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4896     return FALSE;
4897   }
4898 
4899   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4900     // PeekNamedPipe fails when at EOF.  In that case we
4901     // simply make *pbytes = 0 which is consistent with the
4902     // behavior we get on Solaris when an fd is at EOF.
4903     // The only alternative is to raise an Exception,
4904     // which isn't really warranted.
4905     //
4906     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4907       return FALSE;
4908     }
4909     *pbytes = 0;
4910   }
4911   return TRUE;
4912 }
4913 
4914 #define MAX_INPUT_EVENTS 2000
4915 
4916 // This code is a copy of JDK's stdinAvailable
4917 // from src/windows/hpi/src/sys_api_md.c
4918 
4919 static int stdinAvailable(int fd, long *pbytes) {
4920   HANDLE han;
4921   DWORD numEventsRead = 0;  // Number of events read from buffer
4922   DWORD numEvents = 0;      // Number of events in buffer
4923   DWORD i = 0;              // Loop index
4924   DWORD curLength = 0;      // Position marker
4925   DWORD actualLength = 0;   // Number of bytes readable
4926   BOOL error = FALSE;       // Error holder
4927   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4928 
4929   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4930     return FALSE;
4931   }
4932 
4933   // Construct an array of input records in the console buffer
4934   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4935   if (error == 0) {
4936     return nonSeekAvailable(fd, pbytes);
4937   }
4938 
4939   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4940   if (numEvents > MAX_INPUT_EVENTS) {
4941     numEvents = MAX_INPUT_EVENTS;
4942   }
4943 
4944   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4945   if (lpBuffer == NULL) {
4946     return FALSE;
4947   }
4948 
4949   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4950   if (error == 0) {
4951     os::free(lpBuffer);
4952     return FALSE;
4953   }
4954 
4955   // Examine input records for the number of bytes available
4956   for (i=0; i<numEvents; i++) {
4957     if (lpBuffer[i].EventType == KEY_EVENT) {
4958 
4959       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4960                                       &(lpBuffer[i].Event);
4961       if (keyRecord->bKeyDown == TRUE) {
4962         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4963         curLength++;
4964         if (*keyPressed == '\r') {
4965           actualLength = curLength;
4966         }
4967       }
4968     }
4969   }
4970 
4971   if (lpBuffer != NULL) {
4972     os::free(lpBuffer);
4973   }
4974 
4975   *pbytes = (long) actualLength;
4976   return TRUE;
4977 }
4978 
4979 // Map a block of memory.
4980 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4981                         char *addr, size_t bytes, bool read_only,
4982                         bool allow_exec) {
4983   HANDLE hFile;
4984   char* base;
4985 
4986   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4987                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4988   if (hFile == INVALID_HANDLE_VALUE) {
4989     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4990     return NULL;
4991   }
4992 
4993   if (allow_exec) {
4994     // CreateFileMapping/MapViewOfFileEx can't map executable memory
4995     // unless it comes from a PE image (which the shared archive is not.)
4996     // Even VirtualProtect refuses to give execute access to mapped memory
4997     // that was not previously executable.
4998     //
4999     // Instead, stick the executable region in anonymous memory.  Yuck.
5000     // Penalty is that ~4 pages will not be shareable - in the future
5001     // we might consider DLLizing the shared archive with a proper PE
5002     // header so that mapping executable + sharing is possible.
5003 
5004     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
5005                                 PAGE_READWRITE);
5006     if (base == NULL) {
5007       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
5008       CloseHandle(hFile);
5009       return NULL;
5010     }
5011 
5012     // Record virtual memory allocation
5013     MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
5014 
5015     DWORD bytes_read;
5016     OVERLAPPED overlapped;
5017     overlapped.Offset = (DWORD)file_offset;
5018     overlapped.OffsetHigh = 0;
5019     overlapped.hEvent = NULL;
5020     // ReadFile guarantees that if the return value is true, the requested
5021     // number of bytes were read before returning.
5022     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
5023     if (!res) {
5024       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
5025       release_memory(base, bytes);
5026       CloseHandle(hFile);
5027       return NULL;
5028     }
5029   } else {
5030     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
5031                                     NULL /* file_name */);
5032     if (hMap == NULL) {
5033       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
5034       CloseHandle(hFile);
5035       return NULL;
5036     }
5037 
5038     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
5039     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
5040                                   (DWORD)bytes, addr);
5041     if (base == NULL) {
5042       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
5043       CloseHandle(hMap);
5044       CloseHandle(hFile);
5045       return NULL;
5046     }
5047 
5048     if (CloseHandle(hMap) == 0) {
5049       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
5050       CloseHandle(hFile);
5051       return base;
5052     }
5053   }
5054 
5055   if (allow_exec) {
5056     DWORD old_protect;
5057     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
5058     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
5059 
5060     if (!res) {
5061       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
5062       // Don't consider this a hard error; on IA32, even if
5063       // VirtualProtect fails, we should still be able to execute.
5064       CloseHandle(hFile);
5065       return base;
5066     }
5067   }
5068 
5069   if (CloseHandle(hFile) == 0) {
5070     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
5071     return base;
5072   }
5073 
5074   return base;
5075 }
5076 
5077 
5078 // Remap a block of memory.
5079 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5080                           char *addr, size_t bytes, bool read_only,
5081                           bool allow_exec) {
5082   // This OS does not allow existing memory maps to be remapped so we
5083   // would have to unmap the memory before we remap it.
5084 
5085   // Because there is a small window between unmapping memory and mapping
5086   // it in again with different protections, CDS archives are mapped RW
5087   // on windows, so this function isn't called.
5088   ShouldNotReachHere();
5089   return NULL;
5090 }
5091 
5092 
5093 // Unmap a block of memory.
5094 // Returns true=success, otherwise false.
5095 
5096 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5097   MEMORY_BASIC_INFORMATION mem_info;
5098   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
5099     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
5100     return false;
5101   }
5102 
5103   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
5104   // Instead, executable region was allocated using VirtualAlloc(). See
5105   // pd_map_memory() above.
5106   //
5107   // The following flags should match the 'exec_access' flags used for
5108   // VirtualProtect() in pd_map_memory().
5109   if (mem_info.Protect == PAGE_EXECUTE_READ ||
5110       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
5111     return pd_release_memory(addr, bytes);
5112   }
5113 
5114   BOOL result = UnmapViewOfFile(addr);
5115   if (result == 0) {
5116     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
5117     return false;
5118   }
5119   return true;
5120 }
5121 
5122 void os::pause() {
5123   char filename[MAX_PATH];
5124   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5125     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5126   } else {
5127     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5128   }
5129 
5130   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5131   if (fd != -1) {
5132     struct stat buf;
5133     ::close(fd);
5134     while (::stat(filename, &buf) == 0) {
5135       Sleep(100);
5136     }
5137   } else {
5138     jio_fprintf(stderr,
5139                 "Could not open pause file '%s', continuing immediately.\n", filename);
5140   }
5141 }
5142 
5143 Thread* os::ThreadCrashProtection::_protected_thread = NULL;
5144 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
5145 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;
5146 
5147 os::ThreadCrashProtection::ThreadCrashProtection() {
5148 }
5149 
5150 // See the caveats for this class in os_windows.hpp
5151 // Protects the callback call so that a raised OS EXCEPTION causes a jump back
5152 // into this method and returns false. If no OS EXCEPTION was raised, returns
5153 // true.
5154 // The callback is supposed to provide the method that should be protected.
5155 //
5156 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
5157 
5158   Thread::muxAcquire(&_crash_mux, "CrashProtection");
5159 
5160   _protected_thread = Thread::current_or_null();
5161   assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");
5162 
5163   bool success = true;
5164   __try {
5165     _crash_protection = this;
5166     cb.call();
5167   } __except(EXCEPTION_EXECUTE_HANDLER) {
5168     // only for protection, nothing to do
5169     success = false;
5170   }
5171   _crash_protection = NULL;
5172   _protected_thread = NULL;
5173   Thread::muxRelease(&_crash_mux);
5174   return success;
5175 }
5176 
5177 
5178 class HighResolutionInterval : public CHeapObj<mtThread> {
5179   // The default timer resolution seems to be 10 milliseconds.
5180   // (Where is this written down?)
5181   // If someone wants to sleep for only a fraction of the default,
5182   // then we set the timer resolution down to 1 millisecond for
5183   // the duration of their interval.
5184   // We carefully set the resolution back, since otherwise we
5185   // seem to incur an overhead (3%?) that we don't need.
5186   // CONSIDER: if ms is small, say 3, then we should run with a high resolution timer.
5187   // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
5188   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
5189   // timeBeginPeriod() if the relative error exceeded some threshold.
5190   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
5191   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
5192   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
5193   // resolution timers running.
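       // Example (illustrative): an interval of 3 ms (3 % 10 != 0) switches to 1 ms
       // resolution for its duration, while 500 ms (500 % 10 == 0) leaves the default
       // resolution untouched.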
5194  private:
5195   jlong resolution;
5196  public:
5197   HighResolutionInterval(jlong ms) {
5198     resolution = ms % 10L;
5199     if (resolution != 0) {
5200       MMRESULT result = timeBeginPeriod(1L);
5201     }
5202   }
5203   ~HighResolutionInterval() {
5204     if (resolution != 0) {
5205       MMRESULT result = timeEndPeriod(1L);
5206     }
5207     resolution = 0L;
5208   }
5209 };
5210 
5211 // An Event wraps a win32 "CreateEvent" kernel handle.
5212 //
5213 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5214 //
5215 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5216 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5217 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5218 //     In addition, an unpark() operation might fetch the handle field, but the
5219 //     event could recycle between the fetch and the SetEvent() operation.
5220 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5221 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
5222 //     on a stale but recycled handle would be harmless, but in practice this might
5223 //     confuse other non-Sun code, so it's not a viable approach.
5224 //
5225 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5226 //     with the Event.  The event handle is never closed.  This could be construed
5227 //     as handle leakage, but only up to the maximum # of threads that have been extant
5228 //     at any one time.  This shouldn't be an issue, as windows platforms typically
5229 //     permit a process to have hundreds of thousands of open handles.
5230 //
5231 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5232 //     and release unused handles.
5233 //
5234 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5235 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5236 //
5237 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5238 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5239 //
5240 // We use (2).
5241 //
5242 // TODO-FIXME:
5243 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5244 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5245 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5246 // 3.  Collapse the JSR166 parker event, and the objectmonitor ParkEvent
5247 //     into a single win32 CreateEvent() handle.
5248 //
5249 // Assumption:
5250 //    Only one parker can exist on an event, which is why we allocate
5251 //    them per-thread. Multiple unparkers can coexist.
5252 //
5253 // _Event transitions in park()
5254 //   -1 => -1 : illegal
5255 //    1 =>  0 : pass - return immediately
5256 //    0 => -1 : block; then set _Event to 0 before returning
5257 //
5258 // _Event transitions in unpark()
5259 //    0 => 1 : just return
5260 //    1 => 1 : just return
5261 //   -1 => either 0 or 1; must signal target thread
5262 //         That is, we can safely transition _Event from -1 to either
5263 //         0 or 1.
5264 //
5265 // _Event serves as a restricted-range semaphore.
5266 //   -1 : thread is blocked, i.e. there is a waiter
5267 //    0 : neutral: thread is running or ready,
5268 //        could have been signaled after a wait started
5269 //    1 : signaled - thread is running or ready
5270 //
5271 // Another possible encoding of _Event would be with
5272 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5273 //
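     // Worked example (illustrative):
     //   unpark() with _Event == 0 merely stores 1 (there is no waiter to signal);
     //   the next park() sees 1, resets it to 0, and returns immediately.
     //   park() with _Event == 0 stores -1 and blocks on the win32 event until an
     //   unpark() stores 1 and signals it; park() then resets _Event to 0.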
5274 
5275 int os::PlatformEvent::park(jlong Millis) {
5276   // Transitions for _Event:
5277   //   -1 => -1 : illegal
5278   //    1 =>  0 : pass - return immediately
5279   //    0 => -1 : block; then set _Event to 0 before returning
5280 
5281   guarantee(_ParkHandle != NULL , "Invariant");
5282   guarantee(Millis > 0          , "Invariant");
5283 
5284   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5285   // the initial park() operation.
5286   // Consider: use atomic decrement instead of CAS-loop
5287 
5288   int v;
5289   for (;;) {
5290     v = _Event;
5291     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5292   }
5293   guarantee((v == 0) || (v == 1), "invariant");
5294   if (v != 0) return OS_OK;
5295 
5296   // Do this the hard way by blocking ...
5297   // TODO: consider a brief spin here, gated on the success of recent
5298   // spin attempts by this thread.
5299   //
5300   // We decompose long timeouts into series of shorter timed waits.
5301   // Evidently large timeout values passed in WaitForSingleObject() are problematic on some
5302   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5303   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5304   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5305   // ::WaitForSingleObject() caused by latent ::SetEvent() operations will tend
5306   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5307   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5308   // for the already waited time.  This policy does not admit any new outcomes.
5309   // In the future, however, we might want to track the accumulated wait time and
5310   // adjust Millis accordingly if we encounter a spurious wakeup.
5311 
5312   const int MAXTIMEOUT = 0x10000000;
5313   DWORD rv = WAIT_TIMEOUT;
5314   while (_Event < 0 && Millis > 0) {
5315     DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5316     if (Millis > MAXTIMEOUT) {
5317       prd = MAXTIMEOUT;
5318     }
5319     HighResolutionInterval *phri = NULL;
5320     if (!ForceTimeHighResolution) {
5321       phri = new HighResolutionInterval(prd);
5322     }
5323     rv = ::WaitForSingleObject(_ParkHandle, prd);
5324     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5325     if (rv == WAIT_TIMEOUT) {
5326       Millis -= prd;
5327     }
5328     delete phri; // if it is NULL, harmless
5329   }
5330   v = _Event;
5331   _Event = 0;
5332   // see comment at end of os::PlatformEvent::park() below:
5333   OrderAccess::fence();
5334   // If we encounter a nearly simultaneous timeout expiry and unpark()
5335   // we return OS_OK indicating we awoke via unpark().
5336   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5337   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5338 }
5339 
5340 void os::PlatformEvent::park() {
5341   // Transitions for _Event:
5342   //   -1 => -1 : illegal
5343   //    1 =>  0 : pass - return immediately
5344   //    0 => -1 : block; then set _Event to 0 before returning
5345 
5346   guarantee(_ParkHandle != NULL, "Invariant");
5347   // Invariant: Only the thread associated with the Event/PlatformEvent
5348   // may call park().
5349   // Consider: use atomic decrement instead of CAS-loop
5350   int v;
5351   for (;;) {
5352     v = _Event;
5353     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5354   }
5355   guarantee((v == 0) || (v == 1), "invariant");
5356   if (v != 0) return;
5357 
5358   // Do this the hard way by blocking ...
5359   // TODO: consider a brief spin here, gated on the success of recent
5360   // spin attempts by this thread.
5361   while (_Event < 0) {
5362     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5363     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5364   }
5365 
5366   // Usually we'll find _Event == 0 at this point, but as
5367   // an optional optimization we clear it, just in case multiple
5368   // unpark() operations drove _Event up to 1.
5369   _Event = 0;
5370   OrderAccess::fence();
5371   guarantee(_Event >= 0, "invariant");
5372 }
5373 
5374 void os::PlatformEvent::unpark() {
5375   guarantee(_ParkHandle != NULL, "Invariant");
5376 
5377   // Transitions for _Event:
5378   //    0 => 1 : just return
5379   //    1 => 1 : just return
5380   //   -1 => either 0 or 1; must signal target thread
5381   //         That is, we can safely transition _Event from -1 to either
5382   //         0 or 1.
5383   // See also: "Semaphores in Plan 9" by Mullender & Cox
5384   //
5385   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5386   // that it will take two back-to-back park() calls for the owning
5387   // thread to block. This has the benefit of forcing a spurious return
5388   // from the first park() call after an unpark() call which will help
5389   // shake out uses of park() and unpark() without condition variables.
5390 
5391   if (Atomic::xchg(&_Event, 1) >= 0) return;
5392 
5393   ::SetEvent(_ParkHandle);
5394 }
5395 
5396 
5397 // JSR166
5398 // -------------------------------------------------------
5399 
5400 // The Windows implementation of Park is very straightforward: Basic
5401 // operations on Win32 Events turn out to have the right semantics to
5402 // use them directly. We opportunistically reuse the event inherited
5403 // from Monitor.
5404 
5405 void Parker::park(bool isAbsolute, jlong time) {
5406   guarantee(_ParkEvent != NULL, "invariant");
5407   // First, demultiplex/decode time arguments
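       // (e.g. a relative request of 500,000 ns is coarsened to the 1 ms minimum below)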
5408   if (time < 0) { // don't wait
5409     return;
5410   } else if (time == 0 && !isAbsolute) {
5411     time = INFINITE;
5412   } else if (isAbsolute) {
5413     time -= os::javaTimeMillis(); // convert to relative time
5414     if (time <= 0) {  // already elapsed
5415       return;
5416     }
5417   } else { // relative
5418     time /= 1000000;  // Must coarsen from nanos to millis
5419     if (time == 0) {  // Wait for the minimal time unit if zero
5420       time = 1;
5421     }
5422   }
5423 
5424   JavaThread* thread = JavaThread::current();
5425 
5426   // Don't wait if interrupted or already triggered
5427   if (thread->is_interrupted(false) ||
5428       WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5429     ResetEvent(_ParkEvent);
5430     return;
5431   } else {
5432     ThreadBlockInVM tbivm(thread);
5433     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5434     thread->set_suspend_equivalent();
5435 
5436     WaitForSingleObject(_ParkEvent, time);
5437     ResetEvent(_ParkEvent);
5438 
5439     // If externally suspended while waiting, re-suspend
5440     if (thread->handle_special_suspend_equivalent_condition()) {
5441       thread->java_suspend_self();
5442     }
5443   }
5444 }
5445 
5446 void Parker::unpark() {
5447   guarantee(_ParkEvent != NULL, "invariant");
5448   SetEvent(_ParkEvent);
5449 }
5450 
5451 // Platform Monitor implementation
5452 
5453 // Must already be locked
5454 int os::PlatformMonitor::wait(jlong millis) {
5455   assert(millis >= 0, "negative timeout");
5456   int ret = OS_TIMEOUT;
5457   int status = SleepConditionVariableCS(&_cond, &_mutex,
5458                                         millis == 0 ? INFINITE : millis);
5459   if (status != 0) {
5460     ret = OS_OK;
5461   }
5462   #ifndef PRODUCT
5463   else {
5464     DWORD err = GetLastError();
5465     assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
5466   }
5467   #endif
5468   return ret;
5469 }
5470 
5471 // Run the specified command in a separate process. Return its exit value,
5472 // or -1 on failure (e.g. can't create a new process).
5473 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
5474   STARTUPINFO si;
5475   PROCESS_INFORMATION pi;
5476   DWORD exit_code;
5477 
5478   char * cmd_string;
5479   const char * cmd_prefix = "cmd /C ";
5480   size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
5481   cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
5482   if (cmd_string == NULL) {
5483     return -1;
5484   }
5485   cmd_string[0] = '\0';
5486   strcat(cmd_string, cmd_prefix);
5487   strcat(cmd_string, cmd);
5488 
5489   // now replace all '\n' with '&'
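       // (e.g. "echo a\necho b" is ultimately run as "cmd /C echo a&echo b")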
5490   char * substring = cmd_string;
5491   while ((substring = strchr(substring, '\n')) != NULL) {
5492     substring[0] = '&';
5493     substring++;
5494   }
5495   memset(&si, 0, sizeof(si));
5496   si.cb = sizeof(si);
5497   memset(&pi, 0, sizeof(pi));
5498   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5499                             cmd_string,    // command line
5500                             NULL,   // process security attribute
5501                             NULL,   // thread security attribute
5502                             TRUE,   // inherits system handles
5503                             0,      // no creation flags
5504                             NULL,   // use parent's environment block
5505                             NULL,   // use parent's starting directory
5506                             &si,    // (in) startup information
5507                             &pi);   // (out) process information
5508 
5509   if (rslt) {
5510     // Wait until child process exits.
5511     WaitForSingleObject(pi.hProcess, INFINITE);
5512 
5513     GetExitCodeProcess(pi.hProcess, &exit_code);
5514 
5515     // Close process and thread handles.
5516     CloseHandle(pi.hProcess);
5517     CloseHandle(pi.hThread);
5518   } else {
5519     exit_code = -1;
5520   }
5521 
5522   FREE_C_HEAP_ARRAY(char, cmd_string);
5523   return (int)exit_code;
5524 }
5525 
5526 bool os::find(address addr, outputStream* st) {
5527   int offset = -1;
5528   bool result = false;
5529   char buf[256];
5530   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5531     st->print(PTR_FORMAT " ", addr);
5532     if (strlen(buf) < sizeof(buf) - 1) {
5533       char* p = strrchr(buf, '\\');
5534       if (p) {
5535         st->print("%s", p + 1);
5536       } else {
5537         st->print("%s", buf);
5538       }
5539     } else {
5540       // The library name is probably truncated. Let's omit the library name.
5541       // See also JDK-8147512.
5542     }
5543     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5544       st->print("::%s + 0x%x", buf, offset);
5545     }
5546     st->cr();
5547     result = true;
5548   }
5549   return result;
5550 }
5551 
5552 static jint initSock() {
5553   WSADATA wsadata;
5554 
5555   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5556     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5557                 ::GetLastError());
5558     return JNI_ERR;
5559   }
5560   return JNI_OK;
5561 }
5562 
5563 struct hostent* os::get_host_by_name(char* name) {
5564   return (struct hostent*)gethostbyname(name);
5565 }
5566 
5567 int os::socket_close(int fd) {
5568   return ::closesocket(fd);
5569 }
5570 
5571 int os::socket(int domain, int type, int protocol) {
5572   return ::socket(domain, type, protocol);
5573 }
5574 
5575 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5576   return ::connect(fd, him, len);
5577 }
5578 
5579 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5580   return ::recv(fd, buf, (int)nBytes, flags);
5581 }
5582 
5583 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5584   return ::send(fd, buf, (int)nBytes, flags);
5585 }
5586 
5587 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5588   return ::send(fd, buf, (int)nBytes, flags);
5589 }
5590 
5591 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5592 #if defined(IA32)
5593   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5594 #elif defined (AMD64)
5595   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5596 #endif
5597 
5598 // returns true if thread could be suspended,
5599 // false otherwise
5600 static bool do_suspend(HANDLE* h) {
5601   if (h != NULL) {
5602     if (SuspendThread(*h) != ~0) {
5603       return true;
5604     }
5605   }
5606   return false;
5607 }
5608 
5609 // resume the thread
5610 // calling resume on an active thread is a no-op
5611 static void do_resume(HANDLE* h) {
5612   if (h != NULL) {
5613     ResumeThread(*h);
5614   }
5615 }
5616 
5617 // retrieve a suspend/resume context capable handle
5618 // from the tid. Caller validates handle return value.
5619 void get_thread_handle_for_extended_context(HANDLE* h,
5620                                             OSThread::thread_id_t tid) {
5621   if (h != NULL) {
5622     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5623   }
5624 }
5625 
5626 // Thread sampling implementation
5627 //
5628 void os::SuspendedThreadTask::internal_do_task() {
5629   CONTEXT    ctxt;
5630   HANDLE     h = NULL;
5631 
5632   // get context capable handle for thread
5633   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5634 
5635   // sanity
5636   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5637     return;
5638   }
5639 
5640   // suspend the thread
5641   if (do_suspend(&h)) {
5642     ctxt.ContextFlags = sampling_context_flags;
5643     // get thread context
5644     GetThreadContext(h, &ctxt);
5645     SuspendedThreadTaskContext context(_thread, &ctxt);
5646     // pass context to Thread Sampling impl
5647     do_task(context);
5648     // resume thread
5649     do_resume(&h);
5650   }
5651 
5652   // close handle
5653   CloseHandle(h);
5654 }
5655 
5656 bool os::start_debugging(char *buf, int buflen) {
5657   int len = (int)strlen(buf);
5658   char *p = &buf[len];
5659 
5660   jio_snprintf(p, buflen-len,
5661              "\n\n"
5662              "Do you want to debug the problem?\n\n"
5663              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5664              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5665              "Otherwise, select 'No' to abort...",
5666              os::current_process_id(), os::current_thread_id());
5667 
5668   bool yes = os::message_box("Unexpected Error", buf);
5669 
5670   if (yes) {
5671     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5672     // exception. If VM is running inside a debugger, the debugger will
5673     // catch the exception. Otherwise, the breakpoint exception will reach
5674     // the default windows exception handler, which can spawn a debugger and
5675     // automatically attach to the dying VM.
5676     os::breakpoint();
5677     yes = false;
5678   }
5679   return yes;
5680 }
5681 
5682 void* os::get_default_process_handle() {
5683   return (void*)GetModuleHandle(NULL);
5684 }
5685 
5686 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5687 // which is used to find statically linked in agents.
5688 // Additionally for windows, takes into account __stdcall names.
5689 // Parameters:
5690 //            sym_name: Symbol in library we are looking for
5691 //            lib_name: Name of library to look in, NULL for shared libs.
5692 //            is_absolute_path == true if lib_name is absolute path to agent
5693 //                                     such as "C:/a/b/L.dll"
5694 //            == false if only the base name of the library is passed in
5695 //               such as "L"
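     // Example (illustrative): sym_name "_Agent_OnLoad@12" with lib_name "L" yields
     // "_Agent_OnLoad_L@12"; with lib_name NULL the symbol name is returned as-is.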
5696 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5697                                     bool is_absolute_path) {
5698   char *agent_entry_name;
5699   size_t len;
5700   size_t name_len;
5701   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5702   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5703   const char *start;
5704 
5705   if (lib_name != NULL) {
5706     len = name_len = strlen(lib_name);
5707     if (is_absolute_path) {
5708       // Need to strip path, prefix and suffix
5709       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5710         lib_name = ++start;
5711       } else {
5712         // Need to check for drive prefix
5713         if ((start = strchr(lib_name, ':')) != NULL) {
5714           lib_name = ++start;
5715         }
5716       }
5717       if (len <= (prefix_len + suffix_len)) {
5718         return NULL;
5719       }
5720       lib_name += prefix_len;
5721       name_len = strlen(lib_name) - suffix_len;
5722     }
5723   }
5724   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5725   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5726   if (agent_entry_name == NULL) {
5727     return NULL;
5728   }
5729   if (lib_name != NULL) {
5730     const char *p = strrchr(sym_name, '@');
5731     if (p != NULL && p != sym_name) {
5732       // sym_name == _Agent_OnLoad@XX
5733       strncpy(agent_entry_name, sym_name, (p - sym_name));
5734       agent_entry_name[(p-sym_name)] = '\0';
5735       // agent_entry_name == _Agent_OnLoad
5736       strcat(agent_entry_name, "_");
5737       strncat(agent_entry_name, lib_name, name_len);
5738       strcat(agent_entry_name, p);
5739       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5740     } else {
5741       strcpy(agent_entry_name, sym_name);
5742       strcat(agent_entry_name, "_");
5743       strncat(agent_entry_name, lib_name, name_len);
5744     }
5745   } else {
5746     strcpy(agent_entry_name, sym_name);
5747   }
5748   return agent_entry_name;
5749 }
5750 
5751 #ifndef PRODUCT
5752 
5753 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5754 // contiguous memory block at a particular address.
5755 // The test first tries to find a good approximate address to allocate at by using the same
5756 // method to allocate some memory at any address. The test then tries to allocate memory in
5757 // the vicinity (not directly after it, to avoid possible by-chance use of that location).
5758 // This is of course only a dodgy assumption; there is no guarantee that the vicinity of
5759 // the previously allocated memory is available for allocation. The only actual failure
5760 // that is reported is when the test tries to allocate at a particular location but gets a
5761 // different valid one. A NULL return value at this point is not considered an error but may
5762 // be legitimate.
5763 void TestReserveMemorySpecial_test() {
5764   if (!UseLargePages) {
5765     return;
5766   }
5767   // save current value of globals
5768   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5769   bool old_use_numa_interleaving = UseNUMAInterleaving;
5770 
5771   // set globals to make sure we hit the correct code path
5772   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5773 
5774   // do an allocation at an address selected by the OS to get a good one.
5775   const size_t large_allocation_size = os::large_page_size() * 4;
5776   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5777   if (result == NULL) {
5778   } else {
5779     os::release_memory_special(result, large_allocation_size);
5780 
5781     // allocate another page within the recently allocated memory area which seems to be a good location. At least
5782     // we managed to get it once.
5783     const size_t expected_allocation_size = os::large_page_size();
5784     char* expected_location = result + os::large_page_size();
5785     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5786     if (actual_location == NULL) {
5787     } else {
5788       // release memory
5789       os::release_memory_special(actual_location, expected_allocation_size);
5790       // only now check, after releasing any memory to avoid any leaks.
5791       assert(actual_location == expected_location,
5792              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5793              expected_location, expected_allocation_size, actual_location);
5794     }
5795   }
5796 
5797   // restore globals
5798   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5799   UseNUMAInterleaving = old_use_numa_interleaving;
5800 }
5801 #endif // PRODUCT
5802 
5803 /*
5804   All the defined signal names for Windows.
5805 
5806   NOTE that not all of these names are accepted by FindSignal!
5807 
5808   For various reasons some of these may be rejected at runtime.
5809 
5810   Here are the names currently accepted by a user of sun.misc.Signal with
5811   1.4.1 (ignoring potential interaction with use of chaining, etc):
5812 
5813      (LIST TBD)
5814 
5815 */
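     // For example, get_signal_number("TERM") returns SIGTERM, while names not in
     // the table below yield -1.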
5816 int os::get_signal_number(const char* name) {
5817   static const struct {
5818     const char* name;
5819     int         number;
5820   } siglabels [] =
5821     // derived from version 6.0 VC98/include/signal.h
5822   {"ABRT",      SIGABRT,        // abnormal termination triggered by abort cl
5823   "FPE",        SIGFPE,         // floating point exception
5824   "SEGV",       SIGSEGV,        // segment violation
5825   "INT",        SIGINT,         // interrupt
5826   "TERM",       SIGTERM,        // software term signal from kill
5827   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5828   "ILL",        SIGILL};        // illegal instruction
5829   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5830     if (strcmp(name, siglabels[i].name) == 0) {
5831       return siglabels[i].number;
5832     }
5833   }
5834   return -1;
5835 }
5836 
5837 // Fast current thread access
5838 
5839 int os::win32::_thread_ptr_offset = 0;
5840 
5841 static void call_wrapper_dummy() {}
5842 
5843 // We need to call the os_exception_wrapper once so that it sets
5844 // up the offset from FS of the thread pointer.
5845 void os::win32::initialize_thread_ptr_offset() {
5846   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5847                            NULL, methodHandle(), NULL, NULL);
5848 }
5849 
5850 bool os::supports_map_sync() {
5851   return false;
5852 }