1 /*
   2  * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "jvm.h"
  30 #include "classfile/classLoader.hpp"
  31 #include "classfile/systemDictionary.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/disassembler.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "logging/log.hpp"
  39 #include "logging/logStream.hpp"
  40 #include "memory/allocation.inline.hpp"
  41 #include "memory/filemap.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "os_share_windows.hpp"
  44 #include "os_windows.inline.hpp"
  45 #include "prims/jniFastGetField.hpp"
  46 #include "prims/jvm_misc.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/atomic.hpp"
  49 #include "runtime/extendedPC.hpp"
  50 #include "runtime/globals.hpp"
  51 #include "runtime/interfaceSupport.inline.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/objectMonitor.hpp"
  56 #include "runtime/orderAccess.hpp"
  57 #include "runtime/osThread.hpp"
  58 #include "runtime/perfMemory.hpp"
  59 #include "runtime/sharedRuntime.hpp"
  60 #include "runtime/statSampler.hpp"
  61 #include "runtime/stubRoutines.hpp"
  62 #include "runtime/thread.inline.hpp"
  63 #include "runtime/threadCritical.hpp"
  64 #include "runtime/timer.hpp"
  65 #include "runtime/vm_version.hpp"
  66 #include "services/attachListener.hpp"
  67 #include "services/memTracker.hpp"
  68 #include "services/runtimeService.hpp"
  69 #include "utilities/align.hpp"
  70 #include "utilities/decoder.hpp"
  71 #include "utilities/defaultStream.hpp"
  72 #include "utilities/events.hpp"
  73 #include "utilities/growableArray.hpp"
  74 #include "utilities/macros.hpp"
  75 #include "utilities/vmError.hpp"
  76 #include "symbolengine.hpp"
  77 #include "windbghelp.hpp"
  78 
  79 
  80 #ifdef _DEBUG
  81 #include <crtdbg.h>
  82 #endif
  83 
  84 
  85 #include <windows.h>
  86 #include <sys/types.h>
  87 #include <sys/stat.h>
  88 #include <sys/timeb.h>
  89 #include <objidl.h>
  90 #include <shlobj.h>
  91 
  92 #include <malloc.h>
  93 #include <signal.h>
  94 #include <direct.h>
  95 #include <errno.h>
  96 #include <fcntl.h>
  97 #include <io.h>
  98 #include <process.h>              // For _beginthreadex(), _endthreadex()
  99 #include <imagehlp.h>             // For os::dll_address_to_function_name
 100 // for enumerating dll libraries
 101 #include <vdmdbg.h>
 102 #include <psapi.h>
 103 #include <mmsystem.h>
 104 #include <winsock2.h>
 105 
 106 // for timer info max values which include all bits
 107 #define ALL_64_BITS CONST64(-1)
 108 
 109 // For DLL loading/load error detection
 110 // Values of PE COFF
 111 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 112 #define IMAGE_FILE_SIGNATURE_LENGTH 4
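     // In a PE image, the 32-bit value at offset 0x3c holds the file offset of the
     // "PE\0\0" signature; the COFF file header, whose first field identifies the
     // target machine/architecture, immediately follows that 4-byte signature.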
 113 
 114 static HANDLE main_process;
 115 static HANDLE main_thread;
 116 static int    main_thread_id;
 117 
 118 static FILETIME process_creation_time;
 119 static FILETIME process_exit_time;
 120 static FILETIME process_user_time;
 121 static FILETIME process_kernel_time;
 122 
 123 #ifdef _M_AMD64
 124   #define __CPU__ amd64
 125 #else
 126   #define __CPU__ i486
 127 #endif
 128 
 129 #if INCLUDE_AOT
 130 PVOID  topLevelVectoredExceptionHandler = NULL;
 131 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 132 #endif
 133 
 134 // save DLL module handle, used by GetModuleFileName
 135 
 136 HINSTANCE vm_lib_handle;
 137 
 138 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 139   switch (reason) {
 140   case DLL_PROCESS_ATTACH:
 141     vm_lib_handle = hinst;
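         // timeBeginPeriod(1) requests a minimum timer resolution of 1 ms for the
         // lifetime of the process (matched by timeEndPeriod on detach below).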
 142     if (ForceTimeHighResolution) {
 143       timeBeginPeriod(1L);
 144     }
 145     WindowsDbgHelp::pre_initialize();
 146     SymbolEngine::pre_initialize();
 147     break;
 148   case DLL_PROCESS_DETACH:
 149     if (ForceTimeHighResolution) {
 150       timeEndPeriod(1L);
 151     }
 152 #if INCLUDE_AOT
 153     if (topLevelVectoredExceptionHandler != NULL) {
 154       RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
 155       topLevelVectoredExceptionHandler = NULL;
 156     }
 157 #endif
 158     break;
 159   default:
 160     break;
 161   }
 162   return true;
 163 }
 164 
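     // Combines the two 32-bit halves of a FILETIME (100 ns units) into a double
     // scaled so that the result is approximately in seconds.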
 165 static inline double fileTimeAsDouble(FILETIME* time) {
 166   const double high  = (double) ((unsigned int) ~0);
 167   const double split = 10000000.0;
 168   double result = (time->dwLowDateTime / split) +
 169                    time->dwHighDateTime * (high/split);
 170   return result;
 171 }
 172 
 173 // Implementation of os
 174 
 175 bool os::unsetenv(const char* name) {
 176   assert(name != NULL, "Null pointer");
 177   return (SetEnvironmentVariable(name, NULL) == TRUE);
 178 }
 179 
 180 // No setuid programs under Windows.
 181 bool os::have_special_privileges() {
 182   return false;
 183 }
 184 
 185 
 186 // This method is a periodic task to check for misbehaving JNI applications
 187 // under CheckJNI; we can add any periodic checks here.
 188 // On Windows it currently does nothing.
 189 void os::run_periodic_checks() {
 190   return;
 191 }
 192 
 193 // previous UnhandledExceptionFilter, if there is one
 194 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 195 
 196 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 197 
 198 void os::init_system_properties_values() {
 199   // sysclasspath, java_home, dll_dir
 200   {
 201     char *home_path;
 202     char *dll_path;
 203     char *pslash;
 204     const char *bin = "\\bin";
 205     char home_dir[MAX_PATH + 1];
 206     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 207 
 208     if (alt_home_dir != NULL)  {
 209       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 210       home_dir[MAX_PATH] = '\0';
 211     } else {
 212       os::jvm_path(home_dir, sizeof(home_dir));
 213       // Found the full path to jvm.dll.
 214       // Now strip the trailing path components to get <java_home>.
 215       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 216       pslash = strrchr(home_dir, '\\');
 217       if (pslash != NULL) {
 218         *pslash = '\0';                   // get rid of \{client|server}
 219         pslash = strrchr(home_dir, '\\');
 220         if (pslash != NULL) {
 221           *pslash = '\0';                 // get rid of \bin
 222         }
 223       }
 224     }
 225 
 226     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 227     if (home_path == NULL) {
 228       return;
 229     }
 230     strcpy(home_path, home_dir);
 231     Arguments::set_java_home(home_path);
 232     FREE_C_HEAP_ARRAY(char, home_path);
 233 
 234     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 235                                 mtInternal);
 236     if (dll_path == NULL) {
 237       return;
 238     }
 239     strcpy(dll_path, home_dir);
 240     strcat(dll_path, bin);
 241     Arguments::set_dll_dir(dll_path);
 242     FREE_C_HEAP_ARRAY(char, dll_path);
 243 
 244     if (!set_boot_path('\\', ';')) {
 245       vm_exit_during_initialization("Failed setting boot class path.", NULL);
 246     }
 247   }
 248 
 249 // library_path
 250 #define EXT_DIR "\\lib\\ext"
 251 #define BIN_DIR "\\bin"
 252 #define PACKAGE_DIR "\\Sun\\Java"
 253   {
 254     // Win32 library search order (See the documentation for LoadLibrary):
 255     //
 256     // 1. The directory from which the application is loaded.
 257     // 2. The system-wide Java Extensions directory (Java only)
 258     // 3. System directory (GetSystemDirectory)
 259     // 4. Windows directory (GetWindowsDirectory)
 260     // 5. The PATH environment variable
 261     // 6. The current directory
 262 
 263     char *library_path;
 264     char tmp[MAX_PATH];
 265     char *path_str = ::getenv("PATH");
 266 
 267     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 268                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 269 
 270     library_path[0] = '\0';
 271 
 272     GetModuleFileName(NULL, tmp, sizeof(tmp));
 273     *(strrchr(tmp, '\\')) = '\0';
 274     strcat(library_path, tmp);
 275 
 276     GetWindowsDirectory(tmp, sizeof(tmp));
 277     strcat(library_path, ";");
 278     strcat(library_path, tmp);
 279     strcat(library_path, PACKAGE_DIR BIN_DIR);
 280 
 281     GetSystemDirectory(tmp, sizeof(tmp));
 282     strcat(library_path, ";");
 283     strcat(library_path, tmp);
 284 
 285     GetWindowsDirectory(tmp, sizeof(tmp));
 286     strcat(library_path, ";");
 287     strcat(library_path, tmp);
 288 
 289     if (path_str) {
 290       strcat(library_path, ";");
 291       strcat(library_path, path_str);
 292     }
 293 
 294     strcat(library_path, ";.");
 295 
 296     Arguments::set_library_path(library_path);
 297     FREE_C_HEAP_ARRAY(char, library_path);
 298   }
 299 
 300   // Default extensions directory
 301   {
 302     char path[MAX_PATH];
 303     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 304     GetWindowsDirectory(path, MAX_PATH);
 305     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 306             path, PACKAGE_DIR, EXT_DIR);
 307     Arguments::set_ext_dirs(buf);
 308   }
 309   #undef EXT_DIR
 310   #undef BIN_DIR
 311   #undef PACKAGE_DIR
 312 
 313 #ifndef _WIN64
 314   // set our UnhandledExceptionFilter and save any previous one
 315   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 316 #endif
 317 
 318   // Done
 319   return;
 320 }
 321 
 322 void os::breakpoint() {
 323   DebugBreak();
 324 }
 325 
 326 // Invoked from the BREAKPOINT Macro
 327 extern "C" void breakpoint() {
 328   os::breakpoint();
 329 }
 330 
 331 // The RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 332 // So far, this method is only used by Native Memory Tracking, which is
 333 // only supported on Windows XP or later.
 334 //
 335 int os::get_native_stack(address* stack, int frames, int toSkip) {
 336   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 337   for (int index = captured; index < frames; index ++) {
 338     stack[index] = NULL;
 339   }
 340   return captured;
 341 }
 342 
 343 
 344 // os::current_stack_base()
 345 //
 346 //   Returns the base of the stack, which is the stack's
 347 //   starting address.  This function must be called
 348 //   while running on the stack of the thread being queried.
 349 
 350 address os::current_stack_base() {
 351   MEMORY_BASIC_INFORMATION minfo;
 352   address stack_bottom;
 353   size_t stack_size;
 354 
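       // Querying the address of a local variable yields the memory region that
       // contains the current thread's stack; its AllocationBase is the lowest
       // address of the stack's reserved area.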
 355   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 356   stack_bottom =  (address)minfo.AllocationBase;
 357   stack_size = minfo.RegionSize;
 358 
 359   // Add up the sizes of all the regions with the same
 360   // AllocationBase.
 361   while (1) {
 362     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 363     if (stack_bottom == (address)minfo.AllocationBase) {
 364       stack_size += minfo.RegionSize;
 365     } else {
 366       break;
 367     }
 368   }
 369   return stack_bottom + stack_size;
 370 }
 371 
 372 size_t os::current_stack_size() {
 373   size_t sz;
 374   MEMORY_BASIC_INFORMATION minfo;
 375   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 376   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 377   return sz;
 378 }
 379 
 380 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
 381   MEMORY_BASIC_INFORMATION minfo;
 382   committed_start = NULL;
 383   committed_size = 0;
 384   address top = start + size;
 385   const address start_addr = start;
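       // Walk the range region by region: remember where the first committed
       // region begins and accumulate the size of the contiguous committed run
       // that starts there, stopping at the first uncommitted gap after it.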
 386   while (start < top) {
 387     VirtualQuery(start, &minfo, sizeof(minfo));
 388     if ((minfo.State & MEM_COMMIT) == 0) {  // not committed
 389       if (committed_start != NULL) {
 390         break;
 391       }
 392     } else {  // committed
 393       if (committed_start == NULL) {
 394         committed_start = start;
 395       }
 396       size_t offset = start - (address)minfo.BaseAddress;
 397       committed_size += minfo.RegionSize - offset;
 398     }
 399     start = (address)minfo.BaseAddress + minfo.RegionSize;
 400   }
 401 
 402   if (committed_start == NULL) {
 403     assert(committed_size == 0, "Sanity");
 404     return false;
 405   } else {
 406     assert(committed_start >= start_addr && committed_start < top, "Out of range");
 407     // current region may go beyond the limit, trim to the limit
 408     committed_size = MIN2(committed_size, size_t(top - committed_start));
 409     return true;
 410   }
 411 }
 412 
 413 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 414   const struct tm* time_struct_ptr = localtime(clock);
 415   if (time_struct_ptr != NULL) {
 416     *res = *time_struct_ptr;
 417     return res;
 418   }
 419   return NULL;
 420 }
 421 
 422 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 423   const struct tm* time_struct_ptr = gmtime(clock);
 424   if (time_struct_ptr != NULL) {
 425     *res = *time_struct_ptr;
 426     return res;
 427   }
 428   return NULL;
 429 }
 430 
 431 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 432 
 433 // Thread start routine for all newly created threads
 434 static unsigned __stdcall thread_native_entry(Thread* thread) {
 435 
 436   thread->record_stack_base_and_size();
 437 
 438   // Try to randomize the cache line index of hot stack frames.
 439   // This helps when threads with the same stack traces evict each other's
 440   // cache lines. The threads can be either from the same JVM instance or
 441   // from different JVM instances. The benefit is especially noticeable on
 442   // processors with hyperthreading technology.
 443   static int counter = 0;
 444   int pid = os::current_process_id();
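       // _alloca() shifts the stack pointer by a pseudo-random multiple of
       // 128 bytes (0..896), so subsequent frames of different threads land
       // at different cache line offsets.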
 445   _alloca(((pid ^ counter++) & 7) * 128);
 446 
 447   thread->initialize_thread_current();
 448 
 449   OSThread* osthr = thread->osthread();
 450   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 451 
 452   if (UseNUMA) {
 453     int lgrp_id = os::numa_get_group_id();
 454     if (lgrp_id != -1) {
 455       thread->set_lgrp_id(lgrp_id);
 456     }
 457   }
 458 
 459   // Diagnostic code to investigate JDK-6573254
 460   int res = 30115;  // non-java thread
 461   if (thread->is_Java_thread()) {
 462     res = 20115;    // java thread
 463   }
 464 
 465   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 466 
 467   // Install a win32 structured exception handler around every thread created
 468   // by the VM, so the VM can generate an error dump when an exception occurs
 469   // in a non-Java thread (e.g. the VM thread).
 470   __try {
 471     thread->call_run();
 472   } __except(topLevelExceptionFilter(
 473                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 474     // Nothing to do.
 475   }
 476 
 477   // Note: at this point the thread object may already have deleted itself.
 478   // Do not dereference it from here on out.
 479 
 480   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 481 
 482   // One less thread is executing
 483   // When the VMThread gets here, the main thread may have already exited
 484   // which frees the CodeHeap containing the Atomic::add code
 485   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 486     Atomic::dec(&os::win32::_os_thread_count);
 487   }
 488 
 489   // Thread must not return from exit_process_or_thread(), but if it does,
 490   // let it proceed to exit normally
 491   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 492 }
 493 
 494 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 495                                   int thread_id) {
 496   // Allocate the OSThread object
 497   OSThread* osthread = new OSThread(NULL, NULL);
 498   if (osthread == NULL) return NULL;
 499 
 500   // Initialize the JDK library's interrupt event.
 501   // This should really be done when OSThread is constructed,
 502   // but there is no way for a constructor to report failure to
 503   // allocate the event.
 504   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 505   if (interrupt_event == NULL) {
 506     delete osthread;
 507     return NULL;
 508   }
 509   osthread->set_interrupt_event(interrupt_event);
 510 
 511   // Store info on the Win32 thread into the OSThread
 512   osthread->set_thread_handle(thread_handle);
 513   osthread->set_thread_id(thread_id);
 514 
 515   if (UseNUMA) {
 516     int lgrp_id = os::numa_get_group_id();
 517     if (lgrp_id != -1) {
 518       thread->set_lgrp_id(lgrp_id);
 519     }
 520   }
 521 
 522   // Initial thread state is INITIALIZED, not SUSPENDED
 523   osthread->set_state(INITIALIZED);
 524 
 525   return osthread;
 526 }
 527 
 528 
 529 bool os::create_attached_thread(JavaThread* thread) {
 530 #ifdef ASSERT
 531   thread->verify_not_published();
 532 #endif
 533   HANDLE thread_h;
 534   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 535                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 536     fatal("DuplicateHandle failed\n");
 537   }
 538   OSThread* osthread = create_os_thread(thread, thread_h,
 539                                         (int)current_thread_id());
 540   if (osthread == NULL) {
 541     return false;
 542   }
 543 
 544   // Initial thread state is RUNNABLE
 545   osthread->set_state(RUNNABLE);
 546 
 547   thread->set_osthread(osthread);
 548 
 549   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 550     os::current_thread_id());
 551 
 552   return true;
 553 }
 554 
 555 bool os::create_main_thread(JavaThread* thread) {
 556 #ifdef ASSERT
 557   thread->verify_not_published();
 558 #endif
 559   if (_starting_thread == NULL) {
 560     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 561     if (_starting_thread == NULL) {
 562       return false;
 563     }
 564   }
 565 
 566   // The primordial thread is runnable from the start
 567   _starting_thread->set_state(RUNNABLE);
 568 
 569   thread->set_osthread(_starting_thread);
 570   return true;
 571 }
 572 
 573 // Helper function to trace _beginthreadex attributes,
 574 //  similar to os::Posix::describe_pthread_attr()
 575 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 576                                                size_t stacksize, unsigned initflag) {
 577   stringStream ss(buf, buflen);
 578   if (stacksize == 0) {
 579     ss.print("stacksize: default, ");
 580   } else {
 581     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 582   }
 583   ss.print("flags: ");
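       // X-macro: PRINT_FLAG is expanded for each flag of interest below and
       // prints the flag's name if it is set in initflag.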
 584   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 585   #define ALL(X) \
 586     X(CREATE_SUSPENDED) \
 587     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 588   ALL(PRINT_FLAG)
 589   #undef ALL
 590   #undef PRINT_FLAG
 591   return buf;
 592 }
 593 
 594 // Allocate and initialize a new OSThread
 595 bool os::create_thread(Thread* thread, ThreadType thr_type,
 596                        size_t stack_size) {
 597   unsigned thread_id;
 598 
 599   // Allocate the OSThread object
 600   OSThread* osthread = new OSThread(NULL, NULL);
 601   if (osthread == NULL) {
 602     return false;
 603   }
 604 
 605   // Initialize the JDK library's interrupt event.
 606   // This should really be done when OSThread is constructed,
 607   // but there is no way for a constructor to report failure to
 608   // allocate the event.
 609   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 610   if (interrupt_event == NULL) {
 611     delete osthread;
 612     return false;
 613   }
 614   osthread->set_interrupt_event(interrupt_event);
 615   osthread->set_interrupted(false);
 616 
 617   thread->set_osthread(osthread);
 618 
 619   if (stack_size == 0) {
 620     switch (thr_type) {
 621     case os::java_thread:
 622       // Java threads use ThreadStackSize, whose default value can be changed with the flag -Xss
 623       if (JavaThread::stack_size_at_create() > 0) {
 624         stack_size = JavaThread::stack_size_at_create();
 625       }
 626       break;
 627     case os::compiler_thread:
 628       if (CompilerThreadStackSize > 0) {
 629         stack_size = (size_t)(CompilerThreadStackSize * K);
 630         break;
 631       } // else fall through:
 632         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 633     case os::vm_thread:
 634     case os::pgc_thread:
 635     case os::cgc_thread:
 636     case os::watcher_thread:
 637       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 638       break;
 639     }
 640   }
 641 
 642   // Create the Win32 thread
 643   //
 644   // Contrary to what the MSDN documentation says, "stack_size" in
 645   // _beginthreadex() does not specify the stack size. Instead, it specifies
 646   // the size of the initially committed space. The stack size is determined
 647   // by the PE header in the executable. If the committed "stack_size" is
 648   // larger than the default value in the PE header, the stack is rounded up
 649   // to the nearest multiple of 1MB. For example, if the launcher has a
 650   // default stack size of 320k, specifying any size less than 320k does not
 651   // affect the actual stack size at all; it only affects the initial
 652   // commitment. On the other hand, specifying a 'stack_size' larger than the
 653   // default value may cause a significant increase in memory usage, because
 654   // not only is the stack space rounded up to a multiple of 1MB, but the
 655   // entire space is also committed upfront.
 656   //
 657   // Finally, Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
 658   // for CreateThread() that makes it treat 'stack_size' as the stack size.
 659   // However, we are not supposed to call CreateThread() directly according to
 660   // the MSDN documentation, because the JVM uses the C runtime library. The
 661   // good news is that the flag appears to work with _beginthreadex() as well.
 662 
 663   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 664   HANDLE thread_handle =
 665     (HANDLE)_beginthreadex(NULL,
 666                            (unsigned)stack_size,
 667                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 668                            thread,
 669                            initflag,
 670                            &thread_id);
 671 
 672   char buf[64];
 673   if (thread_handle != NULL) {
 674     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 675       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 676   } else {
 677     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 678       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 679     // Log some OS information which might explain why creating the thread failed.
 680     log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
 681     LogStream st(Log(os, thread)::info());
 682     os::print_memory_info(&st);
 683   }
 684 
 685   if (thread_handle == NULL) {
 686     // Need to clean up stuff we've allocated so far
 687     CloseHandle(osthread->interrupt_event());
 688     thread->set_osthread(NULL);
 689     delete osthread;
 690     return false;
 691   }
 692 
 693   Atomic::inc(&os::win32::_os_thread_count);
 694 
 695   // Store info on the Win32 thread into the OSThread
 696   osthread->set_thread_handle(thread_handle);
 697   osthread->set_thread_id(thread_id);
 698 
 699   // Initial thread state is INITIALIZED, not SUSPENDED
 700   osthread->set_state(INITIALIZED);
 701 
 702   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 703   return true;
 704 }
 705 
 706 
 707 // Free Win32 resources related to the OSThread
 708 void os::free_thread(OSThread* osthread) {
 709   assert(osthread != NULL, "osthread not set");
 710 
 711   // We are told to free resources of the argument thread,
 712   // but we can only really operate on the current thread.
 713   assert(Thread::current()->osthread() == osthread,
 714          "os::free_thread but not current thread");
 715 
 716   CloseHandle(osthread->thread_handle());
 717   CloseHandle(osthread->interrupt_event());
 718   delete osthread;
 719 }
 720 
 721 static jlong first_filetime;
 722 static jlong initial_performance_count;
 723 static jlong performance_frequency;
 724 
 725 
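     // Assemble a 64-bit jlong from the two 32-bit halves of a LARGE_INTEGER.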
 726 jlong as_long(LARGE_INTEGER x) {
 727   jlong result = 0; // initialization to avoid warning
 728   set_high(&result, x.HighPart);
 729   set_low(&result, x.LowPart);
 730   return result;
 731 }
 732 
 733 
 734 jlong os::elapsed_counter() {
 735   LARGE_INTEGER count;
 736   QueryPerformanceCounter(&count);
 737   return as_long(count) - initial_performance_count;
 738 }
 739 
 740 
 741 jlong os::elapsed_frequency() {
 742   return performance_frequency;
 743 }
 744 
 745 
 746 julong os::available_memory() {
 747   return win32::available_memory();
 748 }
 749 
 750 julong os::win32::available_memory() {
 751   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
 752   // value if total memory is larger than 4GB
 753   MEMORYSTATUSEX ms;
 754   ms.dwLength = sizeof(ms);
 755   GlobalMemoryStatusEx(&ms);
 756 
 757   return (julong)ms.ullAvailPhys;
 758 }
 759 
 760 julong os::physical_memory() {
 761   return win32::physical_memory();
 762 }
 763 
 764 bool os::has_allocatable_memory_limit(julong* limit) {
 765   MEMORYSTATUSEX ms;
 766   ms.dwLength = sizeof(ms);
 767   GlobalMemoryStatusEx(&ms);
 768 #ifdef _LP64
 769   *limit = (julong)ms.ullAvailVirtual;
 770   return true;
 771 #else
 772   // Limit to 1400m because of the 2gb address space wall
 773   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 774   return true;
 775 #endif
 776 }
 777 
 778 int os::active_processor_count() {
 779   // User has overridden the number of active processors
 780   if (ActiveProcessorCount > 0) {
 781     log_trace(os)("active_processor_count: "
 782                   "active processor count set by user : %d",
 783                   ActiveProcessorCount);
 784     return ActiveProcessorCount;
 785   }
 786 
 787   DWORD_PTR lpProcessAffinityMask = 0;
 788   DWORD_PTR lpSystemAffinityMask = 0;
 789   int proc_count = processor_count();
 790   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 791       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
 792     // Number of active processors is the number of bits in the process affinity mask
 793     int bitcount = 0;
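         // Kernighan's trick: clearing the lowest set bit on each iteration
         // counts the set bits in the affinity mask.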
 794     while (lpProcessAffinityMask != 0) {
 795       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 796       bitcount++;
 797     }
 798     return bitcount;
 799   } else {
 800     return proc_count;
 801   }
 802 }
 803 
 804 void os::set_native_thread_name(const char *name) {
 805 
 806   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 807   //
 808   // Note that unfortunately this only works if the process
 809   // is already attached to a debugger; the debugger must observe
 810   // the exception below to show the correct name.
 811 
 812   // If there is no debugger attached, skip raising the exception
 813   if (!IsDebuggerPresent()) {
 814     return;
 815   }
 816 
 817   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 818   struct {
 819     DWORD dwType;     // must be 0x1000
 820     LPCSTR szName;    // pointer to name (in user addr space)
 821     DWORD dwThreadID; // thread ID (-1=caller thread)
 822     DWORD dwFlags;    // reserved for future use, must be zero
 823   } info;
 824 
 825   info.dwType = 0x1000;
 826   info.szName = name;
 827   info.dwThreadID = -1;
 828   info.dwFlags = 0;
 829 
 830   __try {
 831     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 832   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 833 }
 834 
 835 bool os::distribute_processes(uint length, uint* distribution) {
 836   // Not yet implemented.
 837   return false;
 838 }
 839 
 840 bool os::bind_to_processor(uint processor_id) {
 841   // Not yet implemented.
 842   return false;
 843 }
 844 
 845 void os::win32::initialize_performance_counter() {
 846   LARGE_INTEGER count;
 847   QueryPerformanceFrequency(&count);
 848   performance_frequency = as_long(count);
 849   QueryPerformanceCounter(&count);
 850   initial_performance_count = as_long(count);
 851 }
 852 
 853 
 854 double os::elapsedTime() {
 855   return (double) elapsed_counter() / (double) elapsed_frequency();
 856 }
 857 
 858 
 859 // Windows format:
 860 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 861 // Java format:
 862 //   Java standards require the number of milliseconds since 1/1/1970
 863 
 864 // Constant offset - calculated using offset()
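     // (116444736000000000 = 11644473600 seconds between 1601-01-01 and
     //  1970-01-01, expressed in 100-nanosecond FILETIME units.)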
 865 static jlong  _offset   = 116444736000000000;
 866 // Fake time counter for reproducible results when debugging
 867 static jlong  fake_time = 0;
 868 
 869 #ifdef ASSERT
 870 // Just to be safe, recalculate the offset in debug mode
 871 static jlong _calculated_offset = 0;
 872 static int   _has_calculated_offset = 0;
 873 
 874 jlong offset() {
 875   if (_has_calculated_offset) return _calculated_offset;
 876   SYSTEMTIME java_origin;
 877   java_origin.wYear          = 1970;
 878   java_origin.wMonth         = 1;
 879   java_origin.wDayOfWeek     = 0; // ignored
 880   java_origin.wDay           = 1;
 881   java_origin.wHour          = 0;
 882   java_origin.wMinute        = 0;
 883   java_origin.wSecond        = 0;
 884   java_origin.wMilliseconds  = 0;
 885   FILETIME jot;
 886   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 887     fatal("Error = %d\nWindows error", GetLastError());
 888   }
 889   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 890   _has_calculated_offset = 1;
 891   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 892   return _calculated_offset;
 893 }
 894 #else
 895 jlong offset() {
 896   return _offset;
 897 }
 898 #endif
 899 
 900 jlong windows_to_java_time(FILETIME wt) {
 901   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
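       // 100 ns units -> milliseconds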
 902   return (a - offset()) / 10000;
 903 }
 904 
 905 // Returns time ticks in 100 ns units (tenths of a microsecond)
 906 jlong windows_to_time_ticks(FILETIME wt) {
 907   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 908   return (a - offset());
 909 }
 910 
 911 FILETIME java_to_windows_time(jlong l) {
 912   jlong a = (l * 10000) + offset();
 913   FILETIME result;
 914   result.dwHighDateTime = high(a);
 915   result.dwLowDateTime  = low(a);
 916   return result;
 917 }
 918 
 919 bool os::supports_vtime() { return true; }
 920 bool os::enable_vtime() { return false; }
 921 bool os::vtime_enabled() { return false; }
 922 
 923 double os::elapsedVTime() {
 924   FILETIME created;
 925   FILETIME exited;
 926   FILETIME kernel;
 927   FILETIME user;
 928   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 929     // the resolution of windows_to_java_time() should be sufficient (ms)
 930     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 931   } else {
 932     return elapsedTime();
 933   }
 934 }
 935 
 936 jlong os::javaTimeMillis() {
 937   FILETIME wt;
 938   GetSystemTimeAsFileTime(&wt);
 939   return windows_to_java_time(wt);
 940 }
 941 
 942 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 943   FILETIME wt;
 944   GetSystemTimeAsFileTime(&wt);
 945   jlong ticks = windows_to_time_ticks(wt); // 10th of micros
 946   jlong secs = jlong(ticks / 10000000); // 10000 * 1000
 947   seconds = secs;
 948   nanos = jlong(ticks - (secs*10000000)) * 100;
 949 }
 950 
 951 jlong os::javaTimeNanos() {
 952     LARGE_INTEGER current_count;
 953     QueryPerformanceCounter(&current_count);
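         // Scale the raw counter value by the counter frequency to express
         // elapsed time in nanoseconds.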
 954     double current = as_long(current_count);
 955     double freq = performance_frequency;
 956     jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 957     return time;
 958 }
 959 
 960 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 961   jlong freq = performance_frequency;
 962   if (freq < NANOSECS_PER_SEC) {
 963     // the performance counter is 64 bits and we will
 964     // be multiplying it -- so no wrap in 64 bits
 965     info_ptr->max_value = ALL_64_BITS;
 966   } else if (freq > NANOSECS_PER_SEC) {
 967     // use the max value the counter can reach to
 968     // determine the max value which could be returned
 969     julong max_counter = (julong)ALL_64_BITS;
 970     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 971   } else {
 972     // the performance counter is 64 bits and we will
 973     // be using it directly -- so no wrap in 64 bits
 974     info_ptr->max_value = ALL_64_BITS;
 975   }
 976 
 977   // using a counter, so no skipping
 978   info_ptr->may_skip_backward = false;
 979   info_ptr->may_skip_forward = false;
 980 
 981   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 982 }
 983 
 984 char* os::local_time_string(char *buf, size_t buflen) {
 985   SYSTEMTIME st;
 986   GetLocalTime(&st);
 987   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 988                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 989   return buf;
 990 }
 991 
 992 bool os::getTimesSecs(double* process_real_time,
 993                       double* process_user_time,
 994                       double* process_system_time) {
 995   HANDLE h_process = GetCurrentProcess();
 996   FILETIME create_time, exit_time, kernel_time, user_time;
 997   BOOL result = GetProcessTimes(h_process,
 998                                 &create_time,
 999                                 &exit_time,
1000                                 &kernel_time,
1001                                 &user_time);
1002   if (result != 0) {
1003     FILETIME wt;
1004     GetSystemTimeAsFileTime(&wt);
1005     jlong rtc_millis = windows_to_java_time(wt);
1006     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
1007     *process_user_time =
1008       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
1009     *process_system_time =
1010       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
1011     return true;
1012   } else {
1013     return false;
1014   }
1015 }
1016 
1017 void os::shutdown() {
1018   // allow PerfMemory to attempt cleanup of any persistent resources
1019   perfMemory_exit();
1020 
1021   // flush buffered output, finish log files
1022   ostream_abort();
1023 
1024   // Check for abort hook
1025   abort_hook_t abort_hook = Arguments::abort_hook();
1026   if (abort_hook != NULL) {
1027     abort_hook();
1028   }
1029 }
1030 
1031 
1032 static HANDLE dumpFile = NULL;
1033 
1034 // Check if dump file can be created.
1035 void os::check_dump_limit(char* buffer, size_t buffsz) {
1036   bool status = true;
1037   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1038     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1039     status = false;
1040   }
1041 
1042 #ifndef ASSERT
1043   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1044     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1045     status = false;
1046   }
1047 #endif
1048 
1049   if (status) {
1050     const char* cwd = get_current_directory(NULL, 0);
1051     int pid = current_process_id();
1052     if (cwd != NULL) {
1053       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1054     } else {
1055       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1056     }
1057 
1058     if (dumpFile == NULL &&
1059        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1060                  == INVALID_HANDLE_VALUE) {
1061       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1062       status = false;
1063     }
1064   }
1065   VMError::record_coredump_status(buffer, status);
1066 }
1067 
1068 void os::abort(bool dump_core, void* siginfo, const void* context) {
1069   EXCEPTION_POINTERS ep;
1070   MINIDUMP_EXCEPTION_INFORMATION mei;
1071   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1072 
1073   HANDLE hProcess = GetCurrentProcess();
1074   DWORD processId = GetCurrentProcessId();
1075   MINIDUMP_TYPE dumpType;
1076 
1077   shutdown();
1078   if (!dump_core || dumpFile == NULL) {
1079     if (dumpFile != NULL) {
1080       CloseHandle(dumpFile);
1081     }
1082     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1083   }
1084 
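       // Ask for a full-memory minidump, plus handle data, full memory-layout
       // info, thread info and the list of unloaded modules.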
1085   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1086     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1087 
1088   if (siginfo != NULL && context != NULL) {
1089     ep.ContextRecord = (PCONTEXT) context;
1090     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1091 
1092     mei.ThreadId = GetCurrentThreadId();
1093     mei.ExceptionPointers = &ep;
1094     pmei = &mei;
1095   } else {
1096     pmei = NULL;
1097   }
1098 
1099   // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
1100   // the dump types we really want. If the first call fails, fall back to just using MiniDumpWithFullMemory.
1101   if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
1102       !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
1103     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1104   }
1105   CloseHandle(dumpFile);
1106   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1107 }
1108 
1109 // Die immediately, no exit hook, no abort hook, no cleanup.
1110 void os::die() {
1111   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1112 }
1113 
1114 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1115 //  * dirent_md.c       1.15 00/02/02
1116 //
1117 // The declarations for DIR and struct dirent are in jvm_win32.h.
1118 
1119 // Caller must have already run dirname through JVM_NativePath, which removes
1120 // duplicate slashes and converts all instances of '/' into '\\'.
1121 
1122 DIR * os::opendir(const char *dirname) {
1123   assert(dirname != NULL, "just checking");   // hotspot change
1124   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1125   DWORD fattr;                                // hotspot change
1126   char alt_dirname[4] = { 0, 0, 0, 0 };
1127 
1128   if (dirp == 0) {
1129     errno = ENOMEM;
1130     return 0;
1131   }
1132 
1133   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1134   // as a directory in FindFirstFile().  We detect this case here and
1135   // prepend the current drive name.
1136   //
1137   if (dirname[1] == '\0' && dirname[0] == '\\') {
1138     alt_dirname[0] = _getdrive() + 'A' - 1;
1139     alt_dirname[1] = ':';
1140     alt_dirname[2] = '\\';
1141     alt_dirname[3] = '\0';
1142     dirname = alt_dirname;
1143   }
1144 
1145   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1146   if (dirp->path == 0) {
1147     free(dirp);
1148     errno = ENOMEM;
1149     return 0;
1150   }
1151   strcpy(dirp->path, dirname);
1152 
1153   fattr = GetFileAttributes(dirp->path);
1154   if (fattr == 0xffffffff) {
1155     free(dirp->path);
1156     free(dirp);
1157     errno = ENOENT;
1158     return 0;
1159   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1160     free(dirp->path);
1161     free(dirp);
1162     errno = ENOTDIR;
1163     return 0;
1164   }
1165 
1166   // Append "*.*", or possibly "\\*.*", to path
1167   if (dirp->path[1] == ':' &&
1168       (dirp->path[2] == '\0' ||
1169       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1170     // No '\\' needed for cases like "Z:" or "Z:\"
1171     strcat(dirp->path, "*.*");
1172   } else {
1173     strcat(dirp->path, "\\*.*");
1174   }
1175 
1176   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1177   if (dirp->handle == INVALID_HANDLE_VALUE) {
1178     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1179       free(dirp->path);
1180       free(dirp);
1181       errno = EACCES;
1182       return 0;
1183     }
1184   }
1185   return dirp;
1186 }
1187 
1188 struct dirent * os::readdir(DIR *dirp) {
1189   assert(dirp != NULL, "just checking");      // hotspot change
1190   if (dirp->handle == INVALID_HANDLE_VALUE) {
1191     return NULL;
1192   }
1193 
1194   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1195 
1196   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1197     if (GetLastError() == ERROR_INVALID_HANDLE) {
1198       errno = EBADF;
1199       return NULL;
1200     }
1201     FindClose(dirp->handle);
1202     dirp->handle = INVALID_HANDLE_VALUE;
1203   }
1204 
1205   return &dirp->dirent;
1206 }
1207 
1208 int os::closedir(DIR *dirp) {
1209   assert(dirp != NULL, "just checking");      // hotspot change
1210   if (dirp->handle != INVALID_HANDLE_VALUE) {
1211     if (!FindClose(dirp->handle)) {
1212       errno = EBADF;
1213       return -1;
1214     }
1215     dirp->handle = INVALID_HANDLE_VALUE;
1216   }
1217   free(dirp->path);
1218   free(dirp);
1219   return 0;
1220 }
1221 
1222 // This must be hard coded because it's the system's temporary
1223 // directory, not the Java application's temp directory (java.io.tmpdir).
1224 const char* os::get_temp_directory() {
1225   static char path_buf[MAX_PATH];
1226   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1227     return path_buf;
1228   } else {
1229     path_buf[0] = '\0';
1230     return path_buf;
1231   }
1232 }
1233 
1234 // Needs to be in the os-specific directory because Windows requires the
1235 // additional header file <direct.h>
1236 const char* os::get_current_directory(char *buf, size_t buflen) {
1237   int n = static_cast<int>(buflen);
1238   if (buflen > INT_MAX)  n = INT_MAX;
1239   return _getcwd(buf, n);
1240 }
1241 
1242 //-----------------------------------------------------------
1243 // Helper functions for fatal error handler
1244 #ifdef _WIN64
1245 // Helper routine which returns true if the address is
1246 // within the NTDLL address space.
1247 //
1248 static bool _addr_in_ntdll(address addr) {
1249   HMODULE hmod;
1250   MODULEINFO minfo;
1251 
1252   hmod = GetModuleHandle("NTDLL.DLL");
1253   if (hmod == NULL) return false;
1254   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1255                                           &minfo, sizeof(MODULEINFO))) {
1256     return false;
1257   }
1258 
1259   if ((addr >= minfo.lpBaseOfDll) &&
1260       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1261     return true;
1262   } else {
1263     return false;
1264   }
1265 }
1266 #endif
1267 
1268 struct _modinfo {
1269   address addr;
1270   char*   full_path;   // point to a char buffer
1271   int     buflen;      // size of the buffer
1272   address base_addr;
1273 };
1274 
1275 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1276                                   address top_address, void * param) {
1277   struct _modinfo *pmod = (struct _modinfo *)param;
1278   if (!pmod) return -1;
1279 
1280   if (base_addr   <= pmod->addr &&
1281       top_address > pmod->addr) {
1282     // if a buffer is provided, copy path name to the buffer
1283     if (pmod->full_path) {
1284       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1285     }
1286     pmod->base_addr = base_addr;
1287     return 1;
1288   }
1289   return 0;
1290 }
1291 
1292 bool os::dll_address_to_library_name(address addr, char* buf,
1293                                      int buflen, int* offset) {
1294   // buf is not optional, but offset is optional
1295   assert(buf != NULL, "sanity check");
1296 
1297 // NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
1298 //       return the full path to the DLL file: sometimes it returns the path
1299 //       to the corresponding PDB file (debug info); sometimes it only
1300 //       returns a partial path, which makes life painful.
1301 
1302   struct _modinfo mi;
1303   mi.addr      = addr;
1304   mi.full_path = buf;
1305   mi.buflen    = buflen;
1306   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1307     // buf already contains path name
1308     if (offset) *offset = addr - mi.base_addr;
1309     return true;
1310   }
1311 
1312   buf[0] = '\0';
1313   if (offset) *offset = -1;
1314   return false;
1315 }
1316 
1317 bool os::dll_address_to_function_name(address addr, char *buf,
1318                                       int buflen, int *offset,
1319                                       bool demangle) {
1320   // buf is not optional, but offset is optional
1321   assert(buf != NULL, "sanity check");
1322 
1323   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1324     return true;
1325   }
1326   if (offset != NULL)  *offset  = -1;
1327   buf[0] = '\0';
1328   return false;
1329 }
1330 
1331 // save the start and end address of jvm.dll into param[0] and param[1]
1332 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1333                            address top_address, void * param) {
1334   if (!param) return -1;
1335 
1336   if (base_addr   <= (address)_locate_jvm_dll &&
1337       top_address > (address)_locate_jvm_dll) {
1338     ((address*)param)[0] = base_addr;
1339     ((address*)param)[1] = top_address;
1340     return 1;
1341   }
1342   return 0;
1343 }
1344 
1345 address vm_lib_location[2];    // start and end address of jvm.dll
1346 
1347 // check if addr is inside jvm.dll
1348 bool os::address_is_in_vm(address addr) {
1349   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1350     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1351       assert(false, "Can't find jvm module.");
1352       return false;
1353     }
1354   }
1355 
1356   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1357 }
1358 
1359 // print module info; param is outputStream*
1360 static int _print_module(const char* fname, address base_address,
1361                          address top_address, void* param) {
1362   if (!param) return -1;
1363 
1364   outputStream* st = (outputStream*)param;
1365 
1366   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1367   return 0;
1368 }
1369 
1370 // Loads a .dll/.so and,
1371 // in case of error, checks whether the .dll/.so was built for the
1372 // same architecture that Hotspot is running on
1373 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1374   log_info(os)("attempting shared library load of %s", name);
1375 
1376   void * result = LoadLibrary(name);
1377   if (result != NULL) {
1378     Events::log(NULL, "Loaded shared library %s", name);
1379     // Recalculate pdb search path if a DLL was loaded successfully.
1380     SymbolEngine::recalc_search_path();
1381     log_info(os)("shared library load of %s was successful", name);
1382     return result;
1383   }
1384   DWORD errcode = GetLastError();
1385   // Read system error message into ebuf
1386   // It may or may not be overwritten below (in the for loop and just above)
1387   lasterror(ebuf, (size_t) ebuflen);
1388   ebuf[ebuflen - 1] = '\0';
1389   Events::log(NULL, "Loading shared library %s failed, error code %lu", name, errcode);
1390   log_info(os)("shared library load of %s failed, error code %lu", name, errcode);
1391 
1392   if (errcode == ERROR_MOD_NOT_FOUND) {
1393     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1394     ebuf[ebuflen - 1] = '\0';
1395     return NULL;
1396   }
1397 
1398   // Parsing the dll below:
1399   // if we can read the dll-info and find that the dll was built
1400   // for an architecture other than the one Hotspot is running on,
1401   // then print "DLL was built for a different architecture" to the buffer;
1402   // else call os::lasterror to obtain the system error message.
1403   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1404   if (fd < 0) {
1405     return NULL;
1406   }
1407 
1408   uint32_t signature_offset;
1409   uint16_t lib_arch = 0;
1410   bool failed_to_get_lib_arch =
1411     ( // Go to position 3c in the dll
1412      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1413      ||
1414      // Read location of signature
1415      (sizeof(signature_offset) !=
1416      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1417      ||
1418      // Go to COFF File Header in dll
1419      // that is located after "signature" (4 bytes long)
1420      (os::seek_to_file_offset(fd,
1421      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1422      ||
1423      // Read field that contains code of architecture
1424      // that dll was built for
1425      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1426     );
1427 
1428   ::close(fd);
1429   if (failed_to_get_lib_arch) {
1430     // file i/o error - report os::lasterror(...) msg
1431     return NULL;
1432   }
1433 
1434   typedef struct {
1435     uint16_t arch_code;
1436     char* arch_name;
1437   } arch_t;
1438 
1439   static const arch_t arch_array[] = {
1440     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1441     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"}
1442   };
1443 #if (defined _M_AMD64)
1444   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1445 #elif (defined _M_IX86)
1446   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1447 #else
1448   #error Method os::dll_load requires that one of following \
1449          is defined :_M_AMD64 or _M_IX86
1450 #endif
1451 
1452 
1453   // Obtain strings for the printf operation:
1454   // lib_arch_str shall name the platform this .dll was built for,
1455   // running_arch_str shall name the platform Hotspot was built for.
1456   char *running_arch_str = NULL, *lib_arch_str = NULL;
1457   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1458     if (lib_arch == arch_array[i].arch_code) {
1459       lib_arch_str = arch_array[i].arch_name;
1460     }
1461     if (running_arch == arch_array[i].arch_code) {
1462       running_arch_str = arch_array[i].arch_name;
1463     }
1464   }
1465 
1466   assert(running_arch_str,
1467          "Didn't find running architecture code in arch_array");
1468 
1469   // If the architecture is right
1470   // but some other error took place - report os::lasterror(...) msg
1471   if (lib_arch == running_arch) {
1472     return NULL;
1473   }
1474 
1475   if (lib_arch_str != NULL) {
1476     ::_snprintf(ebuf, ebuflen - 1,
1477                 "Can't load %s-bit .dll on a %s-bit platform",
1478                 lib_arch_str, running_arch_str);
1479   } else {
1480     // don't know what architecture this dll was built for
1481     ::_snprintf(ebuf, ebuflen - 1,
1482                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1483                 lib_arch, running_arch_str);
1484   }
1485 
1486   return NULL;
1487 }
1488 
1489 void os::print_dll_info(outputStream *st) {
1490   st->print_cr("Dynamic libraries:");
1491   get_loaded_modules_info(_print_module, (void *)st);
1492 }
1493 
1494 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1495   HANDLE   hProcess;
1496 
1497 # define MAX_NUM_MODULES 128
1498   HMODULE     modules[MAX_NUM_MODULES];
1499   static char filename[MAX_PATH];
1500   int         result = 0;
1501 
1502   int pid = os::current_process_id();
1503   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1504                          FALSE, pid);
1505   if (hProcess == NULL) return 0;
1506 
1507   DWORD size_needed;
1508   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1509     CloseHandle(hProcess);
1510     return 0;
1511   }
1512 
1513   // number of modules that are currently loaded
1514   int num_modules = size_needed / sizeof(HMODULE);
1515 
1516   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1517     // Get Full pathname:
1518     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1519       filename[0] = '\0';
1520     }
1521 
1522     MODULEINFO modinfo;
1523     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1524       modinfo.lpBaseOfDll = NULL;
1525       modinfo.SizeOfImage = 0;
1526     }
1527 
1528     // Invoke callback function
1529     result = callback(filename, (address)modinfo.lpBaseOfDll,
1530                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1531     if (result) break;
1532   }
1533 
1534   CloseHandle(hProcess);
1535   return result;
1536 }
1537 
1538 bool os::get_host_name(char* buf, size_t buflen) {
1539   DWORD size = (DWORD)buflen;
1540   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1541 }
1542 
1543 void os::get_summary_os_info(char* buf, size_t buflen) {
1544   stringStream sst(buf, buflen);
1545   os::win32::print_windows_version(&sst);
1546   // chop off newline character
1547   char* nl = strchr(buf, '\n');
1548   if (nl != NULL) *nl = '\0';
1549 }
1550 
1551 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1552 #if _MSC_VER >= 1900
1553   // Starting with Visual Studio 2015, vsnprintf is C99 compliant.
1554   int result = ::vsnprintf(buf, len, fmt, args);
1555   // If an encoding error occurred (result < 0) then it's not clear
1556   // whether the buffer is NUL terminated, so ensure it is.
1557   if ((result < 0) && (len > 0)) {
1558     buf[len - 1] = '\0';
1559   }
1560   return result;
1561 #else
1562   // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
1563   // _vsnprintf, whose behavior seems to be *mostly* consistent across
1564   // versions.  However, when len == 0, avoid _vsnprintf too, and just
1565   // go straight to _vscprintf.  The output is going to be truncated in
1566   // that case, except in the unusual case of empty output.  More
1567   // importantly, the documentation for various versions of Visual Studio
1568   // are inconsistent about the behavior of _vsnprintf when len == 0,
1569   // including it possibly being an error.
1570   int result = -1;
1571   if (len > 0) {
1572     result = _vsnprintf(buf, len, fmt, args);
1573     // If output (including NUL terminator) is truncated, the buffer
1574     // won't be NUL terminated.  Add the trailing NUL specified by C99.
1575     if ((result < 0) || ((size_t)result >= len)) {
1576       buf[len - 1] = '\0';
1577     }
1578   }
1579   if (result < 0) {
1580     result = _vscprintf(fmt, args);
1581   }
1582   return result;
1583 #endif // _MSC_VER dispatch
1584 }
1585 
1586 static inline time_t get_mtime(const char* filename) {
1587   struct stat st;
1588   int ret = os::stat(filename, &st);
1589   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
1590   return st.st_mtime;
1591 }
1592 
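     // Compare the modification times of two files. The sign of the result
     // tells which file is newer (positive means file1 was modified more
     // recently); note that the time_t difference is truncated to an int.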
1593 int os::compare_file_modified_times(const char* file1, const char* file2) {
1594   time_t t1 = get_mtime(file1);
1595   time_t t2 = get_mtime(file2);
1596   return t1 - t2;
1597 }
1598 
1599 void os::print_os_info_brief(outputStream* st) {
1600   os::print_os_info(st);
1601 }
1602 
1603 void os::print_os_info(outputStream* st) {
1604 #ifdef ASSERT
1605   char buffer[1024];
1606   st->print("HostName: ");
1607   if (get_host_name(buffer, sizeof(buffer))) {
1608     st->print("%s ", buffer);
1609   } else {
1610     st->print("N/A ");
1611   }
1612 #endif
1613   st->print("OS:");
1614   os::win32::print_windows_version(st);
1615 
1616 #ifdef _LP64
1617   VM_Version::print_platform_virtualization_info(st);
1618 #endif
1619 }
1620 
1621 void os::win32::print_windows_version(outputStream* st) {
1622   OSVERSIONINFOEX osvi;
1623   VS_FIXEDFILEINFO *file_info;
1624   TCHAR kernel32_path[MAX_PATH];
1625   UINT len, ret;
1626 
1627   // Use the GetVersionEx information to see if we're on a server or
1628   // workstation edition of Windows. Starting with Windows 8.1 we can't
1629   // trust the OS version information returned by this API.
1630   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1631   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1632   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1633     st->print_cr("Call to GetVersionEx failed");
1634     return;
1635   }
1636   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1637 
1638   // Get the full path to \Windows\System32\kernel32.dll and use that for
1639   // determining what version of Windows we're running on.
1640   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1641   ret = GetSystemDirectory(kernel32_path, len);
1642   if (ret == 0 || ret > len) {
1643     st->print_cr("Call to GetSystemDirectory failed");
1644     return;
1645   }
1646   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1647 
1648   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1649   if (version_size == 0) {
1650     st->print_cr("Call to GetFileVersionInfoSize failed");
1651     return;
1652   }
1653 
1654   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1655   if (version_info == NULL) {
1656     st->print_cr("Failed to allocate version_info");
1657     return;
1658   }
1659 
1660   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1661     os::free(version_info);
1662     st->print_cr("Call to GetFileVersionInfo failed");
1663     return;
1664   }
1665 
1666   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1667     os::free(version_info);
1668     st->print_cr("Call to VerQueryValue failed");
1669     return;
1670   }
1671 
1672   int major_version = HIWORD(file_info->dwProductVersionMS);
1673   int minor_version = LOWORD(file_info->dwProductVersionMS);
1674   int build_number = HIWORD(file_info->dwProductVersionLS);
1675   int build_minor = LOWORD(file_info->dwProductVersionLS);
1676   int os_vers = major_version * 1000 + minor_version;
1677   os::free(version_info);
1678 
1679   st->print(" Windows ");
1680   switch (os_vers) {
1681 
1682   case 6000:
1683     if (is_workstation) {
1684       st->print("Vista");
1685     } else {
1686       st->print("Server 2008");
1687     }
1688     break;
1689 
1690   case 6001:
1691     if (is_workstation) {
1692       st->print("7");
1693     } else {
1694       st->print("Server 2008 R2");
1695     }
1696     break;
1697 
1698   case 6002:
1699     if (is_workstation) {
1700       st->print("8");
1701     } else {
1702       st->print("Server 2012");
1703     }
1704     break;
1705 
1706   case 6003:
1707     if (is_workstation) {
1708       st->print("8.1");
1709     } else {
1710       st->print("Server 2012 R2");
1711     }
1712     break;
1713 
1714   case 10000:
1715     if (is_workstation) {
1716       st->print("10");
1717     } else {
1718       // Distinguish Windows Server 2016 from Server 2019 by build number;
1719       // the Windows Server 2019 GA (10/2018) build number is 17763.
1720       if (build_number > 17762) {
1721         st->print("Server 2019");
1722       } else {
1723         st->print("Server 2016");
1724       }
1725     }
1726     break;
1727 
1728   default:
1729   // Unrecognized Windows version; print its major and minor versions.
1730     st->print("%d.%d", major_version, minor_version);
1731     break;
1732   }
1733 
1734   // Retrieve SYSTEM_INFO via GetNativeSystemInfo so that we can find out
1735   // whether we are running on a 64-bit processor.
1736   SYSTEM_INFO si;
1737   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1738   GetNativeSystemInfo(&si);
1739   if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1740     st->print(" , 64 bit");
1741   }
1742 
1743   st->print(" Build %d", build_number);
1744   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1745   st->cr();
1746 }
1747 
1748 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1749   // Nothing to do for now.
1750 }
1751 
1752 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1753   HKEY key;
1754   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1755                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1756   if (status == ERROR_SUCCESS) {
1757     DWORD size = (DWORD)buflen;
1758     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1759     if (status != ERROR_SUCCESS) {
1760         strncpy(buf, "## __CPU__", buflen);
1761     }
1762     RegCloseKey(key);
1763   } else {
1764     // Fall back to generic CPU info.
1765     strncpy(buf, "## __CPU__", buflen);
1766   }
1767 }
1768 
1769 void os::print_memory_info(outputStream* st) {
1770   st->print("Memory:");
1771   st->print(" %dk page", os::vm_page_size()>>10);
1772 
1773   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
1774   // incorrect value if total memory is larger than 4GB.
1775   MEMORYSTATUSEX ms;
1776   ms.dwLength = sizeof(ms);
1777   int r1 = GlobalMemoryStatusEx(&ms);
1778 
1779   if (r1 != 0) {
1780     st->print(", system-wide physical " INT64_FORMAT "M ",
1781              (int64_t) ms.ullTotalPhys >> 20);
1782     st->print("(" INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20);
1783 
1784     st->print("TotalPageFile size " INT64_FORMAT "M ",
1785              (int64_t) ms.ullTotalPageFile >> 20);
1786     st->print("(AvailPageFile size " INT64_FORMAT "M)",
1787              (int64_t) ms.ullAvailPageFile >> 20);
1788 
1789     // On 32-bit, Total/AvailVirtual are interesting (they show how close we get to the 2-4 GB per-process limit)
1790 #if defined(_M_IX86)
1791     st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ",
1792              (int64_t) ms.ullTotalVirtual >> 20);
1793     st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
1794 #endif
1795   } else {
1796     st->print(", GlobalMemoryStatusEx did not succeed so we miss some memory values.");
1797   }
1798 
1799   // extended memory statistics for a process
1800   PROCESS_MEMORY_COUNTERS_EX pmex;
1801   ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
1802   pmex.cb = sizeof(pmex);
1803   int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));
1804 
1805   if (r2 != 0) {
1806     st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ",
1807              (int64_t) pmex.WorkingSetSize >> 20);
1808     st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);
1809 
1810     st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ",
1811              (int64_t) pmex.PrivateUsage >> 20);
1812     st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);
1813   } else {
1814     st->print("\nGetProcessMemoryInfo did not succeed so we miss some memory values.");
1815   }
1816 
1817   st->cr();
1818 }
1819 
1820 bool os::signal_sent_by_kill(const void* siginfo) {
1821   // TODO: Is this possible?
1822   return false;
1823 }
1824 
1825 void os::print_siginfo(outputStream *st, const void* siginfo) {
1826   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1827   st->print("siginfo:");
1828 
1829   char tmp[64];
1830   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1831     strcpy(tmp, "EXCEPTION_??");
1832   }
1833   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1834 
1835   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1836        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1837        er->NumberParameters >= 2) {
1838     switch (er->ExceptionInformation[0]) {
1839     case 0: st->print(", reading address"); break;
1840     case 1: st->print(", writing address"); break;
1841     case 8: st->print(", data execution prevention violation at address"); break;
1842     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1843                        er->ExceptionInformation[0]);
1844     }
1845     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1846   } else {
1847     int num = er->NumberParameters;
1848     if (num > 0) {
1849       st->print(", ExceptionInformation=");
1850       for (int i = 0; i < num; i++) {
1851         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1852       }
1853     }
1854   }
1855   st->cr();
1856 }
1857 
1858 bool os::signal_thread(Thread* thread, int sig, const char* reason) {
1859   // TODO: Can we kill thread?
1860   return false;
1861 }
1862 
1863 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1864   // do nothing
1865 }
1866 
1867 static char saved_jvm_path[MAX_PATH] = {0};
1868 
1869 // Find the full path to the current module, jvm.dll
1870 void os::jvm_path(char *buf, jint buflen) {
1871   // Error checking.
1872   if (buflen < MAX_PATH) {
1873     assert(false, "must use a large-enough buffer");
1874     buf[0] = '\0';
1875     return;
1876   }
1877   // Lazily resolve the path to the current module.
1878   if (saved_jvm_path[0] != 0) {
1879     strcpy(buf, saved_jvm_path);
1880     return;
1881   }
1882 
1883   buf[0] = '\0';
1884   if (Arguments::sun_java_launcher_is_altjvm()) {
1885     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1886     // for a JAVA_HOME environment variable and fix up the path so it
1887     // looks like jvm.dll is installed there (append a fake suffix
1888     // hotspot/jvm.dll).
1889     char* java_home_var = ::getenv("JAVA_HOME");
1890     if (java_home_var != NULL && java_home_var[0] != 0 &&
1891         strlen(java_home_var) < (size_t)buflen) {
1892       strncpy(buf, java_home_var, buflen);
1893 
1894       // Determine if this is a legacy image or a modules image;
1895       // a modules image doesn't have a "jre" subdirectory.
1896       size_t len = strlen(buf);
1897       char* jrebin_p = buf + len;
1898       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1899       if (0 != _access(buf, 0)) {
1900         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1901       }
1902       len = strlen(buf);
1903       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1904     }
1905   }
1906 
1907   if (buf[0] == '\0') {
1908     GetModuleFileName(vm_lib_handle, buf, buflen);
1909   }
1910   strncpy(saved_jvm_path, buf, MAX_PATH);
1911   saved_jvm_path[MAX_PATH - 1] = '\0';
1912 }
1913 
1914 
1915 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1916 #ifndef _WIN64
1917   st->print("_");
1918 #endif
1919 }
1920 
1921 
1922 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1923 #ifndef _WIN64
1924   st->print("@%d", args_size  * sizeof(int));
1925 #endif
1926 }
1927 
1928 // This method is a copy of JDK's sysGetLastErrorString
1929 // from src/windows/hpi/src/system_md.c
1930 
1931 size_t os::lasterror(char* buf, size_t len) {
1932   DWORD errval;
1933 
1934   if ((errval = GetLastError()) != 0) {
1935     // DOS error
1936     size_t n = (size_t)FormatMessage(
1937                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1938                                      NULL,
1939                                      errval,
1940                                      0,
1941                                      buf,
1942                                      (DWORD)len,
1943                                      NULL);
1944     if (n > 3) {
1945       // Drop final '.', CR, LF
1946       if (buf[n - 1] == '\n') n--;
1947       if (buf[n - 1] == '\r') n--;
1948       if (buf[n - 1] == '.') n--;
1949       buf[n] = '\0';
1950     }
1951     return n;
1952   }
1953 
1954   if (errno != 0) {
1955     // C runtime error that has no corresponding DOS error code
1956     const char* s = os::strerror(errno);
1957     size_t n = strlen(s);
1958     if (n >= len) n = len - 1;
1959     strncpy(buf, s, n);
1960     buf[n] = '\0';
1961     return n;
1962   }
1963 
1964   return 0;
1965 }
1966 
1967 int os::get_last_error() {
1968   DWORD error = GetLastError();
1969   if (error == 0) {
1970     error = errno;
1971   }
1972   return (int)error;
1973 }
1974 
1975 // sun.misc.Signal
1976 // NOTE that this is a workaround for an apparent kernel bug where if
1977 // a signal handler for SIGBREAK is installed then that signal handler
1978 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1979 // See bug 4416763.
1980 static void (*sigbreakHandler)(int) = NULL;
1981 
1982 static void UserHandler(int sig, void *siginfo, void *context) {
1983   os::signal_notify(sig);
1984   // We need to reinstate the signal handler each time...
1985   os::signal(sig, (void*)UserHandler);
1986 }
1987 
1988 void* os::user_handler() {
1989   return (void*) UserHandler;
1990 }
1991 
1992 void* os::signal(int signal_number, void* handler) {
1993   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1994     void (*oldHandler)(int) = sigbreakHandler;
1995     sigbreakHandler = (void (*)(int)) handler;
1996     return (void*) oldHandler;
1997   } else {
1998     return (void*)::signal(signal_number, (void (*)(int))handler);
1999   }
2000 }
2001 
2002 void os::signal_raise(int signal_number) {
2003   raise(signal_number);
2004 }
2005 
2006 // The Win32 C runtime library maps all console control events other than ^C
2007 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2008 // logoff, and shutdown events.  We therefore install our own console handler
2009 // that raises SIGTERM for the latter cases.
2010 //
2011 static BOOL WINAPI consoleHandler(DWORD event) {
2012   switch (event) {
2013   case CTRL_C_EVENT:
2014     if (VMError::is_error_reported()) {
2015       // Ctrl-C is pressed during error reporting, likely because the error
2016       // handler fails to abort. Let VM die immediately.
2017       os::die();
2018     }
2019 
2020     os::signal_raise(SIGINT);
2021     return TRUE;
2022     break;
2023   case CTRL_BREAK_EVENT:
2024     if (sigbreakHandler != NULL) {
2025       (*sigbreakHandler)(SIGBREAK);
2026     }
2027     return TRUE;
2028     break;
2029   case CTRL_LOGOFF_EVENT: {
2030     // Don't terminate the JVM if it is running in a non-interactive session,
2031     // such as a service process.
2032     USEROBJECTFLAGS flags;
2033     HANDLE handle = GetProcessWindowStation();
2034     if (handle != NULL &&
2035         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2036         sizeof(USEROBJECTFLAGS), NULL)) {
2037       // If it is a non-interactive session, let the next handler deal
2038       // with it.
2039       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2040         return FALSE;
2041       }
2042     }
2043   }
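       // Fall through: an interactive logoff is treated like the close and
       // shutdown events below and raises SIGTERM.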
2044   case CTRL_CLOSE_EVENT:
2045   case CTRL_SHUTDOWN_EVENT:
2046     os::signal_raise(SIGTERM);
2047     return TRUE;
2048     break;
2049   default:
2050     break;
2051   }
2052   return FALSE;
2053 }
2054 
2055 // The following code was moved from os.cpp to make it platform specific,
2056 // which it is by its very nature.
2057 
2058 // Return maximum OS signal used + 1 for internal use only
2059 // Used as exit signal for signal_thread
2060 int os::sigexitnum_pd() {
2061   return NSIG;
2062 }
2063 
2064 // A counter for each possible signal value, including the signal_thread exit signal
2065 static volatile jint pending_signals[NSIG+1] = { 0 };
2066 static Semaphore* sig_sem = NULL;
2067 
2068 static void jdk_misc_signal_init() {
2069   // Initialize signal structures
2070   memset((void*)pending_signals, 0, sizeof(pending_signals));
2071 
2072   // Initialize signal semaphore
2073   sig_sem = new Semaphore();
2074 
2075   // Programs embedding the VM do not want it to attempt to receive
2076   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2077   // shutdown hooks mechanism introduced in 1.3.  For example, when
2078   // the VM is run as part of a Windows NT service (i.e., a servlet
2079   // engine in a web server), the correct behavior is for any console
2080   // control handler to return FALSE, not TRUE, because the OS's
2081   // "final" handler for such events allows the process to continue if
2082   // it is a service (while terminating it if it is not a service).
2083   // To make this behavior uniform and the mechanism simpler, we
2084   // completely disable the VM's usage of these console events if -Xrs
2085   // (=ReduceSignalUsage) is specified.  This means, for example, that
2086   // the CTRL-BREAK thread dump mechanism is also disabled in this
2087   // case.  See bugs 4323062, 4345157, and related bugs.
2088 
2089   // Add a CTRL-C handler
2090   SetConsoleCtrlHandler(consoleHandler, TRUE);
2091 }
2092 
2093 void os::signal_notify(int sig) {
2094   if (sig_sem != NULL) {
2095     Atomic::inc(&pending_signals[sig]);
2096     sig_sem->signal();
2097   } else {
2098     // With ReduceSignalUsage the signal thread is not created and
2099     // jdk_misc_signal_init() is not called.
2100     assert(ReduceSignalUsage, "signal semaphore should be created");
2101   }
2102 }
2103 
2104 static int check_pending_signals() {
2105   while (true) {
2106     for (int i = 0; i < NSIG + 1; i++) {
2107       jint n = pending_signals[i];
2108       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2109         return i;
2110       }
2111     }
2112     JavaThread *thread = JavaThread::current();
2113 
2114     ThreadBlockInVM tbivm(thread);
2115 
2116     bool threadIsSuspended;
2117     do {
2118       thread->set_suspend_equivalent();
2119       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2120       sig_sem->wait();
2121 
2122       // were we externally suspended while we were waiting?
2123       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2124       if (threadIsSuspended) {
2125         // The semaphore has been incremented, but while we were waiting
2126         // another thread suspended us. We don't want to continue running
2127         // while suspended because that would surprise the thread that
2128         // suspended us.
2129         sig_sem->signal();
2130 
2131         thread->java_suspend_self();
2132       }
2133     } while (threadIsSuspended);
2134   }
2135 }
2136 
2137 int os::signal_wait() {
2138   return check_pending_signals();
2139 }
2140 
2141 // Implicit OS exception handling
2142 
2143 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2144                       address handler) {
2145   JavaThread* thread = (JavaThread*) Thread::current_or_null();
2146   // Save pc in thread
2147 #ifdef _M_AMD64
2148   // Do not blow up if no thread info available.
2149   if (thread) {
2150     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2151   }
2152   // Set pc to handler
2153   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2154 #else
2155   // Do not blow up if no thread info available.
2156   if (thread) {
2157     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2158   }
2159   // Set pc to handler
2160   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2161 #endif
2162 
2163   // Continue the execution
2164   return EXCEPTION_CONTINUE_EXECUTION;
2165 }
2166 
2167 
2168 // Used for PostMortemDump
2169 extern "C" void safepoints();
2170 extern "C" void find(int x);
2171 extern "C" void events();
2172 
2173 // According to the Windows API documentation, an illegal instruction sequence should generate
2174 // the 0xC000001C exception code. However, real-world experience shows that occasionally
2175 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2176 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2177 
2178 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2179 
2180 // From "Execution Protection in the Windows Operating System" draft 0.35
2181 // Once a system header becomes available, the "real" define should be
2182 // included or copied here.
2183 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2184 
2185 // Windows Vista/2008 heap corruption check
2186 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2187 
2188 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2189 // C++ compiler contain this error code. Because this is a compiler-generated
2190 // error, the code is not listed in the Win32 API header files.
2191 // The code is actually a cryptic mnemonic device, with the initial "E"
2192 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2193 // ASCII values of "msc".
2194 
2195 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2196 
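     // def_excpt(E) expands to the aggregate initializer { "E", (E) }, pairing
     // each exception code with its printable name in the table below.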
2197 #define def_excpt(val) { #val, (val) }
2198 
2199 static const struct { const char* name; uint number; } exceptlabels[] = {
2200     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2201     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2202     def_excpt(EXCEPTION_BREAKPOINT),
2203     def_excpt(EXCEPTION_SINGLE_STEP),
2204     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2205     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2206     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2207     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2208     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2209     def_excpt(EXCEPTION_FLT_OVERFLOW),
2210     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2211     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2212     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2213     def_excpt(EXCEPTION_INT_OVERFLOW),
2214     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2215     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2216     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2217     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2218     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2219     def_excpt(EXCEPTION_STACK_OVERFLOW),
2220     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2221     def_excpt(EXCEPTION_GUARD_PAGE),
2222     def_excpt(EXCEPTION_INVALID_HANDLE),
2223     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2224     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2225 };
2226 
2227 #undef def_excpt
2228 
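     // Map a Windows exception code to its symbolic name using the table above.
     // Returns NULL (leaving buf untouched) if the code is not recognized; for
     // example, 0xC0000005 maps to "EXCEPTION_ACCESS_VIOLATION".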
2229 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2230   uint code = static_cast<uint>(exception_code);
2231   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2232     if (exceptlabels[i].number == code) {
2233       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2234       return buf;
2235     }
2236   }
2237 
2238   return NULL;
2239 }
2240 
2241 //-----------------------------------------------------------------------------
2242 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2243   // handle exception caused by idiv; should only happen for -MinInt/-1
2244   // (division by zero is handled explicitly)
2245 #ifdef  _M_AMD64
2246   PCONTEXT ctx = exceptionInfo->ContextRecord;
2247   address pc = (address)ctx->Rip;
2248   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2249   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2250   if (pc[0] == 0xF7) {
2251     // set correct result values and continue after idiv instruction
2252     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2253   } else {
2254     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2255   }
2256   // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation).
2257   // This is the case because the exception only happens for -MinValue/-1, and -MinValue is always in rax because of the
2258   // idiv opcode (0xF7).
2259   ctx->Rdx = (DWORD)0;             // remainder
2260   // Continue the execution
2261 #else
2262   PCONTEXT ctx = exceptionInfo->ContextRecord;
2263   address pc = (address)ctx->Eip;
2264   assert(pc[0] == 0xF7, "not an idiv opcode");
2265   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2266   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2267   // set correct result values and continue after idiv instruction
2268   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2269   ctx->Eax = (DWORD)min_jint;      // result
2270   ctx->Edx = (DWORD)0;             // remainder
2271   // Continue the execution
2272 #endif
2273   return EXCEPTION_CONTINUE_EXECUTION;
2274 }
2275 
2276 //-----------------------------------------------------------------------------
2277 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2278   PCONTEXT ctx = exceptionInfo->ContextRecord;
2279 #ifndef  _WIN64
2280   // Handle an exception caused by a native method modifying the FPU control word.
2281   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2282 
2283   switch (exception_code) {
2284   case EXCEPTION_FLT_DENORMAL_OPERAND:
2285   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2286   case EXCEPTION_FLT_INEXACT_RESULT:
2287   case EXCEPTION_FLT_INVALID_OPERATION:
2288   case EXCEPTION_FLT_OVERFLOW:
2289   case EXCEPTION_FLT_STACK_CHECK:
2290   case EXCEPTION_FLT_UNDERFLOW:
2291     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2292     if (fp_control_word != ctx->FloatSave.ControlWord) {
2293       // Restore FPCW and mask out FLT exceptions
2294       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2295       // Mask out pending FLT exceptions
2296       ctx->FloatSave.StatusWord &=  0xffffff00;
2297       return EXCEPTION_CONTINUE_EXECUTION;
2298     }
2299   }
2300 
2301   if (prev_uef_handler != NULL) {
2302     // We didn't handle this exception so pass it to the previous
2303     // UnhandledExceptionFilter.
2304     return (prev_uef_handler)(exceptionInfo);
2305   }
2306 #else // !_WIN64
2307   // On Windows, the mxcsr control bits are non-volatile across calls
2308   // See also CR 6192333
2309   //
2310   jint MxCsr = INITIAL_MXCSR;
2311   // we can't use StubRoutines::addr_mxcsr_std()
2312   // because in Win64 mxcsr is not saved there
2313   if (MxCsr != ctx->MxCsr) {
2314     ctx->MxCsr = MxCsr;
2315     return EXCEPTION_CONTINUE_EXECUTION;
2316   }
2317 #endif // !_WIN64
2318 
2319   return EXCEPTION_CONTINUE_SEARCH;
2320 }
2321 
2322 static inline void report_error(Thread* t, DWORD exception_code,
2323                                 address addr, void* siginfo, void* context) {
2324   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2325 
2326   // If UseOsErrorReporting, this will return here and save the error file
2327   // somewhere where we can find it in the minidump.
2328 }
2329 
2330 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2331         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2332   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2333   address addr = (address) exceptionRecord->ExceptionInformation[1];
2334   if (Interpreter::contains(pc)) {
2335     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2336     if (!fr->is_first_java_frame()) {
2337       // get_frame_at_stack_banging_point() is only called when we
2338       // have well defined stacks so java_sender() calls do not need
2339       // to assert safe_for_sender() first.
2340       *fr = fr->java_sender();
2341     }
2342   } else {
2343     // more complex code with compiled code
2344     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2345     CodeBlob* cb = CodeCache::find_blob(pc);
2346     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2347       // Not sure where the pc points to; fall back to default
2348       // stack overflow handling.
2349       return false;
2350     } else {
2351       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2352       // in compiled code, the stack banging is performed just after the return pc
2353       // has been pushed on the stack
2354       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2355       if (!fr->is_java_frame()) {
2356         // See java_sender() comment above.
2357         *fr = fr->java_sender();
2358       }
2359     }
2360   }
2361   assert(fr->is_java_frame(), "Safety check");
2362   return true;
2363 }
2364 
2365 #if INCLUDE_AOT
2366 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2367   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2368   address addr = (address) exceptionRecord->ExceptionInformation[1];
2369   address pc = (address) exceptionInfo->ContextRecord->Rip;
2370 
2371   // Handle the case where we get an implicit exception in AOT generated
2372   // code.  Loaded AOT DLLs are not registered for structured exceptions.
2373   // If the exception occurred in the code cache or AOT code, pass control
2374   // to our normal exception handler.
2375   CodeBlob* cb = CodeCache::find_blob(pc);
2376   if (cb != NULL) {
2377     return topLevelExceptionFilter(exceptionInfo);
2378   }
2379 
2380   return EXCEPTION_CONTINUE_SEARCH;
2381 }
2382 #endif
2383 
2384 //-----------------------------------------------------------------------------
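     // Top-level structured exception filter for the VM. Faults raised while
     // executing Java code (access violations, integer divide faults, stack
     // overflow, floating point traps) are translated into the corresponding
     // implicit-exception continuations or VM signals; unhandled exceptions
     // (other than breakpoints) are reported via report_error() and then
     // passed on with EXCEPTION_CONTINUE_SEARCH.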
2385 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2386   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2387   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2388 #ifdef _M_AMD64
2389   address pc = (address) exceptionInfo->ContextRecord->Rip;
2390 #else
2391   address pc = (address) exceptionInfo->ContextRecord->Eip;
2392 #endif
2393   Thread* t = Thread::current_or_null_safe();
2394 
2395   // Handle SafeFetch32 and SafeFetchN exceptions.
2396   if (StubRoutines::is_safefetch_fault(pc)) {
2397     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2398   }
2399 
2400 #ifndef _WIN64
2401   // Execution protection violation - win32 running on AMD64 only
2402   // Handled first to avoid misdiagnosis as a "normal" access violation;
2403   // This is safe to do because we have a new/unique ExceptionInformation
2404   // code for this condition.
2405   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2406     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2407     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2408     address addr = (address) exceptionRecord->ExceptionInformation[1];
2409 
2410     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2411       int page_size = os::vm_page_size();
2412 
2413       // Make sure the pc and the faulting address are sane.
2414       //
2415       // If an instruction spans a page boundary, and the page containing
2416       // the beginning of the instruction is executable but the following
2417       // page is not, the pc and the faulting address might be slightly
2418       // different - we still want to unguard the 2nd page in this case.
2419       //
2420       // 15 bytes seems to be a (very) safe value for max instruction size.
2421       bool pc_is_near_addr =
2422         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2423       bool instr_spans_page_boundary =
2424         (align_down((intptr_t) pc ^ (intptr_t) addr,
2425                          (intptr_t) page_size) > 0);
2426 
2427       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2428         static volatile address last_addr =
2429           (address) os::non_memory_address_word();
2430 
2431         // In conservative mode, don't unguard unless the address is in the VM
2432         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2433             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2434 
2435           // Set memory to RWX and retry
2436           address page_start = align_down(addr, page_size);
2437           bool res = os::protect_memory((char*) page_start, page_size,
2438                                         os::MEM_PROT_RWX);
2439 
2440           log_debug(os)("Execution protection violation "
2441                         "at " INTPTR_FORMAT
2442                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2443                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2444 
2445           // Set last_addr so if we fault again at the same address, we don't
2446           // end up in an endless loop.
2447           //
2448           // There are two potential complications here.  Two threads trapping
2449           // at the same address at the same time could cause one of the
2450           // threads to think it already unguarded, and abort the VM.  Likely
2451           // very rare.
2452           //
2453           // The other race involves two threads alternately trapping at
2454           // different addresses and failing to unguard the page, resulting in
2455           // an endless loop.  This condition is probably even more unlikely
2456           // than the first.
2457           //
2458           // Although both cases could be avoided by using locks or a
2459           // thread-local last_addr, these solutions are an unnecessary complication:
2460           // this handler is a best-effort safety net, not a complete solution.
2461           // It is disabled by default and should only be used as a workaround
2462           // in case we missed any no-execute-unsafe VM code.
2463 
2464           last_addr = addr;
2465 
2466           return EXCEPTION_CONTINUE_EXECUTION;
2467         }
2468       }
2469 
2470       // Last unguard failed or not unguarding
2471       tty->print_raw_cr("Execution protection violation");
2472       report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2473                    exceptionInfo->ContextRecord);
2474       return EXCEPTION_CONTINUE_SEARCH;
2475     }
2476   }
2477 #endif // _WIN64
2478 
2479   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2480       VM_Version::is_cpuinfo_segv_addr(pc)) {
2481     // Verify that the OS saves/restores AVX registers.
2482     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2483   }
2484 
2485   if (t != NULL && t->is_Java_thread()) {
2486     JavaThread* thread = (JavaThread*) t;
2487     bool in_java = thread->thread_state() == _thread_in_Java;
2488 
2489     // Handle potential stack overflows up front.
2490     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2491       if (thread->stack_guards_enabled()) {
2492         if (in_java) {
2493           frame fr;
2494           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2495           address addr = (address) exceptionRecord->ExceptionInformation[1];
2496           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2497             assert(fr.is_java_frame(), "Must be a Java frame");
2498             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2499           }
2500         }
2501         // Yellow zone violation.  The OS has unprotected the first yellow
2502         // zone page for us.  Note: we must call disable_stack_yellow_reserved_zone()
2503         // to update the enabled status, even if the zone contains only one page.
2504         assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
2505         thread->disable_stack_yellow_reserved_zone();
2506         // If not in java code, return and hope for the best.
2507         return in_java
2508             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2509             :  EXCEPTION_CONTINUE_EXECUTION;
2510       } else {
2511         // Fatal red zone violation.
2512         thread->disable_stack_red_zone();
2513         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2514         report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2515                       exceptionInfo->ContextRecord);
2516         return EXCEPTION_CONTINUE_SEARCH;
2517       }
2518     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2519       // Either stack overflow or null pointer exception.
2520       if (in_java) {
2521         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2522         address addr = (address) exceptionRecord->ExceptionInformation[1];
2523         address stack_end = thread->stack_end();
2524         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2525           // Stack overflow.
2526           assert(!os::uses_stack_guard_pages(),
2527                  "should be caught by red zone code above.");
2528           return Handle_Exception(exceptionInfo,
2529                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2530         }
2531         // Check for safepoint polling and implicit null pointer faults.
2532         // We only expect null pointers in the stubs (vtables);
2533         // the rest are checked explicitly now.
2534         CodeBlob* cb = CodeCache::find_blob(pc);
2535         if (cb != NULL) {
2536           if (os::is_poll_address(addr)) {
2537             address stub = SharedRuntime::get_poll_stub(pc);
2538             return Handle_Exception(exceptionInfo, stub);
2539           }
2540         }
2541         {
2542 #ifdef _WIN64
2543           // If it's a legal stack address, map the entire region in.
2544           //
2545           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2546           address addr = (address) exceptionRecord->ExceptionInformation[1];
2547           if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
2548             addr = (address)((uintptr_t)addr &
2549                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2550             os::commit_memory((char *)addr, thread->stack_base() - addr,
2551                               !ExecMem);
2552             return EXCEPTION_CONTINUE_EXECUTION;
2553           } else
2554 #endif
2555           {
2556             // Null pointer exception.
2557             if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
2558               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2559               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2560             }
2561             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2562                          exceptionInfo->ContextRecord);
2563             return EXCEPTION_CONTINUE_SEARCH;
2564           }
2565         }
2566       }
2567 
2568 #ifdef _WIN64
2569       // Special care for fast JNI field accessors.
2570       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2571       // in and the heap gets shrunk before the field access.
2572       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2573         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2574         if (addr != (address)-1) {
2575           return Handle_Exception(exceptionInfo, addr);
2576         }
2577       }
2578 #endif
2579 
2580       // Stack overflow or null pointer exception in native code.
2581       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2582                    exceptionInfo->ContextRecord);
2583       return EXCEPTION_CONTINUE_SEARCH;
2584     } // /EXCEPTION_ACCESS_VIOLATION
2585     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2586 
2587     if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
2588       CompiledMethod* nm = NULL;
2589       JavaThread* thread = (JavaThread*)t;
2590       if (in_java) {
2591         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2592         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
2593       }
2594 
2595       bool is_unsafe_arraycopy = (thread->thread_state() == _thread_in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
2596       if (((thread->thread_state() == _thread_in_vm ||
2597            thread->thread_state() == _thread_in_native ||
2598            is_unsafe_arraycopy) &&
2599           thread->doing_unsafe_access()) ||
2600           (nm != NULL && nm->has_unsafe_access())) {
2601         address next_pc =  Assembler::locate_next_instruction(pc);
2602         if (is_unsafe_arraycopy) {
2603           next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
2604         }
2605         return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
2606       }
2607     }
2608 
2609     if (in_java) {
2610       switch (exception_code) {
2611       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2612         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2613 
2614       case EXCEPTION_INT_OVERFLOW:
2615         return Handle_IDiv_Exception(exceptionInfo);
2616 
2617       } // switch
2618     }
2619     if (((thread->thread_state() == _thread_in_Java) ||
2620          (thread->thread_state() == _thread_in_native)) &&
2621          exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2622       LONG result=Handle_FLT_Exception(exceptionInfo);
2623       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2624     }
2625   }
2626 
2627   if (exception_code != EXCEPTION_BREAKPOINT) {
2628     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2629                  exceptionInfo->ContextRecord);
2630   }
2631   return EXCEPTION_CONTINUE_SEARCH;
2632 }
2633 
2634 #ifndef _WIN64
2635 // Special care for fast JNI accessors.
2636 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2637 // the heap gets shrunk before the field access.
2638 // Need to install our own structured exception handler since native code may
2639 // install its own.
2640 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2641   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2642   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2643     address pc = (address) exceptionInfo->ContextRecord->Eip;
2644     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2645     if (addr != (address)-1) {
2646       return Handle_Exception(exceptionInfo, addr);
2647     }
2648   }
2649   return EXCEPTION_CONTINUE_SEARCH;
2650 }
2651 
2652 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2653   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2654                                                      jobject obj,           \
2655                                                      jfieldID fieldID) {    \
2656     __try {                                                                 \
2657       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2658                                                                  obj,       \
2659                                                                  fieldID);  \
2660     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2661                                               _exception_info())) {         \
2662     }                                                                       \
2663     return 0;                                                               \
2664   }
2665 
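     // Expand the wrapper once per primitive type; e.g. DEFINE_FAST_GETFIELD(jint,
     // int, Int) defines jni_fast_GetIntField_wrapper.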
2666 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2667 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2668 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2669 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2670 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2671 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2672 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2673 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2674 
2675 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2676   switch (type) {
2677   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2678   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2679   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2680   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2681   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2682   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2683   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2684   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2685   default:        ShouldNotReachHere();
2686   }
2687   return (address)-1;
2688 }
2689 #endif
2690 
2691 // Virtual Memory
2692 
2693 int os::vm_page_size() { return os::win32::vm_page_size(); }
2694 int os::vm_allocation_granularity() {
2695   return os::win32::vm_allocation_granularity();
2696 }
2697 
2698 // Windows large page support is available on Windows 2003. In order to use
2699 // large page memory, the administrator must first assign additional privilege
2700 // to the user:
2701 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2702 //   + select Local Policies -> User Rights Assignment
2703 //   + double click "Lock pages in memory", add users and/or groups
2704 //   + reboot
2705 // Note the above steps are needed for administrators as well, because administrators
2706 // by default do not have the privilege to lock pages in memory.
2707 //
2708 // Note about Windows 2003: although the API supports committing large page
2709 // memory on a page-by-page basis and VirtualAlloc() returns success under this
2710 // scenario, I found through experimentation that it only uses large pages if the entire
2711 // memory region is reserved and committed in a single VirtualAlloc() call.
2712 // This makes Windows large page support more or less like Solaris ISM, in
2713 // that the entire heap must be committed up front. This will probably change
2714 // in the future; if so, the code below needs to be revisited.
2715 
2716 #ifndef MEM_LARGE_PAGES
2717   #define MEM_LARGE_PAGES 0x20000000
2718 #endif
2719 
2720 static HANDLE    _hProcess;
2721 static HANDLE    _hToken;
2722 
2723 // Container for NUMA node list info
2724 class NUMANodeListHolder {
2725  private:
2726   int *_numa_used_node_list;  // allocated below
2727   int _numa_used_node_count;
2728 
2729   void free_node_list() {
2730     if (_numa_used_node_list != NULL) {
2731       FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2732     }
2733   }
2734 
2735  public:
2736   NUMANodeListHolder() {
2737     _numa_used_node_count = 0;
2738     _numa_used_node_list = NULL;
2739     // Do the rest of the initialization in the build routine (after function pointers are set up).
2740   }
2741 
2742   ~NUMANodeListHolder() {
2743     free_node_list();
2744   }
2745 
2746   bool build() {
2747     DWORD_PTR proc_aff_mask;
2748     DWORD_PTR sys_aff_mask;
2749     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2750     ULONG highest_node_number;
2751     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2752     free_node_list();
2753     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2754     for (unsigned int i = 0; i <= highest_node_number; i++) {
2755       ULONGLONG proc_mask_numa_node;
2756       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2757       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2758         _numa_used_node_list[_numa_used_node_count++] = i;
2759       }
2760     }
2761     return (_numa_used_node_count > 1);
2762   }
2763 
2764   int get_count() { return _numa_used_node_count; }
2765   int get_node_list_entry(int n) {
2766     // for indexes out of range, returns -1
2767     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2768   }
2769 
2770 } numa_node_list_holder;
2771 
2772 
2773 
2774 static size_t _large_page_size = 0;
2775 
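     // Enable the SeLockMemoryPrivilege for the current process token. Windows
     // requires this privilege before large page allocations (MEM_LARGE_PAGES)
     // can succeed.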
2776 static bool request_lock_memory_privilege() {
2777   _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2778                           os::current_process_id());
2779 
2780   LUID luid;
2781   if (_hProcess != NULL &&
2782       OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2783       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2784 
2785     TOKEN_PRIVILEGES tp;
2786     tp.PrivilegeCount = 1;
2787     tp.Privileges[0].Luid = luid;
2788     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2789 
2790     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2791     // privilege. Check GetLastError() too. See the MSDN documentation.
2792     if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2793         (GetLastError() == ERROR_SUCCESS)) {
2794       return true;
2795     }
2796   }
2797 
2798   return false;
2799 }
2800 
2801 static void cleanup_after_large_page_init() {
2802   if (_hProcess) CloseHandle(_hProcess);
2803   _hProcess = NULL;
2804   if (_hToken) CloseHandle(_hToken);
2805   _hToken = NULL;
2806 }
2807 
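     // Build the list of NUMA nodes covered by the process affinity mask and
     // round NUMAInterleaveGranularity up to a usable value. Returns true only
     // if more than one NUMA node is available to the process.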
2808 static bool numa_interleaving_init() {
2809   bool success = false;
2810   bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2811 
2812   // Print a warning if the UseNUMAInterleaving flag is specified on the command line.
2813   bool warn_on_failure = use_numa_interleaving_specified;
2814 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2815 
2816   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2817   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2818   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2819 
2820   if (numa_node_list_holder.build()) {
2821     if (log_is_enabled(Debug, os, cpu)) {
2822       Log(os, cpu) log;
2823       log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2824       for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2825         log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2826       }
2827     }
2828     success = true;
2829   } else {
2830     WARN("Process does not cover multiple NUMA nodes.");
2831   }
2832   if (!success) {
2833     if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2834   }
2835   return success;
2836 #undef WARN
2837 }
2838 
2839 // This routine is used whenever we need to reserve a contiguous VA range
2840 // but must make separate VirtualAlloc calls for each piece of the range.
2841 // Reasons for doing this:
2842 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003, but it can be set otherwise)
2843 //  * UseNUMAInterleaving requires a separate node for each piece
2844 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2845                                          DWORD prot,
2846                                          bool should_inject_error = false) {
2847   char * p_buf;
2848   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2849   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2850   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2851 
2852   // First reserve enough address space in advance, since we want to be
2853   // able to break a single contiguous virtual address range into multiple
2854   // large page commits, but WS2003 does not allow reserving large page space.
2855   // So we just use 4K pages for the reserve; this gives us a legal contiguous
2856   // address space. Then we deallocate that reservation and re-allocate
2857   // using large pages.
2858   const size_t size_of_reserve = bytes + chunk_size;
2859   if (bytes > size_of_reserve) {
2860     // Overflowed.
2861     return NULL;
2862   }
2863   p_buf = (char *) VirtualAlloc(addr,
2864                                 size_of_reserve,  // size of Reserve
2865                                 MEM_RESERVE,
2866                                 PAGE_READWRITE);
2867   // If reservation failed, return NULL
2868   if (p_buf == NULL) return NULL;
2869   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2870   os::release_memory(p_buf, bytes + chunk_size);
2871 
2872   // We still need to round up to a page boundary (in case we are using large pages),
2873   // but not to a chunk boundary (in case NUMAInterleaveGranularity doesn't align with the page size);
2874   // instead we handle this in the bytes_to_rq computation below.
2875   p_buf = align_up(p_buf, page_size);
2876 
2877   // now go through and allocate one chunk at a time until all bytes are
2878   // allocated
2879   size_t  bytes_remaining = bytes;
2880   // An overflow of align_up() would have been caught above
2881   // in the calculation of size_of_reserve.
2882   char * next_alloc_addr = p_buf;
2883   HANDLE hProc = GetCurrentProcess();
2884 
2885 #ifdef ASSERT
2886   // Variable for the failure injection
2887   int ran_num = os::random();
2888   size_t fail_after = ran_num % bytes;
2889 #endif
2890 
2891   int count=0;
2892   while (bytes_remaining) {
2893     // select bytes_to_rq to get to the next chunk_size boundary
2894 
2895     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
2896     // Note allocate and commit
2897     char * p_new;
2898 
2899 #ifdef ASSERT
2900     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
2901 #else
2902     const bool inject_error_now = false;
2903 #endif
2904 
2905     if (inject_error_now) {
2906       p_new = NULL;
2907     } else {
2908       if (!UseNUMAInterleaving) {
2909         p_new = (char *) VirtualAlloc(next_alloc_addr,
2910                                       bytes_to_rq,
2911                                       flags,
2912                                       prot);
2913       } else {
2914         // get the next node to use from the used_node_list
2915         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
2916         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
2917         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
2918       }
2919     }
2920 
2921     if (p_new == NULL) {
2922       // Free any allocated pages
2923       if (next_alloc_addr > p_buf) {
2924         // Some memory was committed so release it.
2925         size_t bytes_to_release = bytes - bytes_remaining;
2926         // NMT has yet to record any individual blocks, so we
2927         // need to create a dummy 'reserve' record to match
2928         // the release.
2929         MemTracker::record_virtual_memory_reserve((address)p_buf,
2930                                                   bytes_to_release, CALLER_PC);
2931         os::release_memory(p_buf, bytes_to_release);
2932       }
2933 #ifdef ASSERT
2934       if (should_inject_error) {
2935         log_develop_debug(pagesize)("Reserving pages individually failed.");
2936       }
2937 #endif
2938       return NULL;
2939     }
2940 
2941     bytes_remaining -= bytes_to_rq;
2942     next_alloc_addr += bytes_to_rq;
2943     count++;
2944   }
2945   // Although the memory is allocated individually, it is returned as one.
2946   // NMT records it as one block.
2947   if ((flags & MEM_COMMIT) != 0) {
2948     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
2949   } else {
2950     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
2951   }
2952 
2953   // made it this far, success
2954   return p_buf;
2955 }
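     // Illustrative sketch (not part of the original sources): how the bytes_to_rq
     // computation above keeps each VirtualAlloc/VirtualAllocExNuma request inside a
     // single interleave chunk. Assuming a hypothetical 64K chunk size and a start
     // address 16K into a chunk ('chunk_base' is a placeholder):
     //
     //   const size_t chunk_size      = 64 * K;
     //   char*        next_alloc_addr = chunk_base + 16 * K;
     //   size_t       bytes_remaining = 200 * K;
     //   // first request reaches only to the next chunk boundary:
     //   size_t bytes_to_rq = MIN2(bytes_remaining,
     //                             chunk_size - ((size_t)next_alloc_addr % chunk_size)); // 48K
     //   // subsequent requests then proceed one full 64K chunk at a time, each one
     //   // (with UseNUMAInterleaving) directed to the next node in the node list.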
2956 
2957 
2958 
2959 void os::large_page_init() {
2960   if (!UseLargePages) return;
2961 
2962   // print a warning if any large page related flag is specified on command line
2963   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
2964                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
2965   bool success = false;
2966 
2967 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2968   if (request_lock_memory_privilege()) {
2969     size_t s = GetLargePageMinimum();
2970     if (s) {
2971 #if defined(IA32) || defined(AMD64)
2972       if (s > 4*M || LargePageSizeInBytes > 4*M) {
2973         WARN("JVM cannot use large pages bigger than 4mb.");
2974       } else {
2975 #endif
2976         if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
2977           _large_page_size = LargePageSizeInBytes;
2978         } else {
2979           _large_page_size = s;
2980         }
2981         success = true;
2982 #if defined(IA32) || defined(AMD64)
2983       }
2984 #endif
2985     } else {
2986       WARN("Large page is not supported by the processor.");
2987     }
2988   } else {
2989     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
2990   }
2991 #undef WARN
2992 
2993   const size_t default_page_size = (size_t) vm_page_size();
2994   if (success && _large_page_size > default_page_size) {
2995     _page_sizes[0] = _large_page_size;
2996     _page_sizes[1] = default_page_size;
2997     _page_sizes[2] = 0;
2998   }
2999 
3000   cleanup_after_large_page_init();
3001   UseLargePages = success;
3002 }
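     // Usage sketch (assumption, not from the original sources): large pages are
     // normally requested on the command line, e.g.
     //
     //   java -XX:+UseLargePages -XX:LargePageSizeInBytes=2m ...
     //
     // With the logic above, if GetLargePageMinimum() reports 2M and the flag is
     // unset or a multiple of 2M, _large_page_size becomes the requested (or
     // minimum) size; a flag value that is not a multiple of the OS minimum is
     // ignored in favor of that minimum.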
3003 
3004 int os::create_file_for_heap(const char* dir) {
3005 
3006   const char name_template[] = "/jvmheap.XXXXXX";
3007 
3008   size_t fullname_len = strlen(dir) + strlen(name_template);
3009   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
3010   if (fullname == NULL) {
3011     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
3012     return -1;
3013   }
3014   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
3015   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
3016 
3017   os::native_path(fullname);
3018 
3019   char *path = _mktemp(fullname);
3020   if (path == NULL) {
3021     warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
3022     os::free(fullname);
3023     return -1;
3024   }
3025 
3026   int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
3027 
3028   os::free(fullname);
3029   if (fd < 0) {
3030     warning("Problem opening file for heap (%s)", os::strerror(errno));
3031     return -1;
3032   }
3033   return fd;
3034 }
3035 
3036 // If 'base' is not NULL, the function will return NULL if it cannot map the memory at 'base'.
3037 char* os::map_memory_to_file(char* base, size_t size, int fd) {
3038   assert(fd != -1, "File descriptor is not valid");
3039 
3040   HANDLE fh = (HANDLE)_get_osfhandle(fd);
3041 #ifdef _LP64
3042   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3043     (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3044 #else
3045   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3046     0, (DWORD)size, NULL);
3047 #endif
3048   if (fileMapping == NULL) {
3049     if (GetLastError() == ERROR_DISK_FULL) {
3050       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
3051     }
3052     else {
3053       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3054     }
3055 
3056     return NULL;
3057   }
3058 
3059   LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
3060 
3061   CloseHandle(fileMapping);
3062 
3063   return (char*)addr;
3064 }
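     // Usage sketch (assumption, not from the original sources): this is the path
     // taken when the Java heap is backed by a file, e.g. on a DAX-mounted device.
     // "D:\\pmemdir" and 'heap_size' below are placeholders:
     //
     //   int fd = os::create_file_for_heap("D:\\pmemdir");
     //   if (fd != -1) {
     //     char* heap_base = os::map_memory_to_file(NULL, heap_size, fd);
     //     // NULL means the mapping (or the required disk space) was unavailable
     //   }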
3065 
3066 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
3067   assert(fd != -1, "File descriptor is not valid");
3068   assert(base != NULL, "Base address cannot be NULL");
3069 
3070   release_memory(base, size);
3071   return map_memory_to_file(base, size, fd);
3072 }
3073 
3074 // On win32, one cannot release just a part of reserved memory; it's an
3075 // all-or-nothing deal.  When we split a reservation, we must break the
3076 // reservation into two separate reservations.
3077 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3078                                   bool realloc) {
3079   if (size > 0) {
3080     release_memory(base, size);
3081     if (realloc) {
3082       reserve_memory(split, base);
3083     }
3084     if (size != split) {
3085       reserve_memory(size - split, base + split);
3086     }
3087   }
3088 }
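     // Worked example (sketch; assumes the shared os::split_reserved_memory()
     // wrapper forwards here): splitting a 1M reservation at 256K with realloc,
     //
     //   os::split_reserved_memory(base, 1 * M, 256 * K, true /* realloc */);
     //
     // releases the whole 1M region and immediately re-reserves [base, base + 256K)
     // and [base + 256K, base + 1M) as two independent reservations, so that each
     // part can later be released on its own.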
3089 
3090 // Multiple threads can race in this code, but it is not possible to unmap small sections of
3091 // virtual space to get the requested alignment, unlike on POSIX-like OSes.
3092 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3093 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3094   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3095          "Alignment must be a multiple of the allocation granularity");
3096   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3097 
3098   size_t extra_size = size + alignment;
3099   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3100 
3101   char* aligned_base = NULL;
3102 
3103   do {
3104     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3105     if (extra_base == NULL) {
3106       return NULL;
3107     }
3108     // Do manual alignment
3109     aligned_base = align_up(extra_base, alignment);
3110 
3111     if (file_desc != -1) {
3112       os::unmap_memory(extra_base, extra_size);
3113     } else {
3114       os::release_memory(extra_base, extra_size);
3115     }
3116 
3117     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3118 
3119   } while (aligned_base == NULL);
3120 
3121   return aligned_base;
3122 }
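     // Worked example (values are illustrative only): requesting size = 1M aligned
     // to 1M first reserves extra_size = 2M somewhere, say at 0x7ff23000. Then
     //
     //   aligned_base = align_up(extra_base, 1 * M);   // 0x80000000
     //
     // the whole 2M scratch reservation is released, and the final 1M is re-reserved
     // exactly at aligned_base; the loop retries if another thread grabbed that
     // range in the meantime.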
3123 
3124 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3125   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3126          "reserve alignment");
3127   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3128   char* res;
3129   // note that if UseLargePages is on, all the areas that require interleaving
3130   // will go through reserve_memory_special rather than through here.
3131   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3132   if (!use_individual) {
3133     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3134   } else {
3135     elapsedTimer reserveTimer;
3136     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3137     // in numa interleaving, we have to allocate pages individually
3138     // (well really chunks of NUMAInterleaveGranularity size)
3139     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3140     if (res == NULL) {
3141       warning("NUMA page allocation failed");
3142     }
3143     if (Verbose && PrintMiscellaneous) {
3144       reserveTimer.stop();
3145       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3146                     reserveTimer.milliseconds(), reserveTimer.ticks());
3147     }
3148   }
3149   assert(res == NULL || addr == NULL || addr == res,
3150          "Unexpected address from reserve.");
3151 
3152   return res;
3153 }
3154 
3155 // Reserve memory at an arbitrary address, only if that area is
3156 // available (and not reserved for something else).
3157 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3158   // Windows os::reserve_memory() fails if the requested address range is
3159   // not available.
3160   return reserve_memory(bytes, requested_addr);
3161 }
3162 
3163 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
3164   assert(file_desc >= 0, "file_desc is not valid");
3165   return map_memory_to_file(requested_addr, bytes, file_desc);
3166 }
3167 
3168 size_t os::large_page_size() {
3169   return _large_page_size;
3170 }
3171 
3172 bool os::can_commit_large_page_memory() {
3173   // Windows only uses large page memory when the entire region is reserved
3174   // and committed in a single VirtualAlloc() call. This may change in the
3175   // future, but with Windows 2003 it's not possible to commit on demand.
3176   return false;
3177 }
3178 
3179 bool os::can_execute_large_page_memory() {
3180   return true;
3181 }
3182 
3183 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3184                                  bool exec) {
3185   assert(UseLargePages, "only for large pages");
3186 
3187   if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3188     return NULL; // Fallback to small pages.
3189   }
3190 
3191   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3192   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3193 
3194   // with large pages, there are two cases where we need to use Individual Allocation
3195   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3196   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3197   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3198     log_debug(pagesize)("Reserving large pages individually.");
3199 
3200     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3201     if (p_buf == NULL) {
3202       // give an appropriate warning message
3203       if (UseNUMAInterleaving) {
3204         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3205       }
3206       if (UseLargePagesIndividualAllocation) {
3207         warning("Individually allocated large pages failed, "
3208                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3209       }
3210       return NULL;
3211     }
3212 
3213     return p_buf;
3214 
3215   } else {
3216     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3217 
3218     // normal policy just allocate it all at once
3219     DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3220     char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
3221     if (res != NULL) {
3222       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3223     }
3224 
3225     return res;
3226   }
3227 }
3228 
3229 bool os::release_memory_special(char* base, size_t bytes) {
3230   assert(base != NULL, "Sanity check");
3231   return release_memory(base, bytes);
3232 }
3233 
3234 void os::print_statistics() {
3235 }
3236 
3237 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3238   int err = os::get_last_error();
3239   char buf[256];
3240   size_t buf_len = os::lasterror(buf, sizeof(buf));
3241   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3242           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3243           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3244 }
3245 
3246 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3247   if (bytes == 0) {
3248     // Don't bother the OS with noops.
3249     return true;
3250   }
3251   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3252   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3253   // Don't attempt to print anything if the OS call fails. We're
3254   // probably low on resources, so the print itself may cause crashes.
3255 
3256   // Unless we have NUMAInterleaving enabled, the range of a commit
3257   // is always within a reserve covered by a single VirtualAlloc;
3258   // in that case we can just do a single commit for the requested size.
3259   if (!UseNUMAInterleaving) {
3260     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3261       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3262       return false;
3263     }
3264     if (exec) {
3265       DWORD oldprot;
3266       // Windows doc says to use VirtualProtect to get execute permissions
3267       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3268         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3269         return false;
3270       }
3271     }
3272     return true;
3273   } else {
3274 
3275     // when NUMAInterleaving is enabled, the commit might cover a range that
3276     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3277     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3278     // returns represents the number of bytes that can be committed in one step.
3279     size_t bytes_remaining = bytes;
3280     char * next_alloc_addr = addr;
3281     while (bytes_remaining > 0) {
3282       MEMORY_BASIC_INFORMATION alloc_info;
3283       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3284       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3285       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3286                        PAGE_READWRITE) == NULL) {
3287         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3288                                             exec);)
3289         return false;
3290       }
3291       if (exec) {
3292         DWORD oldprot;
3293         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3294                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3295           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3296                                               exec);)
3297           return false;
3298         }
3299       }
3300       bytes_remaining -= bytes_to_rq;
3301       next_alloc_addr += bytes_to_rq;
3302     }
3303   }
3304   // if we made it this far, return true
3305   return true;
3306 }
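     // Sketch of the NUMA-interleaved commit loop above (illustrative only): if the
     // reserve was built from 64K chunks, committing 150K from a chunk base proceeds
     // roughly as
     //
     //   VirtualQuery(addr,        ...)  -> RegionSize == 64K, commit 64K
     //   VirtualQuery(addr + 64K,  ...)  -> RegionSize == 64K, commit 64K
     //   VirtualQuery(addr + 128K, ...)  -> commit the remaining 22K
     //
     // because each VirtualAlloc made in allocate_pages_individually() forms its own
     // region, which (per the comment above) bounds what one MEM_COMMIT can cover.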
3307 
3308 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3309                           bool exec) {
3310   // alignment_hint is ignored on this OS
3311   return pd_commit_memory(addr, size, exec);
3312 }
3313 
3314 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3315                                   const char* mesg) {
3316   assert(mesg != NULL, "mesg must be specified");
3317   if (!pd_commit_memory(addr, size, exec)) {
3318     warn_fail_commit_memory(addr, size, exec);
3319     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3320   }
3321 }
3322 
3323 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3324                                   size_t alignment_hint, bool exec,
3325                                   const char* mesg) {
3326   // alignment_hint is ignored on this OS
3327   pd_commit_memory_or_exit(addr, size, exec, mesg);
3328 }
3329 
3330 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3331   if (bytes == 0) {
3332     // Don't bother the OS with noops.
3333     return true;
3334   }
3335   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3336   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3337   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3338 }
3339 
3340 bool os::pd_release_memory(char* addr, size_t bytes) {
3341   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3342 }
3343 
3344 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3345   return os::commit_memory(addr, size, !ExecMem);
3346 }
3347 
3348 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3349   return os::uncommit_memory(addr, size);
3350 }
3351 
3352 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3353   uint count = 0;
3354   bool ret = false;
3355   size_t bytes_remaining = bytes;
3356   char * next_protect_addr = addr;
3357 
3358   // Use VirtualQuery() to get the chunk size.
3359   while (bytes_remaining) {
3360     MEMORY_BASIC_INFORMATION alloc_info;
3361     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3362       return false;
3363     }
3364 
3365     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3366     // We used a different API in allocate_pages_individually() depending on UseNUMAInterleaving,
3367     // but we don't distinguish here as both cases are protected by the same API.
3368     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3369     if (!ret) {
3370       warning("Failed protecting pages individually for chunk #%u", count);
3371       return false;
3372     }
3373 
3374     bytes_remaining -= bytes_to_protect;
3375     next_protect_addr += bytes_to_protect;
3376     count++;
3377   }
3378   return ret;
3379 }
3380 
3381 // Set protections specified
3382 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3383                         bool is_committed) {
3384   unsigned int p = 0;
3385   switch (prot) {
3386   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3387   case MEM_PROT_READ: p = PAGE_READONLY; break;
3388   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3389   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3390   default:
3391     ShouldNotReachHere();
3392   }
3393 
3394   DWORD old_status;
3395 
3396   // Strangely enough, on Win32 one can change protection only for committed
3397   // memory; not a big deal anyway, as 'bytes' is less than or equal to 64K here.
3398   if (!is_committed) {
3399     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3400                           "cannot commit protection page");
3401   }
3402   // One cannot use os::guard_memory() here, as on Win32 guard pages
3403   // have different (one-shot) semantics. From MSDN on PAGE_GUARD:
3404   //
3405   // Pages in the region become guard pages. Any attempt to access a guard page
3406   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3407   // the guard page status. Guard pages thus act as a one-time access alarm.
3408   bool ret;
3409   if (UseNUMAInterleaving) {
3410     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3411     // so we must protect the chunks individually.
3412     ret = protect_pages_individually(addr, bytes, p, &old_status);
3413   } else {
3414     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3415   }
3416 #ifdef ASSERT
3417   if (!ret) {
3418     int err = os::get_last_error();
3419     char buf[256];
3420     size_t buf_len = os::lasterror(buf, sizeof(buf));
3421     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3422           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3423           buf_len != 0 ? buf : "<no_error_string>", err);
3424   }
3425 #endif
3426   return ret;
3427 }
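     // Usage sketch (assumption, not from the original sources): write-protecting a
     // single committed page, e.g. to catch stray stores while debugging.
     // 'page_addr' is a placeholder for a page-aligned, already committed address:
     //
     //   if (!os::protect_memory(page_addr, os::vm_page_size(),
     //                           os::MEM_PROT_READ, true /* is_committed */)) {
     //     // handle failure
     //   }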
3428 
3429 bool os::guard_memory(char* addr, size_t bytes) {
3430   DWORD old_status;
3431   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3432 }
3433 
3434 bool os::unguard_memory(char* addr, size_t bytes) {
3435   DWORD old_status;
3436   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3437 }
3438 
3439 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3440 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3441 void os::numa_make_global(char *addr, size_t bytes)    { }
3442 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3443 bool os::numa_topology_changed()                       { return false; }
3444 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3445 int os::numa_get_group_id()                            { return 0; }
3446 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3447   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3448     // Provide an answer for UMA systems
3449     ids[0] = 0;
3450     return 1;
3451   } else {
3452     // check for size bigger than actual groups_num
3453     size = MIN2(size, numa_get_groups_num());
3454     for (int i = 0; i < (int)size; i++) {
3455       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3456     }
3457     return size;
3458   }
3459 }
3460 
3461 bool os::get_page_info(char *start, page_info* info) {
3462   return false;
3463 }
3464 
3465 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3466                      page_info* page_found) {
3467   return end;
3468 }
3469 
3470 char* os::non_memory_address_word() {
3471   // Must never look like an address returned by reserve_memory,
3472   // even in its subfields (as defined by the CPU immediate fields,
3473   // if the CPU splits constants across multiple instructions).
3474   return (char*)-1;
3475 }
3476 
3477 #define MAX_ERROR_COUNT 100
3478 #define SYS_THREAD_ERROR 0xffffffffUL
3479 
3480 void os::pd_start_thread(Thread* thread) {
3481   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3482   // Returns previous suspend state:
3483   // 0:  Thread was not suspended
3484   // 1:  Thread is running now
3485   // >1: Thread is still suspended.
3486   assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3487 }
3488 
3489 
3490 
3491 // Short sleep, direct OS call.
3492 //
3493 // ms = 0, means allow others (if any) to run.
3494 //
3495 void os::naked_short_sleep(jlong ms) {
3496   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3497   Sleep(ms);
3498 }
3499 
3500 // Windows does not provide sleep functionality with nanosecond resolution, so we
3501 // try to approximate this with spinning combined with yielding if another thread
3502 // is ready to run on the current processor.
3503 void os::naked_short_nanosleep(jlong ns) {
3504   assert(ns > -1 && ns < NANOUNITS, "Un-interruptible sleep, short time use only");
3505 
3506   int64_t start = os::javaTimeNanos();
3507   do {
3508     if (SwitchToThread() == 0) {
3509       // Nothing else is ready to run on this cpu, spin a little
3510       SpinPause();
3511     }
3512   } while (os::javaTimeNanos() - start < ns);
3513 }
3514 
3515 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3516 void os::infinite_sleep() {
3517   while (true) {    // sleep forever ...
3518     Sleep(100000);  // ... 100 seconds at a time
3519   }
3520 }
3521 
3522 typedef BOOL (WINAPI * STTSignature)(void);
3523 
3524 void os::naked_yield() {
3525   // Consider passing back the return value from SwitchToThread().
3526   SwitchToThread();
3527 }
3528 
3529 // Win32 only gives you access to seven real priorities at a time,
3530 // so we compress Java's ten down to seven.  It would be better
3531 // if we dynamically adjusted relative priorities.
3532 
3533 int os::java_to_os_priority[CriticalPriority + 1] = {
3534   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3535   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3536   THREAD_PRIORITY_LOWEST,                       // 2
3537   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3538   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3539   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3540   THREAD_PRIORITY_NORMAL,                       // 6
3541   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3542   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3543   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3544   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3545   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3546 };
3547 
3548 int prio_policy1[CriticalPriority + 1] = {
3549   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3550   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3551   THREAD_PRIORITY_LOWEST,                       // 2
3552   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3553   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3554   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3555   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3556   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3557   THREAD_PRIORITY_HIGHEST,                      // 8
3558   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3559   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3560   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3561 };
3562 
3563 static int prio_init() {
3564   // If ThreadPriorityPolicy is 1, switch tables
3565   if (ThreadPriorityPolicy == 1) {
3566     int i;
3567     for (i = 0; i < CriticalPriority + 1; i++) {
3568       os::java_to_os_priority[i] = prio_policy1[i];
3569     }
3570   }
3571   if (UseCriticalJavaThreadPriority) {
3572     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3573   }
3574   return 0;
3575 }
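     // Example of the effect of the tables above (illustrative): with the default
     // table a Java thread at MaxPriority (10) runs at THREAD_PRIORITY_HIGHEST;
     // starting the VM with
     //
     //   java -XX:ThreadPriorityPolicy=1 ...
     //
     // switches to prio_policy1, where the same thread maps to
     // THREAD_PRIORITY_TIME_CRITICAL. -XX:+UseCriticalJavaThreadPriority instead
     // makes MaxPriority reuse the CriticalPriority entry of the active table.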
3576 
3577 OSReturn os::set_native_priority(Thread* thread, int priority) {
3578   if (!UseThreadPriorities) return OS_OK;
3579   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3580   return ret ? OS_OK : OS_ERR;
3581 }
3582 
3583 OSReturn os::get_native_priority(const Thread* const thread,
3584                                  int* priority_ptr) {
3585   if (!UseThreadPriorities) {
3586     *priority_ptr = java_to_os_priority[NormPriority];
3587     return OS_OK;
3588   }
3589   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3590   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3591     assert(false, "GetThreadPriority failed");
3592     return OS_ERR;
3593   }
3594   *priority_ptr = os_prio;
3595   return OS_OK;
3596 }
3597 
3598 void os::interrupt(Thread* thread) {
3599   debug_only(Thread::check_for_dangling_thread_pointer(thread);)
3600   assert(thread->is_Java_thread(), "invariant");
3601   JavaThread* jt = (JavaThread*) thread;
3602   OSThread* osthread = thread->osthread();
3603   osthread->set_interrupted(true);
3604   // More than one thread can get here with the same value of osthread,
3605   // resulting in multiple notifications.  We do, however, want the store
3606   // to interrupted() to be visible to other threads before we post
3607   // the interrupt event.
3608   OrderAccess::release();
3609   SetEvent(osthread->interrupt_event());
3610   // For JSR166:  unpark after setting status
3611   jt->parker()->unpark();
3612 
3613   ParkEvent * ev = thread->_ParkEvent;
3614   if (ev != NULL) ev->unpark();
3615 
3616   ev = jt->_SleepEvent;
3617   if (ev != NULL) ev->unpark();
3618 }
3619 
3620 
3621 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3622   debug_only(Thread::check_for_dangling_thread_pointer(thread);)
3623 
3624   OSThread* osthread = thread->osthread();
3625   // There is no synchronization between the setting of the interrupt
3626   // and it being cleared here. It is critical - see 6535709 - that
3627   // we only clear the interrupt state, and reset the interrupt event,
3628   // if we are going to report that we were indeed interrupted - else
3629   // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
3630   // depending on the timing. By checking the thread's interrupt event we see
3631   // whether the thread got a real interrupt, and thus prevent spurious wakeups.
3632   bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
3633   if (interrupted && clear_interrupted) {
3634     osthread->set_interrupted(false);
3635     ResetEvent(osthread->interrupt_event());
3636   } // Otherwise leave the interrupted state alone
3637 
3638   return interrupted;
3639 }
3640 
3641 // GetCurrentThreadId() returns DWORD
3642 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3643 
3644 static int _initial_pid = 0;
3645 
3646 int os::current_process_id() {
3647   return (_initial_pid ? _initial_pid : _getpid());
3648 }
3649 
3650 int    os::win32::_vm_page_size              = 0;
3651 int    os::win32::_vm_allocation_granularity = 0;
3652 int    os::win32::_processor_type            = 0;
3653 // Processor level is not available on non-NT systems, use vm_version instead
3654 int    os::win32::_processor_level           = 0;
3655 julong os::win32::_physical_memory           = 0;
3656 size_t os::win32::_default_stack_size        = 0;
3657 
3658 intx          os::win32::_os_thread_limit    = 0;
3659 volatile intx os::win32::_os_thread_count    = 0;
3660 
3661 bool   os::win32::_is_windows_server         = false;
3662 
3663 // 6573254
3664 // Currently, the bug is observed across all the supported Windows releases,
3665 // including the latest one (as of this writing - Windows Server 2012 R2)
3666 bool   os::win32::_has_exit_bug              = true;
3667 
3668 void os::win32::initialize_system_info() {
3669   SYSTEM_INFO si;
3670   GetSystemInfo(&si);
3671   _vm_page_size    = si.dwPageSize;
3672   _vm_allocation_granularity = si.dwAllocationGranularity;
3673   _processor_type  = si.dwProcessorType;
3674   _processor_level = si.wProcessorLevel;
3675   set_processor_count(si.dwNumberOfProcessors);
3676 
3677   MEMORYSTATUSEX ms;
3678   ms.dwLength = sizeof(ms);
3679 
3680   // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
3681   // dwMemoryLoad (% of memory in use)
3682   GlobalMemoryStatusEx(&ms);
3683   _physical_memory = ms.ullTotalPhys;
3684 
3685   if (FLAG_IS_DEFAULT(MaxRAM)) {
3686     // Adjust MaxRAM according to the maximum virtual address space available.
3687     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3688   }
3689 
3690   OSVERSIONINFOEX oi;
3691   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3692   GetVersionEx((OSVERSIONINFO*)&oi);
3693   switch (oi.dwPlatformId) {
3694   case VER_PLATFORM_WIN32_NT:
3695     {
3696       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3697       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3698           oi.wProductType == VER_NT_SERVER) {
3699         _is_windows_server = true;
3700       }
3701     }
3702     break;
3703   default: fatal("Unknown platform");
3704   }
3705 
3706   _default_stack_size = os::current_stack_size();
3707   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3708   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3709          "stack size not a multiple of page size");
3710 
3711   initialize_performance_counter();
3712 }
3713 
3714 
3715 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3716                                       int ebuflen) {
3717   char path[MAX_PATH];
3718   DWORD size;
3719   DWORD pathLen = (DWORD)sizeof(path);
3720   HINSTANCE result = NULL;
3721 
3722   // only allow library name without path component
3723   assert(strchr(name, '\\') == NULL, "path not allowed");
3724   assert(strchr(name, ':') == NULL, "path not allowed");
3725   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3726     jio_snprintf(ebuf, ebuflen,
3727                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3728     return NULL;
3729   }
3730 
3731   // search system directory
3732   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3733     if (size >= pathLen) {
3734       return NULL; // truncated
3735     }
3736     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3737       return NULL; // truncated
3738     }
3739     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3740       return result;
3741     }
3742   }
3743 
3744   // try Windows directory
3745   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3746     if (size >= pathLen) {
3747       return NULL; // truncated
3748     }
3749     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3750       return NULL; // truncated
3751     }
3752     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3753       return result;
3754     }
3755   }
3756 
3757   jio_snprintf(ebuf, ebuflen,
3758                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3759   return NULL;
3760 }
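     // Usage sketch (assumption, not from the original sources): loading a system
     // library by bare name only, so it can never be picked up from the current
     // directory:
     //
     //   char ebuf[1024];
     //   HINSTANCE h = os::win32::load_Windows_dll("dbghelp.dll", ebuf, (int)sizeof(ebuf));
     //   if (h == NULL) {
     //     // ebuf describes the failure, e.g. a path component was passed in
     //   }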
3761 
3762 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3763 #define EXIT_TIMEOUT 300000 /* 5 minutes */
3764 
3765 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3766   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3767   return TRUE;
3768 }
3769 
3770 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3771   // Basic approach:
3772   //  - Each exiting thread registers its intent to exit and then does so.
3773   //  - A thread trying to terminate the process must wait for all
3774   //    threads currently exiting to complete their exit.
3775 
3776   if (os::win32::has_exit_bug()) {
3777     // The array holds handles of the threads that have started exiting by calling
3778     // _endthreadex().
3779     // Should be large enough to avoid blocking the exiting thread due to lack of
3780     // a free slot.
3781     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3782     static int handle_count = 0;
3783 
3784     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3785     static CRITICAL_SECTION crit_sect;
3786     static volatile DWORD process_exiting = 0;
3787     int i, j;
3788     DWORD res;
3789     HANDLE hproc, hthr;
3790 
3791     // We only attempt to register threads until a process exiting
3792     // thread manages to set the process_exiting flag. Any threads
3793     // that come through here after the process_exiting flag is set
3794     // are unregistered and will be caught in the SuspendThread()
3795     // infinite loop below.
3796     bool registered = false;
3797 
3798     // The first thread that reached this point, initializes the critical section.
3799     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3800       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3801     } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
3802       if (what != EPT_THREAD) {
3803         // Atomically set process_exiting before the critical section
3804         // to increase the visibility between racing threads.
3805         Atomic::cmpxchg(GetCurrentThreadId(), &process_exiting, (DWORD)0);
3806       }
3807       EnterCriticalSection(&crit_sect);
3808 
3809       if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
3810         // Remove from the array those handles of the threads that have completed exiting.
3811         for (i = 0, j = 0; i < handle_count; ++i) {
3812           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3813           if (res == WAIT_TIMEOUT) {
3814             handles[j++] = handles[i];
3815           } else {
3816             if (res == WAIT_FAILED) {
3817               warning("WaitForSingleObject failed (%u) in %s: %d\n",
3818                       GetLastError(), __FILE__, __LINE__);
3819             }
3820             // Don't keep the handle, if we failed waiting for it.
3821             CloseHandle(handles[i]);
3822           }
3823         }
3824 
3825         // If there's no free slot in the array of the kept handles, we'll have to
3826         // wait until at least one thread completes exiting.
3827         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3828           // Raise the priority of the oldest exiting thread to increase its chances
3829           // to complete sooner.
3830           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3831           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3832           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3833             i = (res - WAIT_OBJECT_0);
3834             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3835             for (; i < handle_count; ++i) {
3836               handles[i] = handles[i + 1];
3837             }
3838           } else {
3839             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3840                     (res == WAIT_FAILED ? "failed" : "timed out"),
3841                     GetLastError(), __FILE__, __LINE__);
3842             // Don't keep handles, if we failed waiting for them.
3843             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3844               CloseHandle(handles[i]);
3845             }
3846             handle_count = 0;
3847           }
3848         }
3849 
3850         // Store a duplicate of the current thread handle in the array of handles.
3851         hproc = GetCurrentProcess();
3852         hthr = GetCurrentThread();
3853         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3854                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
3855           warning("DuplicateHandle failed (%u) in %s: %d\n",
3856                   GetLastError(), __FILE__, __LINE__);
3857 
3858           // We can't register this thread (no more handles) so this thread
3859           // may be racing with a thread that is calling exit(). If the thread
3860           // that is calling exit() has managed to set the process_exiting
3861           // flag, then this thread will be caught in the SuspendThread()
3862           // infinite loop below which closes that race. A small timing
3863           // window remains before the process_exiting flag is set, but it
3864           // is only exposed when we are out of handles.
3865         } else {
3866           ++handle_count;
3867           registered = true;
3868 
3869           // The current exiting thread has stored its handle in the array, and now
3870           // should leave the critical section before calling _endthreadex().
3871         }
3872 
3873       } else if (what != EPT_THREAD && handle_count > 0) {
3874         jlong start_time, finish_time, timeout_left;
3875         // Before ending the process, make sure all the threads that had called
3876         // _endthreadex() completed.
3877 
3878         // Set the priority level of the current thread to the same value as
3879         // the priority level of exiting threads.
3880         // This is to ensure it will be given a fair chance to execute if
3881         // the timeout expires.
3882         hthr = GetCurrentThread();
3883         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
3884         start_time = os::javaTimeNanos();
3885         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
3886         for (i = 0; ; ) {
3887           int portion_count = handle_count - i;
3888           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
3889             portion_count = MAXIMUM_WAIT_OBJECTS;
3890           }
3891           for (j = 0; j < portion_count; ++j) {
3892             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
3893           }
3894           timeout_left = (finish_time - start_time) / 1000000L;
3895           if (timeout_left < 0) {
3896             timeout_left = 0;
3897           }
3898           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
3899           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
3900             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3901                     (res == WAIT_FAILED ? "failed" : "timed out"),
3902                     GetLastError(), __FILE__, __LINE__);
3903             // Reset portion_count so we close the remaining
3904             // handles due to this error.
3905             portion_count = handle_count - i;
3906           }
3907           for (j = 0; j < portion_count; ++j) {
3908             CloseHandle(handles[i + j]);
3909           }
3910           if ((i += portion_count) >= handle_count) {
3911             break;
3912           }
3913           start_time = os::javaTimeNanos();
3914         }
3915         handle_count = 0;
3916       }
3917 
3918       LeaveCriticalSection(&crit_sect);
3919     }
3920 
3921     if (!registered &&
3922         OrderAccess::load_acquire(&process_exiting) != 0 &&
3923         process_exiting != GetCurrentThreadId()) {
3924       // Some other thread is about to call exit(), so we don't let
3925       // the current unregistered thread proceed to exit() or _endthreadex()
3926       while (true) {
3927         SuspendThread(GetCurrentThread());
3928         // Avoid busy-wait loop, if SuspendThread() failed.
3929         Sleep(EXIT_TIMEOUT);
3930       }
3931     }
3932   }
3933 
3934   // We are here if either
3935   // - there's no 'race at exit' bug on this OS release;
3936   // - initialization of the critical section failed (unlikely);
3937   // - the current thread has registered itself and left the critical section;
3938   // - the process-exiting thread has raised the flag and left the critical section.
3939   if (what == EPT_THREAD) {
3940     _endthreadex((unsigned)exit_code);
3941   } else if (what == EPT_PROCESS) {
3942     ::exit(exit_code);
3943   } else {
3944     _exit(exit_code);
3945   }
3946 
3947   // Should not reach here
3948   return exit_code;
3949 }
3950 
3951 #undef EXIT_TIMEOUT
3952 
3953 void os::win32::setmode_streams() {
3954   _setmode(_fileno(stdin), _O_BINARY);
3955   _setmode(_fileno(stdout), _O_BINARY);
3956   _setmode(_fileno(stderr), _O_BINARY);
3957 }
3958 
3959 
3960 bool os::is_debugger_attached() {
3961   return IsDebuggerPresent() ? true : false;
3962 }
3963 
3964 
3965 void os::wait_for_keypress_at_exit(void) {
3966   if (PauseAtExit) {
3967     fprintf(stderr, "Press any key to continue...\n");
3968     fgetc(stdin);
3969   }
3970 }
3971 
3972 
3973 bool os::message_box(const char* title, const char* message) {
3974   int result = MessageBox(NULL, message, title,
3975                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
3976   return result == IDYES;
3977 }
3978 
3979 #ifndef PRODUCT
3980 #ifndef _WIN64
3981 // Helpers to check whether NX protection is enabled
3982 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
3983   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
3984       pex->ExceptionRecord->NumberParameters > 0 &&
3985       pex->ExceptionRecord->ExceptionInformation[0] ==
3986       EXCEPTION_INFO_EXEC_VIOLATION) {
3987     return EXCEPTION_EXECUTE_HANDLER;
3988   }
3989   return EXCEPTION_CONTINUE_SEARCH;
3990 }
3991 
3992 void nx_check_protection() {
3993   // If NX is enabled we'll get an exception calling into code on the stack
3994   char code[] = { (char)0xC3 }; // ret
3995   void *code_ptr = (void *)code;
3996   __try {
3997     __asm call code_ptr
3998   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
3999     tty->print_raw_cr("NX protection detected.");
4000   }
4001 }
4002 #endif // _WIN64
4003 #endif // PRODUCT
4004 
4005 // This is called _before_ the global arguments have been parsed
4006 void os::init(void) {
4007   _initial_pid = _getpid();
4008 
4009   init_random(1234567);
4010 
4011   win32::initialize_system_info();
4012   win32::setmode_streams();
4013   init_page_sizes((size_t) win32::vm_page_size());
4014 
4015   // This may be overridden later when argument processing is done.
4016   FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);
4017 
4018   // Initialize main_process and main_thread
4019   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4020   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4021                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4022     fatal("DuplicateHandle failed\n");
4023   }
4024   main_thread_id = (int) GetCurrentThreadId();
4025 
4026   // initialize fast thread access - only used for 32-bit
4027   win32::initialize_thread_ptr_offset();
4028 }
4029 
4030 // To install functions for atexit processing
4031 extern "C" {
4032   static void perfMemory_exit_helper() {
4033     perfMemory_exit();
4034   }
4035 }
4036 
4037 static jint initSock();
4038 
4039 // this is called _after_ the global arguments have been parsed
4040 jint os::init_2(void) {
4041 
4042   // This could be set at any time, but all platforms
4043   // have to set it the same, so we have to mirror Solaris.
4044   DEBUG_ONLY(os::set_mutex_init_done();)
4045 
4046   // Setup Windows Exceptions
4047 
4048 #if INCLUDE_AOT
4049   // If AOT is enabled we need to install a vectored exception handler
4050   // in order to forward implicit exceptions from code in AOT
4051   // generated DLLs.  This is necessary since these DLLs are not
4052   // registered for structured exceptions like codecache methods are.
4053   if (AOTLibrary != NULL && (UseAOT || FLAG_IS_DEFAULT(UseAOT))) {
4054     topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelVectoredExceptionFilter);
4055   }
4056 #endif
4057 
4058   // for debugging float code generation bugs
4059   if (ForceFloatExceptions) {
4060 #ifndef  _WIN64
4061     static long fp_control_word = 0;
4062     __asm { fstcw fp_control_word }
4063     // see Intel PPro Manual, Vol. 2, p 7-16
4064     const long precision = 0x20;
4065     const long underflow = 0x10;
4066     const long overflow  = 0x08;
4067     const long zero_div  = 0x04;
4068     const long denorm    = 0x02;
4069     const long invalid   = 0x01;
4070     fp_control_word |= invalid;
4071     __asm { fldcw fp_control_word }
4072 #endif
4073   }
4074 
4075   // If stack_commit_size is 0, Windows will reserve the default size,
4076   // but only commit a small portion of it.
4077   size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
4078   size_t default_reserve_size = os::win32::default_stack_size();
4079   size_t actual_reserve_size = stack_commit_size;
4080   if (stack_commit_size < default_reserve_size) {
4081     // If stack_commit_size == 0, we want this too
4082     actual_reserve_size = default_reserve_size;
4083   }
4084 
4085   // Check minimum allowable stack size for thread creation and to initialize
4086   // the java system classes, including StackOverflowError - depends on page
4087   // size.  Add two 4K pages for compiler2 recursion in main thread.
4088   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4089   // class initialization depending on 32 or 64 bit VM.
4090   size_t min_stack_allowed =
4091             (size_t)(JavaThread::stack_guard_zone_size() +
4092                      JavaThread::stack_shadow_zone_size() +
4093                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4094 
4095   min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
4096 
4097   if (actual_reserve_size < min_stack_allowed) {
4098     tty->print_cr("\nThe Java thread stack size specified is too small. "
4099                   "Specify at least %dk",
4100                   min_stack_allowed / K);
4101     return JNI_ERR;
4102   }
4103 
4104   JavaThread::set_stack_size_at_create(stack_commit_size);
4105 
4106   // Calculate a theoretical limit on the number of threads, to guard against
4107   // artificial out-of-memory situations where all available address space has
4108   // been reserved by thread stacks.
4109   assert(actual_reserve_size != 0, "Must have a stack");
4110 
4111   // Calculate the thread limit when we should start doing Virtual Memory
4112   // banging. Currently when the threads will have used all but 200Mb of space.
4113   //
4114   // TODO: consider performing a similar calculation for commit size instead
4115   // as reserve size, since on a 64-bit platform we'll run into that more
4116   // often than running out of virtual memory space.  We can use the
4117   // lower value of the two calculations as the os_thread_limit.
4118   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4119   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4120 
4121   // at exit methods are called in the reverse order of their registration.
4122   // there is no limit to the number of functions registered. atexit does
4123   // not set errno.
4124 
4125   if (PerfAllowAtExitRegistration) {
4126     // only register atexit functions if PerfAllowAtExitRegistration is set.
4127     // atexit functions can be delayed until process exit time, which
4128     // can be problematic for embedded VM situations. Embedded VMs should
4129     // call DestroyJavaVM() to assure that VM resources are released.
4130 
4131     // note: perfMemory_exit_helper atexit function may be removed in
4132     // the future if the appropriate cleanup code can be added to the
4133     // VM_Exit VMOperation's doit method.
4134     if (atexit(perfMemory_exit_helper) != 0) {
4135       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4136     }
4137   }
4138 
4139 #ifndef _WIN64
4140   // Print something if NX is enabled (win32 on AMD64)
4141   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4142 #endif
4143 
4144   // initialize thread priority policy
4145   prio_init();
4146 
4147   if (UseNUMA && !ForceNUMA) {
4148     UseNUMA = false; // We don't fully support this yet
4149   }
4150 
4151   if (UseNUMAInterleaving) {
4152     // first check whether this Windows OS supports VirtualAllocExNuma; if not, ignore this flag
4153     bool success = numa_interleaving_init();
4154     if (!success) UseNUMAInterleaving = false;
4155   }
4156 
4157   if (initSock() != JNI_OK) {
4158     return JNI_ERR;
4159   }
4160 
4161   SymbolEngine::recalc_search_path();
4162 
4163   // Initialize data for jdk.internal.misc.Signal
4164   if (!ReduceSignalUsage) {
4165     jdk_misc_signal_init();
4166   }
4167 
4168   return JNI_OK;
4169 }
4170 
4171 // Mark the polling page as unreadable
4172 void os::make_polling_page_unreadable(void) {
4173   DWORD old_status;
4174   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4175                       PAGE_NOACCESS, &old_status)) {
4176     fatal("Could not disable polling page");
4177   }
4178 }
4179 
4180 // Mark the polling page as readable
4181 void os::make_polling_page_readable(void) {
4182   DWORD old_status;
4183   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4184                       PAGE_READONLY, &old_status)) {
4185     fatal("Could not enable polling page");
4186   }
4187 }
4188 
4189 // combine the high and low DWORD into a ULONGLONG
4190 static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
4191   ULONGLONG value = high_word;
4192   value <<= sizeof(high_word) * 8;
4193   value |= low_word;
4194   return value;
4195 }
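     // Worked example: make_double_word(0x1, 0x80000000) shifts the high DWORD left
     // by 32 bits and ORs in the low DWORD, yielding 0x0000000180000000ULL; this is
     // how the 32-bit halves of file sizes and FILETIMEs are recombined below.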
4196 
4197 // Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
4198 static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
4199   ::memset((void*)sbuf, 0, sizeof(struct stat));
4200   sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
4201   sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
4202                                   file_data.ftLastWriteTime.dwLowDateTime);
4203   sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
4204                                   file_data.ftCreationTime.dwLowDateTime);
4205   sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
4206                                   file_data.ftLastAccessTime.dwLowDateTime);
4207   if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
4208     sbuf->st_mode |= S_IFDIR;
4209   } else {
4210     sbuf->st_mode |= S_IFREG;
4211   }
4212 }
4213 
4214 // The following function is adapted from java.base/windows/native/libjava/canonicalize_md.c
4215 // Creates a UNC path from a single-byte path. The returned buffer is
4216 // allocated in the C heap and needs to be freed by the caller.
4217 // Returns NULL on error.
4218 static wchar_t* create_unc_path(const char* path, errno_t &err) {
4219   wchar_t* wpath = NULL;
4220   size_t converted_chars = 0;
4221   size_t path_len = strlen(path) + 1; // includes the terminating NULL
4222   if (path[0] == '\\' && path[1] == '\\') {
4223     if (path[2] == '?' && path[3] == '\\'){
4224       // if it already has a \\?\ don't do the prefix
4225       wpath = (wchar_t*)os::malloc(path_len * sizeof(wchar_t), mtInternal);
4226       if (wpath != NULL) {
4227         err = ::mbstowcs_s(&converted_chars, wpath, path_len, path, path_len);
4228       } else {
4229         err = ENOMEM;
4230       }
4231     } else {
4232       // only UNC pathname includes double slashes here
4233       wpath = (wchar_t*)os::malloc((path_len + 7) * sizeof(wchar_t), mtInternal);
4234       if (wpath != NULL) {
4235         ::wcscpy(wpath, L"\\\\?\\UNC\0");
4236         err = ::mbstowcs_s(&converted_chars, &wpath[7], path_len, path, path_len);
4237       } else {
4238         err = ENOMEM;
4239       }
4240     }
4241   } else {
4242     wpath = (wchar_t*)os::malloc((path_len + 4) * sizeof(wchar_t), mtInternal);
4243     if (wpath != NULL) {
4244       ::wcscpy(wpath, L"\\\\?\\\0");
4245       err = ::mbstowcs_s(&converted_chars, &wpath[4], path_len, path, path_len);
4246     } else {
4247       err = ENOMEM;
4248     }
4249   }
4250   return wpath;
4251 }
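     // Example (sketch): a long plain path gets the Win32 long-path prefix, e.g.
     //
     //   create_unc_path("D:\very\long\path", err)  ->  L"\\?\D:\very\long\path"
     //
     // which lifts the MAX_PATH limit for the wide-character APIs used below.
     // Paths that already carry a "\\?\" prefix are only widened, not prefixed again.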
4252 
4253 static void destroy_unc_path(wchar_t* wpath) {
4254   os::free(wpath);
4255 }
4256 
4257 int os::stat(const char *path, struct stat *sbuf) {
4258   char* pathbuf = (char*)os::strdup(path, mtInternal);
4259   if (pathbuf == NULL) {
4260     errno = ENOMEM;
4261     return -1;
4262   }
4263   os::native_path(pathbuf);
4264   int ret;
4265   WIN32_FILE_ATTRIBUTE_DATA file_data;
4266   // Not using stat() to avoid the problem described in JDK-6539723
4267   if (strlen(path) < MAX_PATH) {
4268     BOOL bret = ::GetFileAttributesExA(pathbuf, GetFileExInfoStandard, &file_data);
4269     if (!bret) {
4270       errno = ::GetLastError();
4271       ret = -1;
4272     }
4273     else {
4274       file_attribute_data_to_stat(sbuf, file_data);
4275       ret = 0;
4276     }
4277   } else {
4278     errno_t err = ERROR_SUCCESS;
4279     wchar_t* wpath = create_unc_path(pathbuf, err);
4280     if (err != ERROR_SUCCESS) {
4281       if (wpath != NULL) {
4282         destroy_unc_path(wpath);
4283       }
4284       os::free(pathbuf);
4285       errno = err;
4286       return -1;
4287     }
4288     BOOL bret = ::GetFileAttributesExW(wpath, GetFileExInfoStandard, &file_data);
4289     if (!bret) {
4290       errno = ::GetLastError();
4291       ret = -1;
4292     } else {
4293       file_attribute_data_to_stat(sbuf, file_data);
4294       ret = 0;
4295     }
4296     destroy_unc_path(wpath);
4297   }
4298   os::free(pathbuf);
4299   return ret;
4300 }
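     // Usage sketch (assumption, not from the original sources; 'some_path' is a
     // placeholder):
     //
     //   struct stat st;
     //   if (os::stat(some_path, &st) == 0 && (st.st_mode & S_IFDIR) != 0) {
     //     // the path exists and names a directory
     //   }
     //
     // Unlike the CRT stat(), this also handles paths longer than MAX_PATH, since
     // they are converted to "\\?\" form above.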
4301 
4302 static HANDLE create_read_only_file_handle(const char* file) {
4303   if (file == NULL) {
4304     return INVALID_HANDLE_VALUE;
4305   }
4306 
4307   char* nativepath = (char*)os::strdup(file, mtInternal);
4308   if (nativepath == NULL) {
4309     errno = ENOMEM;
4310     return INVALID_HANDLE_VALUE;
4311   }
4312   os::native_path(nativepath);
4313 
4314   size_t len = strlen(nativepath);
4315   HANDLE handle = INVALID_HANDLE_VALUE;
4316 
4317   if (len < MAX_PATH) {
4318     handle = ::CreateFile(nativepath, 0, FILE_SHARE_READ,
4319                           NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4320   } else {
4321     errno_t err = ERROR_SUCCESS;
4322     wchar_t* wfile = create_unc_path(nativepath, err);
4323     if (err != ERROR_SUCCESS) {
4324       if (wfile != NULL) {
4325         destroy_unc_path(wfile);
4326       }
4327       os::free(nativepath);
4328       return INVALID_HANDLE_VALUE;
4329     }
4330     handle = ::CreateFileW(wfile, 0, FILE_SHARE_READ,
4331                            NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4332     destroy_unc_path(wfile);
4333   }
4334 
4335   os::free(nativepath);
4336   return handle;
4337 }
4338 
4339 bool os::same_files(const char* file1, const char* file2) {
4340 
4341   if (file1 == NULL && file2 == NULL) {
4342     return true;
4343   }
4344 
4345   if (file1 == NULL || file2 == NULL) {
4346     return false;
4347   }
4348 
4349   if (strcmp(file1, file2) == 0) {
4350     return true;
4351   }
4352 
4353   HANDLE handle1 = create_read_only_file_handle(file1);
4354   HANDLE handle2 = create_read_only_file_handle(file2);
4355   bool result = false;
4356 
4357   // if we could open both paths...
4358   if (handle1 != INVALID_HANDLE_VALUE && handle2 != INVALID_HANDLE_VALUE) {
4359     BY_HANDLE_FILE_INFORMATION fileInfo1;
4360     BY_HANDLE_FILE_INFORMATION fileInfo2;
4361     if (::GetFileInformationByHandle(handle1, &fileInfo1) &&
4362       ::GetFileInformationByHandle(handle2, &fileInfo2)) {
4363       // the paths are the same if they refer to the same file (fileindex) on the same volume (volume serial number)
4364       if (fileInfo1.dwVolumeSerialNumber == fileInfo2.dwVolumeSerialNumber &&
4365         fileInfo1.nFileIndexHigh == fileInfo2.nFileIndexHigh &&
4366         fileInfo1.nFileIndexLow == fileInfo2.nFileIndexLow) {
4367         result = true;
4368       }
4369     }
4370   }
4371 
4372   //free the handles
4373   if (handle1 != INVALID_HANDLE_VALUE) {
4374     ::CloseHandle(handle1);
4375   }
4376 
4377   if (handle2 != INVALID_HANDLE_VALUE) {
4378     ::CloseHandle(handle2);
4379   }
4380 
4381   return result;
4382 }
4383 
4384 
4385 #define FT2INT64(ft) \
4386   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
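     // FILETIME counts time in 100-nanosecond units, so FT2INT64 yields a count
     // of 100 ns intervals; thread_cpu_time() below multiplies by 100 to convert
     // the value to nanoseconds.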
4387 
4388 
4389 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4390 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4391 // of a thread.
4392 //
4393 // current_thread_cpu_time() and thread_cpu_time(Thread*) return
4394 // the fast estimate available on the platform.
4395 
4396 // current_thread_cpu_time() is not optimized for Windows yet
4397 jlong os::current_thread_cpu_time() {
4398   // return user + sys since the cost is the same
4399   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4400 }
4401 
4402 jlong os::thread_cpu_time(Thread* thread) {
4403   // consistent with what current_thread_cpu_time() returns.
4404   return os::thread_cpu_time(thread, true /* user+sys */);
4405 }
4406 
4407 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4408   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4409 }
4410 
4411 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4412   // This code is copied from the classic VM's hpi::sysThreadCPUTime.
4413   // If this function changes, os::is_thread_cpu_time_supported() should too
4414   FILETIME CreationTime;
4415   FILETIME ExitTime;
4416   FILETIME KernelTime;
4417   FILETIME UserTime;
4418 
4419   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4420                       &ExitTime, &KernelTime, &UserTime) == 0) {
4421     return -1;
4422   } else if (user_sys_cpu_time) {
4423     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4424   } else {
4425     return FT2INT64(UserTime) * 100;
4426   }
4427 }
4428 
4429 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4430   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4431   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4432   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4433   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4434 }
4435 
4436 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4437   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4438   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4439   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4440   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4441 }
4442 
4443 bool os::is_thread_cpu_time_supported() {
4444   // see os::thread_cpu_time
4445   FILETIME CreationTime;
4446   FILETIME ExitTime;
4447   FILETIME KernelTime;
4448   FILETIME UserTime;
4449 
4450   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4451                       &KernelTime, &UserTime) == 0) {
4452     return false;
4453   } else {
4454     return true;
4455   }
4456 }
4457 
4458 // Windows doesn't provide a loadavg primitive, so this is stubbed out for now.
4459 // It does have primitives (PDH API) to get CPU usage and run queue length.
4460 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4461 // If we wanted to implement loadavg on Windows, we have a few options:
4462 //
4463 // a) Query CPU usage and run queue length and "fake" an answer by
4464 //    returning the CPU usage if it's under 100%, and the run queue
4465 //    length otherwise.  It turns out that querying is pretty slow
4466 //    on Windows, on the order of 200 microseconds on a fast machine.
4467 //    Note that on Windows the CPU usage value is the % usage
4468 //    since the last time the API was called (and the first call
4469 //    returns 100%), so we'd have to deal with that as well.
4470 //
4471 // b) Sample the "fake" answer using a sampling thread and store
4472 //    the answer in a global variable.  The call to loadavg would
4473 //    just return the value of the global, avoiding the slow query.
4474 //
4475 // c) Sample a better answer using exponential decay to smooth the
4476 //    value.  This is basically the algorithm used by UNIX kernels.
4477 //
4478 // Note that sampling thread starvation could affect both (b) and (c).
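     // As a rough sketch of option (c) only (not implemented here), a sampling
     // thread could maintain an exponentially decaying average, e.g.
     //   load = load * exp(-interval/period) + runnable * (1 - exp(-interval/period))
     // evaluated for 1, 5 and 15 minute periods, which is essentially what the
     // UNIX kernels referred to above compute.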
4479 int os::loadavg(double loadavg[], int nelem) {
4480   return -1;
4481 }
4482 
4483 
4484 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4485 bool os::dont_yield() {
4486   return DontYieldALot;
4487 }
4488 
4489 // This method is a slightly reworked copy of JDK's sysOpen
4490 // from src/windows/hpi/src/sys_api_md.c
4491 
4492 int os::open(const char *path, int oflag, int mode) {
4493   char* pathbuf = (char*)os::strdup(path, mtInternal);
4494   if (pathbuf == NULL) {
4495     errno = ENOMEM;
4496     return -1;
4497   }
4498   os::native_path(pathbuf);
4499   int ret;
4500   if (strlen(path) < MAX_PATH) {
4501     ret = ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4502   } else {
4503     errno_t err = ERROR_SUCCESS;
4504     wchar_t* wpath = create_unc_path(pathbuf, err);
4505     if (err != ERROR_SUCCESS) {
4506       if (wpath != NULL) {
4507         destroy_unc_path(wpath);
4508       }
4509       os::free(pathbuf);
4510       errno = err;
4511       return -1;
4512     }
4513     ret = ::_wopen(wpath, oflag | O_BINARY | O_NOINHERIT, mode);
4514     if (ret == -1) {
4515       errno = ::GetLastError();
4516     }
4517     destroy_unc_path(wpath);
4518   }
4519   os::free(pathbuf);
4520   return ret;
4521 }
4522 
4523 FILE* os::open(int fd, const char* mode) {
4524   return ::_fdopen(fd, mode);
4525 }
4526 
4527 // Is a (classpath) directory empty?
4528 bool os::dir_is_empty(const char* path) {
4529   char* search_path = (char*)os::malloc(strlen(path) + 3, mtInternal);
4530   if (search_path == NULL) {
4531     errno = ENOMEM;
4532     return false;
4533   }
4534   strcpy(search_path, path);
4535   os::native_path(search_path);
4536   // Append "*", or possibly "\\*", to path
4537   if (search_path[1] == ':' &&
4538        (search_path[2] == '\0' ||
4539          (search_path[2] == '\\' && search_path[3] == '\0'))) {
4540     // No '\\' needed for cases like "Z:" or "Z:\"
4541     strcat(search_path, "*");
4542   }
4543   else {
4544     strcat(search_path, "\\*");
4545   }
4546   errno_t err = ERROR_SUCCESS;
4547   wchar_t* wpath = create_unc_path(search_path, err);
4548   if (err != ERROR_SUCCESS) {
4549     if (wpath != NULL) {
4550       destroy_unc_path(wpath);
4551     }
4552     os::free(search_path);
4553     errno = err;
4554     return false;
4555   }
4556   WIN32_FIND_DATAW fd;
4557   HANDLE f = ::FindFirstFileW(wpath, &fd);
4558   destroy_unc_path(wpath);
4559   bool is_empty = true;
4560   if (f != INVALID_HANDLE_VALUE) {
4561     while (is_empty && ::FindNextFileW(f, &fd)) {
4562       // An empty directory contains only the current directory file
4563       // and the previous directory file.
4564       if ((wcscmp(fd.cFileName, L".") != 0) &&
4565           (wcscmp(fd.cFileName, L"..") != 0)) {
4566         is_empty = false;
4567       }
4568     }
4569     FindClose(f);
4570   }
4571   os::free(search_path);
4572   return is_empty;
4573 }
4574 
4575 // create binary file, rewriting existing file if required
4576 int os::create_binary_file(const char* path, bool rewrite_existing) {
4577   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4578   if (!rewrite_existing) {
4579     oflags |= _O_EXCL;
4580   }
4581   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4582 }
4583 
4584 // return current position of file pointer
4585 jlong os::current_file_offset(int fd) {
4586   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4587 }
4588 
4589 // move file pointer to the specified offset
4590 jlong os::seek_to_file_offset(int fd, jlong offset) {
4591   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4592 }
4593 
4594 
4595 jlong os::lseek(int fd, jlong offset, int whence) {
4596   return (jlong) ::_lseeki64(fd, offset, whence);
4597 }
4598 
4599 ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4600   OVERLAPPED ov;
4601   DWORD nread;
4602   BOOL result;
4603 
4604   ZeroMemory(&ov, sizeof(ov));
4605   ov.Offset = (DWORD)offset;
4606   ov.OffsetHigh = (DWORD)(offset >> 32);
4607 
4608   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4609 
4610   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4611 
4612   return result ? nread : 0;
4613 }
4614 
4615 
4616 // This method is a slightly reworked copy of JDK's sysNativePath
4617 // from src/windows/hpi/src/path_md.c
4618 
4619 // Convert a pathname to native format.  On win32, this involves forcing all
4620 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4621 // sometimes rejects '/') and removing redundant separators.  The input path is
4622 // assumed to have been converted into the character encoding used by the local
4623 // system.  Because this might be a double-byte encoding, care is taken to
4624 // treat double-byte lead characters correctly.
4625 //
4626 // This procedure modifies the given path in place, as the result is never
4627 // longer than the original.  There is no error return; this operation always
4628 // succeeds.
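     // A few illustrative examples of the conversion (all derived from the rules
     // described in the comments within this function):
     //   "a/b//c/"   becomes  "a\b\c"     (separators forced to '\\'; redundant
     //                                     and trailing separators removed)
     //   "/c:/path"  becomes  "c:\path"   (leading separator dropped before a
     //                                     drive specifier)
     //   "z:"        becomes  "z:."       (C runtime library workaround, see the
     //                                     end of this function)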
4629 char * os::native_path(char *path) {
4630   char *src = path, *dst = path, *end = path;
4631   char *colon = NULL;  // If a drive specifier is found, this will
4632                        // point to the colon following the drive letter
4633 
4634   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4635   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4636           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4637 
4638   // Check for leading separators
4639 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4640   while (isfilesep(*src)) {
4641     src++;
4642   }
4643 
4644   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4645     // Remove leading separators if followed by drive specifier.  This
4646     // hack is necessary to support file URLs containing drive
4647     // specifiers (e.g., "file://c:/path").  As a side effect,
4648     // "/c:/path" can be used as an alternative to "c:/path".
4649     *dst++ = *src++;
4650     colon = dst;
4651     *dst++ = ':';
4652     src++;
4653   } else {
4654     src = path;
4655     if (isfilesep(src[0]) && isfilesep(src[1])) {
4656       // UNC pathname: Retain first separator; leave src pointed at
4657       // second separator so that further separators will be collapsed
4658       // into the second separator.  The result will be a pathname
4659       // beginning with "\\\\" followed (most likely) by a host name.
4660       src = dst = path + 1;
4661       path[0] = '\\';     // Force first separator to '\\'
4662     }
4663   }
4664 
4665   end = dst;
4666 
4667   // Remove redundant separators from remainder of path, forcing all
4668   // separators to be '\\' rather than '/'. Also, single byte space
4669   // characters are removed from the end of the path because those
4670   // are not legal ending characters on this operating system.
4671   //
4672   while (*src != '\0') {
4673     if (isfilesep(*src)) {
4674       *dst++ = '\\'; src++;
4675       while (isfilesep(*src)) src++;
4676       if (*src == '\0') {
4677         // Check for trailing separator
4678         end = dst;
4679         if (colon == dst - 2) break;  // "z:\\"
4680         if (dst == path + 1) break;   // "\\"
4681         if (dst == path + 2 && isfilesep(path[0])) {
4682           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4683           // beginning of a UNC pathname.  Even though it is not, by
4684           // itself, a valid UNC pathname, we leave it as is in order
4685           // to be consistent with the path canonicalizer as well
4686           // as the win32 APIs, which treat this case as an invalid
4687           // UNC pathname rather than as an alias for the root
4688           // directory of the current drive.
4689           break;
4690         }
4691         end = --dst;  // Path does not denote a root directory, so
4692                       // remove trailing separator
4693         break;
4694       }
4695       end = dst;
4696     } else {
4697       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4698         *dst++ = *src++;
4699         if (*src) *dst++ = *src++;
4700         end = dst;
4701       } else {  // Copy a single-byte character
4702         char c = *src++;
4703         *dst++ = c;
4704         // Space is not a legal ending character
4705         if (c != ' ') end = dst;
4706       }
4707     }
4708   }
4709 
4710   *end = '\0';
4711 
4712   // For "z:", add "." to work around a bug in the C runtime library
4713   if (colon == dst - 1) {
4714     path[2] = '.';
4715     path[3] = '\0';
4716   }
4717 
4718   return path;
4719 }
4720 
4721 // This code is a copy of JDK's sysSetLength
4722 // from src/windows/hpi/src/sys_api_md.c
4723 
4724 int os::ftruncate(int fd, jlong length) {
4725   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4726   long high = (long)(length >> 32);
4727   DWORD ret;
4728 
4729   if (h == (HANDLE)(-1)) {
4730     return -1;
4731   }
4732 
4733   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4734   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4735     return -1;
4736   }
4737 
4738   if (::SetEndOfFile(h) == FALSE) {
4739     return -1;
4740   }
4741 
4742   return 0;
4743 }
4744 
4745 int os::get_fileno(FILE* fp) {
4746   return _fileno(fp);
4747 }
4748 
4749 // This code is a copy of JDK's sysSync
4750 // from src/windows/hpi/src/sys_api_md.c
4751 // except for the legacy workaround for a bug in Win 98
4752 
4753 int os::fsync(int fd) {
4754   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4755 
4756   if ((!::FlushFileBuffers(handle)) &&
4757       (GetLastError() != ERROR_ACCESS_DENIED)) {
4758     // from winerror.h
4759     return -1;
4760   }
4761   return 0;
4762 }
4763 
4764 static int nonSeekAvailable(int, long *);
4765 static int stdinAvailable(int, long *);
4766 
4767 // This code is a copy of JDK's sysAvailable
4768 // from src/windows/hpi/src/sys_api_md.c
4769 
4770 int os::available(int fd, jlong *bytes) {
4771   jlong cur, end;
4772   struct _stati64 stbuf64;
4773 
4774   if (::_fstati64(fd, &stbuf64) >= 0) {
4775     int mode = stbuf64.st_mode;
4776     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4777       int ret;
4778       long lpbytes;
4779       if (fd == 0) {
4780         ret = stdinAvailable(fd, &lpbytes);
4781       } else {
4782         ret = nonSeekAvailable(fd, &lpbytes);
4783       }
4784       (*bytes) = (jlong)(lpbytes);
4785       return ret;
4786     }
4787     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4788       return FALSE;
4789     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4790       return FALSE;
4791     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4792       return FALSE;
4793     }
4794     *bytes = end - cur;
4795     return TRUE;
4796   } else {
4797     return FALSE;
4798   }
4799 }
4800 
4801 void os::flockfile(FILE* fp) {
4802   _lock_file(fp);
4803 }
4804 
4805 void os::funlockfile(FILE* fp) {
4806   _unlock_file(fp);
4807 }
4808 
4809 // This code is a copy of JDK's nonSeekAvailable
4810 // from src/windows/hpi/src/sys_api_md.c
4811 
4812 static int nonSeekAvailable(int fd, long *pbytes) {
4813   // This is used for available on non-seekable devices
4814   // (like both named and anonymous pipes, such as pipes
4815   //  connected to an exec'd process).
4816   // Standard Input is a special case.
4817   HANDLE han;
4818 
4819   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4820     return FALSE;
4821   }
4822 
4823   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4824     // PeekNamedPipe fails when at EOF.  In that case we
4825     // simply make *pbytes = 0 which is consistent with the
4826     // behavior we get on Solaris when an fd is at EOF.
4827     // The only alternative is to raise an Exception,
4828     // which isn't really warranted.
4829     //
4830     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4831       return FALSE;
4832     }
4833     *pbytes = 0;
4834   }
4835   return TRUE;
4836 }
4837 
4838 #define MAX_INPUT_EVENTS 2000
4839 
4840 // This code is a copy of JDK's stdinAvailable
4841 // from src/windows/hpi/src/sys_api_md.c
4842 
4843 static int stdinAvailable(int fd, long *pbytes) {
4844   HANDLE han;
4845   DWORD numEventsRead = 0;  // Number of events read from buffer
4846   DWORD numEvents = 0;      // Number of events in buffer
4847   DWORD i = 0;              // Loop index
4848   DWORD curLength = 0;      // Position marker
4849   DWORD actualLength = 0;   // Number of bytes readable
4850   BOOL error = FALSE;       // Error holder
4851   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4852 
4853   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4854     return FALSE;
4855   }
4856 
4857   // Construct an array of input records in the console buffer
4858   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4859   if (error == 0) {
4860     return nonSeekAvailable(fd, pbytes);
4861   }
4862 
4863   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4864   if (numEvents > MAX_INPUT_EVENTS) {
4865     numEvents = MAX_INPUT_EVENTS;
4866   }
4867 
4868   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4869   if (lpBuffer == NULL) {
4870     return FALSE;
4871   }
4872 
4873   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4874   if (error == 0) {
4875     os::free(lpBuffer);
4876     return FALSE;
4877   }
4878 
4879   // Examine input records for the number of bytes available
4880   for (i=0; i<numEvents; i++) {
4881     if (lpBuffer[i].EventType == KEY_EVENT) {
4882 
4883       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4884                                       &(lpBuffer[i].Event);
4885       if (keyRecord->bKeyDown == TRUE) {
4886         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4887         curLength++;
4888         if (*keyPressed == '\r') {
4889           actualLength = curLength;
4890         }
4891       }
4892     }
4893   }
4894 
4895   if (lpBuffer != NULL) {
4896     os::free(lpBuffer);
4897   }
4898 
4899   *pbytes = (long) actualLength;
4900   return TRUE;
4901 }
4902 
4903 // Map a block of memory.
4904 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4905                         char *addr, size_t bytes, bool read_only,
4906                         bool allow_exec) {
4907   HANDLE hFile;
4908   char* base;
4909 
4910   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4911                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4912   if (hFile == INVALID_HANDLE_VALUE) {
4913     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4914     return NULL;
4915   }
4916 
4917   if (allow_exec) {
4918     // CreateFileMapping/MapViewOfFileEx can't map executable memory
4919     // unless it comes from a PE image (which the shared archive is not.)
4920     // Even VirtualProtect refuses to give execute access to mapped memory
4921     // that was not previously executable.
4922     //
4923     // Instead, stick the executable region in anonymous memory.  Yuck.
4924     // Penalty is that ~4 pages will not be shareable - in the future
4925     // we might consider DLLizing the shared archive with a proper PE
4926     // header so that mapping executable + sharing is possible.
4927 
4928     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4929                                 PAGE_READWRITE);
4930     if (base == NULL) {
4931       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
4932       CloseHandle(hFile);
4933       return NULL;
4934     }
4935 
4936     // Record virtual memory allocation
4937     MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
4938 
4939     DWORD bytes_read;
4940     OVERLAPPED overlapped;
4941     overlapped.Offset = (DWORD)file_offset;
4942     overlapped.OffsetHigh = 0;
4943     overlapped.hEvent = NULL;
4944     // ReadFile guarantees that if the return value is true, the requested
4945     // number of bytes were read before returning.
4946     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4947     if (!res) {
4948       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
4949       release_memory(base, bytes);
4950       CloseHandle(hFile);
4951       return NULL;
4952     }
4953   } else {
4954     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4955                                     NULL /* file_name */);
4956     if (hMap == NULL) {
4957       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
4958       CloseHandle(hFile);
4959       return NULL;
4960     }
4961 
4962     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4963     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4964                                   (DWORD)bytes, addr);
4965     if (base == NULL) {
4966       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
4967       CloseHandle(hMap);
4968       CloseHandle(hFile);
4969       return NULL;
4970     }
4971 
4972     if (CloseHandle(hMap) == 0) {
4973       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
4974       CloseHandle(hFile);
4975       return base;
4976     }
4977   }
4978 
4979   if (allow_exec) {
4980     DWORD old_protect;
4981     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4982     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4983 
4984     if (!res) {
4985       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4986       // Don't consider this a hard error, on IA32 even if the
4987       // VirtualProtect fails, we should still be able to execute
4988       CloseHandle(hFile);
4989       return base;
4990     }
4991   }
4992 
4993   if (CloseHandle(hFile) == 0) {
4994     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4995     return base;
4996   }
4997 
4998   return base;
4999 }
5000 
5001 
5002 // Remap a block of memory.
5003 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5004                           char *addr, size_t bytes, bool read_only,
5005                           bool allow_exec) {
5006   // This OS does not allow existing memory maps to be remapped so we
5007   // would have to unmap the memory before we remap it.
5008 
5009   // Because there is a small window between unmapping memory and mapping
5010   // it in again with different protections, CDS archives are mapped RW
5011   // on windows, so this function isn't called.
5012   ShouldNotReachHere();
5013   return NULL;
5014 }
5015 
5016 
5017 // Unmap a block of memory.
5018 // Returns true=success, otherwise false.
5019 
5020 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5021   MEMORY_BASIC_INFORMATION mem_info;
5022   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
5023     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
5024     return false;
5025   }
5026 
5027   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
5028   // Instead, the executable region was allocated using VirtualAlloc(). See
5029   // pd_map_memory() above.
5030   //
5031   // The following flags should match the 'exec_access' flags used for
5032   // VirtualProtect() in pd_map_memory().
5033   if (mem_info.Protect == PAGE_EXECUTE_READ ||
5034       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
5035     return pd_release_memory(addr, bytes);
5036   }
5037 
5038   BOOL result = UnmapViewOfFile(addr);
5039   if (result == 0) {
5040     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
5041     return false;
5042   }
5043   return true;
5044 }
5045 
5046 void os::pause() {
5047   char filename[MAX_PATH];
5048   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5049     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5050   } else {
5051     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5052   }
5053 
5054   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5055   if (fd != -1) {
5056     struct stat buf;
5057     ::close(fd);
5058     while (::stat(filename, &buf) == 0) {
5059       Sleep(100);
5060     }
5061   } else {
5062     jio_fprintf(stderr,
5063                 "Could not open pause file '%s', continuing immediately.\n", filename);
5064   }
5065 }
5066 
5067 Thread* os::ThreadCrashProtection::_protected_thread = NULL;
5068 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
5069 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;
5070 
5071 os::ThreadCrashProtection::ThreadCrashProtection() {
5072 }
5073 
5074 // See the caveats for this class in os_windows.hpp
5075 // Protects the callback call so that a raised OS EXCEPTION causes a jump back
5076 // into this method and returns false. If no OS EXCEPTION was raised, returns
5077 // true.
5078 // The callback is supposed to provide the method that should be protected.
5079 //
5080 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
5081 
5082   Thread::muxAcquire(&_crash_mux, "CrashProtection");
5083 
5084   _protected_thread = Thread::current_or_null();
5085   assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");
5086 
5087   bool success = true;
5088   __try {
5089     _crash_protection = this;
5090     cb.call();
5091   } __except(EXCEPTION_EXECUTE_HANDLER) {
5092     // only for protection, nothing to do
5093     success = false;
5094   }
5095   _crash_protection = NULL;
5096   _protected_thread = NULL;
5097   Thread::muxRelease(&_crash_mux);
5098   return success;
5099 }
5100 
5101 
5102 class HighResolutionInterval : public CHeapObj<mtThread> {
5103   // The default timer resolution seems to be 10 milliseconds.
5104   // (Where is this written down?)
5105   // If someone wants to sleep for only a fraction of the default,
5106   // then we set the timer resolution down to 1 millisecond for
5107   // the duration of their interval.
5108   // We carefully set the resolution back, since otherwise we
5109   // seem to incur an overhead (3%?) that we don't need.
5110   // CONSIDER: if ms is small, say 3, then we should run with a high resolution timer.
5111   // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
5112   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
5113   // timeBeginPeriod() if the relative error exceeded some threshold.
5114   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
5115   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
5116   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
5117   // resolution timers running.
5118  private:
5119   jlong resolution;
5120  public:
5121   HighResolutionInterval(jlong ms) {
5122     resolution = ms % 10L;
5123     if (resolution != 0) {
5124       MMRESULT result = timeBeginPeriod(1L);
5125     }
5126   }
5127   ~HighResolutionInterval() {
5128     if (resolution != 0) {
5129       MMRESULT result = timeEndPeriod(1L);
5130     }
5131     resolution = 0L;
5132   }
5133 };
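     // HighResolutionInterval is used by os::PlatformEvent::park(jlong) below:
     // unless ForceTimeHighResolution is set, a timed wait whose length is not a
     // multiple of the default 10 ms resolution temporarily raises the timer
     // resolution to 1 ms for the duration of that wait.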
5134 
5135 // An Event wraps a win32 "CreateEvent" kernel handle.
5136 //
5137 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5138 //
5139 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5140 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5141 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5142 //     In addition, an unpark() operation might fetch the handle field, but the
5143 //     event could recycle between the fetch and the SetEvent() operation.
5144 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5145 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
5146 //     on a stale but recycled handle would be harmless, but in practice this might
5147 //     confuse other non-Sun code, so it's not a viable approach.
5148 //
5149 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5150 //     with the Event.  The event handle is never closed.  This could be construed
5151 //     as handle leakage, but only up to the maximum # of threads that have been extant
5152 //     at any one time.  This shouldn't be an issue, as windows platforms typically
5153 //     permit a process to have hundreds of thousands of open handles.
5154 //
5155 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5156 //     and release unused handles.
5157 //
5158 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5159 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5160 //
5161 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5162 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5163 //
5164 // We use (2).
5165 //
5166 // TODO-FIXME:
5167 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5168 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5169 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5170 // 3.  Collapse the JSR166 parker event, and the objectmonitor ParkEvent
5171 //     into a single win32 CreateEvent() handle.
5172 //
5173 // Assumption:
5174 //    Only one parker can exist on an event, which is why we allocate
5175 //    them per-thread. Multiple unparkers can coexist.
5176 //
5177 // _Event transitions in park()
5178 //   -1 => -1 : illegal
5179 //    1 =>  0 : pass - return immediately
5180 //    0 => -1 : block; then set _Event to 0 before returning
5181 //
5182 // _Event transitions in unpark()
5183 //    0 => 1 : just return
5184 //    1 => 1 : just return
5185 //   -1 => either 0 or 1; must signal target thread
5186 //         That is, we can safely transition _Event from -1 to either
5187 //         0 or 1.
5188 //
5189 // _Event serves as a restricted-range semaphore.
5190 //   -1 : thread is blocked, i.e. there is a waiter
5191 //    0 : neutral: thread is running or ready,
5192 //        could have been signaled after a wait started
5193 //    1 : signaled - thread is running or ready
5194 //
5195 // Another possible encoding of _Event would be with
5196 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5197 //
5198 
5199 int os::PlatformEvent::park(jlong Millis) {
5200   // Transitions for _Event:
5201   //   -1 => -1 : illegal
5202   //    1 =>  0 : pass - return immediately
5203   //    0 => -1 : block; then set _Event to 0 before returning
5204 
5205   guarantee(_ParkHandle != NULL , "Invariant");
5206   guarantee(Millis > 0          , "Invariant");
5207 
5208   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5209   // the initial park() operation.
5210   // Consider: use atomic decrement instead of CAS-loop
5211 
5212   int v;
5213   for (;;) {
5214     v = _Event;
5215     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5216   }
5217   guarantee((v == 0) || (v == 1), "invariant");
5218   if (v != 0) return OS_OK;
5219 
5220   // Do this the hard way by blocking ...
5221   // TODO: consider a brief spin here, gated on the success of recent
5222   // spin attempts by this thread.
5223   //
5224   // We decompose long timeouts into series of shorter timed waits.
5225   // Evidently large timeout values passed in WaitForSingleObject() are problematic on some
5226   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5227   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5228   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5229   // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
5230   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5231   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5232   // for the already waited time.  This policy does not admit any new outcomes.
5233   // In the future, however, we might want to track the accumulated wait time and
5234   // adjust Millis accordingly if we encounter a spurious wakeup.
5235 
5236   const int MAXTIMEOUT = 0x10000000;
5237   DWORD rv = WAIT_TIMEOUT;
5238   while (_Event < 0 && Millis > 0) {
5239     DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5240     if (Millis > MAXTIMEOUT) {
5241       prd = MAXTIMEOUT;
5242     }
5243     HighResolutionInterval *phri = NULL;
5244     if (!ForceTimeHighResolution) {
5245       phri = new HighResolutionInterval(prd);
5246     }
5247     rv = ::WaitForSingleObject(_ParkHandle, prd);
5248     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5249     if (rv == WAIT_TIMEOUT) {
5250       Millis -= prd;
5251     }
5252     delete phri; // if it is NULL, harmless
5253   }
5254   v = _Event;
5255   _Event = 0;
5256   // see comment at end of os::PlatformEvent::park() below:
5257   OrderAccess::fence();
5258   // If we encounter a nearly simultaneous timeout expiry and unpark()
5259   // we return OS_OK indicating we awoke via unpark().
5260   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5261   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5262 }
5263 
5264 void os::PlatformEvent::park() {
5265   // Transitions for _Event:
5266   //   -1 => -1 : illegal
5267   //    1 =>  0 : pass - return immediately
5268   //    0 => -1 : block; then set _Event to 0 before returning
5269 
5270   guarantee(_ParkHandle != NULL, "Invariant");
5271   // Invariant: Only the thread associated with the Event/PlatformEvent
5272   // may call park().
5273   // Consider: use atomic decrement instead of CAS-loop
5274   int v;
5275   for (;;) {
5276     v = _Event;
5277     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5278   }
5279   guarantee((v == 0) || (v == 1), "invariant");
5280   if (v != 0) return;
5281 
5282   // Do this the hard way by blocking ...
5283   // TODO: consider a brief spin here, gated on the success of recent
5284   // spin attempts by this thread.
5285   while (_Event < 0) {
5286     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5287     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5288   }
5289 
5290   // Usually we'll find _Event == 0 at this point, but as
5291   // an optional optimization we clear it, just in case
5292   // multiple unpark() operations drove _Event up to 1.
5293   _Event = 0;
5294   OrderAccess::fence();
5295   guarantee(_Event >= 0, "invariant");
5296 }
5297 
5298 void os::PlatformEvent::unpark() {
5299   guarantee(_ParkHandle != NULL, "Invariant");
5300 
5301   // Transitions for _Event:
5302   //    0 => 1 : just return
5303   //    1 => 1 : just return
5304   //   -1 => either 0 or 1; must signal target thread
5305   //         That is, we can safely transition _Event from -1 to either
5306   //         0 or 1.
5307   // See also: "Semaphores in Plan 9" by Mullender & Cox
5308   //
5309   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5310   // that it will take two back-to-back park() calls for the owning
5311   // thread to block. This has the benefit of forcing a spurious return
5312   // from the first park() call after an unpark() call which will help
5313   // shake out uses of park() and unpark() without condition variables.
5314 
5315   if (Atomic::xchg(1, &_Event) >= 0) return;
5316 
5317   ::SetEvent(_ParkHandle);
5318 }
5319 
5320 
5321 // JSR166
5322 // -------------------------------------------------------
5323 
5324 // The Windows implementation of Park is very straightforward: Basic
5325 // operations on Win32 Events turn out to have the right semantics to
5326 // use them directly. We opportunistically reuse the event inherited
5327 // from Monitor.
5328 
5329 void Parker::park(bool isAbsolute, jlong time) {
5330   guarantee(_ParkEvent != NULL, "invariant");
5331   // First, demultiplex/decode time arguments
5332   if (time < 0) { // don't wait
5333     return;
5334   } else if (time == 0 && !isAbsolute) {
5335     time = INFINITE;
5336   } else if (isAbsolute) {
5337     time -= os::javaTimeMillis(); // convert to relative time
5338     if (time <= 0) {  // already elapsed
5339       return;
5340     }
5341   } else { // relative
5342     time /= 1000000;  // Must coarsen from nanos to millis
5343     if (time == 0) {  // Wait for the minimal time unit if zero
5344       time = 1;
5345     }
5346   }
5347 
5348   JavaThread* thread = JavaThread::current();
5349 
5350   // Don't wait if interrupted or already triggered
5351   if (Thread::is_interrupted(thread, false) ||
5352       WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5353     ResetEvent(_ParkEvent);
5354     return;
5355   } else {
5356     ThreadBlockInVM tbivm(thread);
5357     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5358     thread->set_suspend_equivalent();
5359 
5360     WaitForSingleObject(_ParkEvent, time);
5361     ResetEvent(_ParkEvent);
5362 
5363     // If externally suspended while waiting, re-suspend
5364     if (thread->handle_special_suspend_equivalent_condition()) {
5365       thread->java_suspend_self();
5366     }
5367   }
5368 }
5369 
5370 void Parker::unpark() {
5371   guarantee(_ParkEvent != NULL, "invariant");
5372   SetEvent(_ParkEvent);
5373 }
5374 
5375 // Platform Monitor implementation
5376 
5377 // Must already be locked
5378 int os::PlatformMonitor::wait(jlong millis) {
5379   assert(millis >= 0, "negative timeout");
5380   int ret = OS_TIMEOUT;
5381   int status = SleepConditionVariableCS(&_cond, &_mutex,
5382                                         millis == 0 ? INFINITE : millis);
5383   if (status != 0) {
5384     ret = OS_OK;
5385   }
5386   #ifndef PRODUCT
5387   else {
5388     DWORD err = GetLastError();
5389     assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
5390   }
5391   #endif
5392   return ret;
5393 }
5394 
5395 // Run the specified command in a separate process. Return its exit value,
5396 // or -1 on failure (e.g. can't create a new process).
5397 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
5398   STARTUPINFO si;
5399   PROCESS_INFORMATION pi;
5400   DWORD exit_code;
5401 
5402   char * cmd_string;
5403   const char * cmd_prefix = "cmd /C ";
5404   size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
5405   cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
5406   if (cmd_string == NULL) {
5407     return -1;
5408   }
5409   cmd_string[0] = '\0';
5410   strcat(cmd_string, cmd_prefix);
5411   strcat(cmd_string, cmd);
5412 
5413   // now replace all '\n' with '&'
5414   char * substring = cmd_string;
5415   while ((substring = strchr(substring, '\n')) != NULL) {
5416     substring[0] = '&';
5417     substring++;
5418   }
5419   memset(&si, 0, sizeof(si));
5420   si.cb = sizeof(si);
5421   memset(&pi, 0, sizeof(pi));
5422   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5423                             cmd_string,    // command line
5424                             NULL,   // process security attribute
5425                             NULL,   // thread security attribute
5426                             TRUE,   // inherits system handles
5427                             0,      // no creation flags
5428                             NULL,   // use parent's environment block
5429                             NULL,   // use parent's starting directory
5430                             &si,    // (in) startup information
5431                             &pi);   // (out) process information
5432 
5433   if (rslt) {
5434     // Wait until child process exits.
5435     WaitForSingleObject(pi.hProcess, INFINITE);
5436 
5437     GetExitCodeProcess(pi.hProcess, &exit_code);
5438 
5439     // Close process and thread handles.
5440     CloseHandle(pi.hProcess);
5441     CloseHandle(pi.hThread);
5442   } else {
5443     exit_code = -1;
5444   }
5445 
5446   FREE_C_HEAP_ARRAY(char, cmd_string);
5447   return (int)exit_code;
5448 }
5449 
5450 bool os::find(address addr, outputStream* st) {
5451   int offset = -1;
5452   bool result = false;
5453   char buf[256];
5454   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5455     st->print(PTR_FORMAT " ", addr);
5456     if (strlen(buf) < sizeof(buf) - 1) {
5457       char* p = strrchr(buf, '\\');
5458       if (p) {
5459         st->print("%s", p + 1);
5460       } else {
5461         st->print("%s", buf);
5462       }
5463     } else {
5464       // The library name is probably truncated. Let's omit the library name.
5465       // See also JDK-8147512.
5466     }
5467     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5468       st->print("::%s + 0x%x", buf, offset);
5469     }
5470     st->cr();
5471     result = true;
5472   }
5473   return result;
5474 }
5475 
5476 static jint initSock() {
5477   WSADATA wsadata;
5478 
5479   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5480     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5481                 ::GetLastError());
5482     return JNI_ERR;
5483   }
5484   return JNI_OK;
5485 }
5486 
5487 struct hostent* os::get_host_by_name(char* name) {
5488   return (struct hostent*)gethostbyname(name);
5489 }
5490 
5491 int os::socket_close(int fd) {
5492   return ::closesocket(fd);
5493 }
5494 
5495 int os::socket(int domain, int type, int protocol) {
5496   return ::socket(domain, type, protocol);
5497 }
5498 
5499 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5500   return ::connect(fd, him, len);
5501 }
5502 
5503 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5504   return ::recv(fd, buf, (int)nBytes, flags);
5505 }
5506 
5507 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5508   return ::send(fd, buf, (int)nBytes, flags);
5509 }
5510 
5511 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5512   return ::send(fd, buf, (int)nBytes, flags);
5513 }
5514 
5515 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5516 #if defined(IA32)
5517   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5518 #elif defined (AMD64)
5519   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5520 #endif
5521 
5522 // returns true if thread could be suspended,
5523 // false otherwise
5524 static bool do_suspend(HANDLE* h) {
5525   if (h != NULL) {
5526     if (SuspendThread(*h) != ~0) {
5527       return true;
5528     }
5529   }
5530   return false;
5531 }
5532 
5533 // resume the thread
5534 // calling resume on an active thread is a no-op
5535 static void do_resume(HANDLE* h) {
5536   if (h != NULL) {
5537     ResumeThread(*h);
5538   }
5539 }
5540 
5541 // retrieve a suspend/resume context capable handle
5542 // from the tid. Caller validates handle return value.
5543 void get_thread_handle_for_extended_context(HANDLE* h,
5544                                             OSThread::thread_id_t tid) {
5545   if (h != NULL) {
5546     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5547   }
5548 }
5549 
5550 // Thread sampling implementation
5551 //
5552 void os::SuspendedThreadTask::internal_do_task() {
5553   CONTEXT    ctxt;
5554   HANDLE     h = NULL;
5555 
5556   // get context capable handle for thread
5557   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5558 
5559   // sanity
5560   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5561     return;
5562   }
5563 
5564   // suspend the thread
5565   if (do_suspend(&h)) {
5566     ctxt.ContextFlags = sampling_context_flags;
5567     // get thread context
5568     GetThreadContext(h, &ctxt);
5569     SuspendedThreadTaskContext context(_thread, &ctxt);
5570     // pass context to Thread Sampling impl
5571     do_task(context);
5572     // resume thread
5573     do_resume(&h);
5574   }
5575 
5576   // close handle
5577   CloseHandle(h);
5578 }
5579 
5580 bool os::start_debugging(char *buf, int buflen) {
5581   int len = (int)strlen(buf);
5582   char *p = &buf[len];
5583 
5584   jio_snprintf(p, buflen-len,
5585              "\n\n"
5586              "Do you want to debug the problem?\n\n"
5587              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5588              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5589              "Otherwise, select 'No' to abort...",
5590              os::current_process_id(), os::current_thread_id());
5591 
5592   bool yes = os::message_box("Unexpected Error", buf);
5593 
5594   if (yes) {
5595     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5596     // exception. If VM is running inside a debugger, the debugger will
5597     // catch the exception. Otherwise, the breakpoint exception will reach
5598     // the default windows exception handler, which can spawn a debugger and
5599     // automatically attach to the dying VM.
5600     os::breakpoint();
5601     yes = false;
5602   }
5603   return yes;
5604 }
5605 
5606 void* os::get_default_process_handle() {
5607   return (void*)GetModuleHandle(NULL);
5608 }
5609 
5610 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5611 // which is used to find statically linked in agents.
5612 // Additionally for windows, takes into account __stdcall names.
5613 // Parameters:
5614 //            sym_name: Symbol in library we are looking for
5615 //            lib_name: Name of library to look in, NULL for shared libs.
5616 //            is_absolute_path == true if lib_name is absolute path to agent
5617 //                                     such as "C:/a/b/L.dll"
5618 //            == false if only the base name of the library is passed in
5619 //               such as "L"
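     // For example (illustrative, assuming JNI_LIB_PREFIX is "" and
     // JNI_LIB_SUFFIX is ".dll" on this platform): sym_name "_Agent_OnLoad@12"
     // combined with the absolute lib_name "C:\a\b\L.dll" yields
     // "_Agent_OnLoad_L@12", while a NULL lib_name simply yields a copy of
     // sym_name.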
5620 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5621                                     bool is_absolute_path) {
5622   char *agent_entry_name;
5623   size_t len;
5624   size_t name_len;
5625   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5626   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5627   const char *start;
5628 
5629   if (lib_name != NULL) {
5630     len = name_len = strlen(lib_name);
5631     if (is_absolute_path) {
5632       // Need to strip path, prefix and suffix
5633       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5634         lib_name = ++start;
5635       } else {
5636         // Need to check for drive prefix
5637         if ((start = strchr(lib_name, ':')) != NULL) {
5638           lib_name = ++start;
5639         }
5640       }
5641       if (len <= (prefix_len + suffix_len)) {
5642         return NULL;
5643       }
5644       lib_name += prefix_len;
5645       name_len = strlen(lib_name) - suffix_len;
5646     }
5647   }
5648   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5649   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5650   if (agent_entry_name == NULL) {
5651     return NULL;
5652   }
5653   if (lib_name != NULL) {
5654     const char *p = strrchr(sym_name, '@');
5655     if (p != NULL && p != sym_name) {
5656       // sym_name == _Agent_OnLoad@XX
5657       strncpy(agent_entry_name, sym_name, (p - sym_name));
5658       agent_entry_name[(p-sym_name)] = '\0';
5659       // agent_entry_name == _Agent_OnLoad
5660       strcat(agent_entry_name, "_");
5661       strncat(agent_entry_name, lib_name, name_len);
5662       strcat(agent_entry_name, p);
5663       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5664     } else {
5665       strcpy(agent_entry_name, sym_name);
5666       strcat(agent_entry_name, "_");
5667       strncat(agent_entry_name, lib_name, name_len);
5668     }
5669   } else {
5670     strcpy(agent_entry_name, sym_name);
5671   }
5672   return agent_entry_name;
5673 }
5674 
5675 #ifndef PRODUCT
5676 
5677 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5678 // contiguous memory block at a particular address.
5679 // The test first tries to find a good approximate address to allocate at by using the same
5680 // method to allocate some memory at any address. The test then tries to allocate memory in
5681 // the vicinity (not directly after it, to avoid possible by-chance use of that location).
5682 // This is of course only a dodgy assumption; there is no guarantee that the vicinity of
5683 // the previously allocated memory is available for allocation. The only actual failure
5684 // that is reported is when the test tries to allocate at a particular location but gets a
5685 // different valid one. A NULL return value at this point is not considered an error but may
5686 // be legitimate.
5687 void TestReserveMemorySpecial_test() {
5688   if (!UseLargePages) {
5689     return;
5690   }
5691   // save current value of globals
5692   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5693   bool old_use_numa_interleaving = UseNUMAInterleaving;
5694 
5695   // set globals to make sure we hit the correct code path
5696   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5697 
5698   // do an allocation at an address selected by the OS to get a good one.
5699   const size_t large_allocation_size = os::large_page_size() * 4;
5700   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5701   if (result == NULL) {
5702   } else {
5703     os::release_memory_special(result, large_allocation_size);
5704 
5705     // allocate another page within the recently allocated memory area which seems to be a good location. At least
5706     // we managed to get it once.
5707     const size_t expected_allocation_size = os::large_page_size();
5708     char* expected_location = result + os::large_page_size();
5709     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5710     if (actual_location == NULL) {
5711     } else {
5712       // release memory
5713       os::release_memory_special(actual_location, expected_allocation_size);
5714       // only now check, after releasing any memory to avoid any leaks.
5715       assert(actual_location == expected_location,
5716              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5717              expected_location, expected_allocation_size, actual_location);
5718     }
5719   }
5720 
5721   // restore globals
5722   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5723   UseNUMAInterleaving = old_use_numa_interleaving;
5724 }
5725 #endif // PRODUCT
5726 
5727 /*
5728   All the defined signal names for Windows.
5729 
5730   NOTE that not all of these names are accepted by FindSignal!
5731 
5732   For various reasons some of these may be rejected at runtime.
5733 
5734   Here are the names currently accepted by a user of sun.misc.Signal with
5735   1.4.1 (ignoring potential interaction with use of chaining, etc):
5736 
5737      (LIST TBD)
5738 
5739 */
5740 int os::get_signal_number(const char* name) {
5741   static const struct {
5742     const char* name;
5743     int         number;
5744   } siglabels [] =
5745     // derived from version 6.0 VC98/include/signal.h
5746   {"ABRT",      SIGABRT,        // abnormal termination triggered by abort cl
5747   "FPE",        SIGFPE,         // floating point exception
5748   "SEGV",       SIGSEGV,        // segment violation
5749   "INT",        SIGINT,         // interrupt
5750   "TERM",       SIGTERM,        // software term signal from kill
5751   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5752   "ILL",        SIGILL};        // illegal instruction
5753   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5754     if (strcmp(name, siglabels[i].name) == 0) {
5755       return siglabels[i].number;
5756     }
5757   }
5758   return -1;
5759 }
5760 
5761 // Fast current thread access
5762 
5763 int os::win32::_thread_ptr_offset = 0;
5764 
5765 static void call_wrapper_dummy() {}
5766 
5767 // We need to call the os_exception_wrapper once so that it sets
5768 // up the offset from FS of the thread pointer.
5769 void os::win32::initialize_thread_ptr_offset() {
5770   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5771                            NULL, NULL, NULL, NULL);
5772 }
5773 
5774 bool os::supports_map_sync() {
5775   return false;
5776 }