1 /*
   2  * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "jvm.h"
  30 #include "classfile/classLoader.hpp"
  31 #include "classfile/systemDictionary.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/disassembler.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "logging/log.hpp"
  39 #include "logging/logStream.hpp"
  40 #include "memory/allocation.inline.hpp"
  41 #include "memory/filemap.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "os_share_windows.hpp"
  44 #include "os_windows.inline.hpp"
  45 #include "prims/jniFastGetField.hpp"
  46 #include "prims/jvm_misc.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/atomic.hpp"
  49 #include "runtime/extendedPC.hpp"
  50 #include "runtime/globals.hpp"
  51 #include "runtime/interfaceSupport.inline.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/objectMonitor.hpp"
  56 #include "runtime/orderAccess.hpp"
  57 #include "runtime/osThread.hpp"
  58 #include "runtime/perfMemory.hpp"
  59 #include "runtime/sharedRuntime.hpp"
  60 #include "runtime/statSampler.hpp"
  61 #include "runtime/stubRoutines.hpp"
  62 #include "runtime/thread.inline.hpp"
  63 #include "runtime/threadCritical.hpp"
  64 #include "runtime/timer.hpp"
  65 #include "runtime/vm_version.hpp"
  66 #include "services/attachListener.hpp"
  67 #include "services/memTracker.hpp"
  68 #include "services/runtimeService.hpp"
  69 #include "utilities/align.hpp"
  70 #include "utilities/decoder.hpp"
  71 #include "utilities/defaultStream.hpp"
  72 #include "utilities/events.hpp"
  73 #include "utilities/growableArray.hpp"
  74 #include "utilities/macros.hpp"
  75 #include "utilities/vmError.hpp"
  76 #include "symbolengine.hpp"
  77 #include "windbghelp.hpp"
  78 
  79 
  80 #ifdef _DEBUG
  81 #include <crtdbg.h>
  82 #endif
  83 
  84 
  85 #include <windows.h>
  86 #include <sys/types.h>
  87 #include <sys/stat.h>
  88 #include <sys/timeb.h>
  89 #include <objidl.h>
  90 #include <shlobj.h>
  91 
  92 #include <malloc.h>
  93 #include <signal.h>
  94 #include <direct.h>
  95 #include <errno.h>
  96 #include <fcntl.h>
  97 #include <io.h>
  98 #include <process.h>              // For _beginthreadex(), _endthreadex()
  99 #include <imagehlp.h>             // For os::dll_address_to_function_name
 100 // for enumerating dll libraries
 101 #include <vdmdbg.h>
 102 #include <psapi.h>
 103 #include <mmsystem.h>
 104 #include <winsock2.h>
 105 
 106 // for timer info max values which include all bits
 107 #define ALL_64_BITS CONST64(-1)
 108 
 109 // For DLL loading/load error detection
 110 // Values of PE COFF
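// Offset 0x3c in the DOS (MZ) header holds the 32-bit file offset of the PE
// signature ("PE\0\0", 4 bytes); the COFF file header follows immediately after it.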
 111 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 112 #define IMAGE_FILE_SIGNATURE_LENGTH 4
 113 
 114 static HANDLE main_process;
 115 static HANDLE main_thread;
 116 static int    main_thread_id;
 117 
 118 static FILETIME process_creation_time;
 119 static FILETIME process_exit_time;
 120 static FILETIME process_user_time;
 121 static FILETIME process_kernel_time;
 122 
 123 #ifdef _M_AMD64
 124   #define __CPU__ amd64
 125 #else
 126   #define __CPU__ i486
 127 #endif
 128 
 129 #if INCLUDE_AOT
 130 PVOID  topLevelVectoredExceptionHandler = NULL;
 131 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 132 #endif
 133 
 134 // save DLL module handle, used by GetModuleFileName
 135 
 136 HINSTANCE vm_lib_handle;
 137 
 138 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 139   switch (reason) {
 140   case DLL_PROCESS_ATTACH:
 141     vm_lib_handle = hinst;
 142     if (ForceTimeHighResolution) {
 143       timeBeginPeriod(1L);
 144     }
 145     WindowsDbgHelp::pre_initialize();
 146     SymbolEngine::pre_initialize();
 147     break;
 148   case DLL_PROCESS_DETACH:
 149     if (ForceTimeHighResolution) {
 150       timeEndPeriod(1L);
 151     }
 152 #if INCLUDE_AOT
 153     if (topLevelVectoredExceptionHandler != NULL) {
 154       RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
 155       topLevelVectoredExceptionHandler = NULL;
 156     }
 157 #endif
 158     break;
 159   default:
 160     break;
 161   }
 162   return true;
 163 }
 164 
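// Convert a FILETIME (a 64-bit count of 100 ns units, split across two 32-bit
// halves) into seconds, returned as a double.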
 165 static inline double fileTimeAsDouble(FILETIME* time) {
 166   const double high  = (double) ((unsigned int) ~0);
 167   const double split = 10000000.0;
 168   double result = (time->dwLowDateTime / split) +
 169                    time->dwHighDateTime * (high/split);
 170   return result;
 171 }
 172 
 173 // Implementation of os
 174 
 175 bool os::unsetenv(const char* name) {
 176   assert(name != NULL, "Null pointer");
 177   return (SetEnvironmentVariable(name, NULL) == TRUE);
 178 }
 179 
 180 // No setuid programs under Windows.
 181 bool os::have_special_privileges() {
 182   return false;
 183 }
 184 
 185 
 186 // This method is a periodic task to check for misbehaving JNI applications
 187 // under CheckJNI; we can add any periodic checks here.
 188 // On Windows it currently does nothing.
 189 void os::run_periodic_checks() {
 190   return;
 191 }
 192 
 193 // previous UnhandledExceptionFilter, if there is one
 194 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 195 
 196 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 197 
 198 void os::init_system_properties_values() {
 199   // sysclasspath, java_home, dll_dir
 200   {
 201     char *home_path;
 202     char *dll_path;
 203     char *pslash;
 204     const char *bin = "\\bin";
 205     char home_dir[MAX_PATH + 1];
 206     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 207 
 208     if (alt_home_dir != NULL)  {
 209       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 210       home_dir[MAX_PATH] = '\0';
 211     } else {
 212       os::jvm_path(home_dir, sizeof(home_dir));
 213       // Found the full path to jvm.dll.
 214       // Now cut the path to <java_home>/jre if we can.
 215       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 216       pslash = strrchr(home_dir, '\\');
 217       if (pslash != NULL) {
 218         *pslash = '\0';                   // get rid of \{client|server}
 219         pslash = strrchr(home_dir, '\\');
 220         if (pslash != NULL) {
 221           *pslash = '\0';                 // get rid of \bin
 222         }
 223       }
 224     }
 225 
 226     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 227     if (home_path == NULL) {
 228       return;
 229     }
 230     strcpy(home_path, home_dir);
 231     Arguments::set_java_home(home_path);
 232     FREE_C_HEAP_ARRAY(char, home_path);
 233 
 234     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 235                                 mtInternal);
 236     if (dll_path == NULL) {
 237       return;
 238     }
 239     strcpy(dll_path, home_dir);
 240     strcat(dll_path, bin);
 241     Arguments::set_dll_dir(dll_path);
 242     FREE_C_HEAP_ARRAY(char, dll_path);
 243 
 244     if (!set_boot_path('\\', ';')) {
 245       vm_exit_during_initialization("Failed setting boot class path.", NULL);
 246     }
 247   }
 248 
 249 // library_path
 250 #define EXT_DIR "\\lib\\ext"
 251 #define BIN_DIR "\\bin"
 252 #define PACKAGE_DIR "\\Sun\\Java"
 253   {
 254     // Win32 library search order (See the documentation for LoadLibrary):
 255     //
 256     // 1. The directory from which the application is loaded.
 257     // 2. The system wide Java Extensions directory (Java only)
 258     // 3. System directory (GetSystemDirectory)
 259     // 4. Windows directory (GetWindowsDirectory)
 260     // 5. The PATH environment variable
 261     // 6. The current directory
 262 
 263     char *library_path;
 264     char tmp[MAX_PATH];
 265     char *path_str = ::getenv("PATH");
 266 
 267     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 268                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 269 
 270     library_path[0] = '\0';
 271 
 272     GetModuleFileName(NULL, tmp, sizeof(tmp));
 273     *(strrchr(tmp, '\\')) = '\0';
 274     strcat(library_path, tmp);
 275 
 276     GetWindowsDirectory(tmp, sizeof(tmp));
 277     strcat(library_path, ";");
 278     strcat(library_path, tmp);
 279     strcat(library_path, PACKAGE_DIR BIN_DIR);
 280 
 281     GetSystemDirectory(tmp, sizeof(tmp));
 282     strcat(library_path, ";");
 283     strcat(library_path, tmp);
 284 
 285     GetWindowsDirectory(tmp, sizeof(tmp));
 286     strcat(library_path, ";");
 287     strcat(library_path, tmp);
 288 
 289     if (path_str) {
 290       strcat(library_path, ";");
 291       strcat(library_path, path_str);
 292     }
 293 
 294     strcat(library_path, ";.");
 295 
 296     Arguments::set_library_path(library_path);
 297     FREE_C_HEAP_ARRAY(char, library_path);
 298   }
 299 
 300   // Default extensions directory
 301   {
 302     char path[MAX_PATH];
 303     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 304     GetWindowsDirectory(path, MAX_PATH);
 305     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 306             path, PACKAGE_DIR, EXT_DIR);
 307     Arguments::set_ext_dirs(buf);
 308   }
 309   #undef EXT_DIR
 310   #undef BIN_DIR
 311   #undef PACKAGE_DIR
 312 
 313 #ifndef _WIN64
 314   // set our UnhandledExceptionFilter and save any previous one
 315   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 316 #endif
 317 
 318   // Done
 319   return;
 320 }
 321 
 322 void os::breakpoint() {
 323   DebugBreak();
 324 }
 325 
 326 // Invoked from the BREAKPOINT Macro
 327 extern "C" void breakpoint() {
 328   os::breakpoint();
 329 }
 330 
 331 // The RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 332 // So far, this method is only used by Native Memory Tracking, which is
 333 // only supported on Windows XP or later.
 334 //
 335 int os::get_native_stack(address* stack, int frames, int toSkip) {
 336   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 337   for (int index = captured; index < frames; index ++) {
 338     stack[index] = NULL;
 339   }
 340   return captured;
 341 }
 342 
 343 
 344 // os::current_stack_base()
 345 //
 346 //   Returns the base of the stack, which is the stack's
 347 //   starting address.  This function must be called
 348 //   while running on the stack of the thread being queried.
 349 
 350 address os::current_stack_base() {
 351   MEMORY_BASIC_INFORMATION minfo;
 352   address stack_bottom;
 353   size_t stack_size;
 354 
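  // Query the region containing a local variable (&minfo); since minfo lives on
  // this thread's stack, the query describes the current stack's reserved area.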
 355   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 356   stack_bottom =  (address)minfo.AllocationBase;
 357   stack_size = minfo.RegionSize;
 358 
 359   // Add up the sizes of all the regions with the same
 360   // AllocationBase.
 361   while (1) {
 362     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 363     if (stack_bottom == (address)minfo.AllocationBase) {
 364       stack_size += minfo.RegionSize;
 365     } else {
 366       break;
 367     }
 368   }
 369   return stack_bottom + stack_size;
 370 }
 371 
 372 size_t os::current_stack_size() {
 373   size_t sz;
 374   MEMORY_BASIC_INFORMATION minfo;
 375   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 376   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 377   return sz;
 378 }
 379 
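// Walk the range [start, start + size) with VirtualQuery and report the first
// contiguous committed sub-range found via committed_start/committed_size.
// Returns false if no committed memory lies within the range.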
 380 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
 381   MEMORY_BASIC_INFORMATION minfo;
 382   committed_start = NULL;
 383   committed_size = 0;
 384   address top = start + size;
 385   const address start_addr = start;
 386   while (start < top) {
 387     VirtualQuery(start, &minfo, sizeof(minfo));
 388     if ((minfo.State & MEM_COMMIT) == 0) {  // not committed
 389       if (committed_start != NULL) {
 390         break;
 391       }
 392     } else {  // committed
 393       if (committed_start == NULL) {
 394         committed_start = start;
 395       }
 396       size_t offset = start - (address)minfo.BaseAddress;
 397       committed_size += minfo.RegionSize - offset;
 398     }
 399     start = (address)minfo.BaseAddress + minfo.RegionSize;
 400   }
 401 
 402   if (committed_start == NULL) {
 403     assert(committed_size == 0, "Sanity");
 404     return false;
 405   } else {
 406     assert(committed_start >= start_addr && committed_start < top, "Out of range");
 407     // current region may go beyond the limit, trim to the limit
 408     committed_size = MIN2(committed_size, size_t(top - committed_start));
 409     return true;
 410   }
 411 }
 412 
 413 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 414   const struct tm* time_struct_ptr = localtime(clock);
 415   if (time_struct_ptr != NULL) {
 416     *res = *time_struct_ptr;
 417     return res;
 418   }
 419   return NULL;
 420 }
 421 
 422 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 423   const struct tm* time_struct_ptr = gmtime(clock);
 424   if (time_struct_ptr != NULL) {
 425     *res = *time_struct_ptr;
 426     return res;
 427   }
 428   return NULL;
 429 }
 430 
 431 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 432 
 433 // Thread start routine for all newly created threads
 434 static unsigned __stdcall thread_native_entry(Thread* thread) {
 435 
 436   thread->record_stack_base_and_size();
 437 
 438   // Try to randomize the cache line index of hot stack frames.
 439   // This helps when threads with the same stack traces evict each other's
 440   // cache lines. The threads can be either from the same JVM instance, or
 441   // from different JVM instances. The benefit is especially noticeable on
 442   // processors with hyperthreading technology.
 443   static int counter = 0;
 444   int pid = os::current_process_id();
 445   _alloca(((pid ^ counter++) & 7) * 128);
 446 
 447   thread->initialize_thread_current();
 448 
 449   OSThread* osthr = thread->osthread();
 450   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 451 
 452   if (UseNUMA) {
 453     int lgrp_id = os::numa_get_group_id();
 454     if (lgrp_id != -1) {
 455       thread->set_lgrp_id(lgrp_id);
 456     }
 457   }
 458 
 459   // Diagnostic code to investigate JDK-6573254
 460   int res = 30115;  // non-java thread
 461   if (thread->is_Java_thread()) {
 462     res = 20115;    // java thread
 463   }
 464 
 465   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 466 
 467   // Install a win32 structured exception handler around every thread created
 468   // by the VM, so the VM can generate an error dump when an exception occurs
 469   // in a non-Java thread (e.g. the VM thread).
 470   __try {
 471     thread->call_run();
 472   } __except(topLevelExceptionFilter(
 473                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 474     // Nothing to do.
 475   }
 476 
 477   // Note: at this point the thread object may already have deleted itself.
 478   // Do not dereference it from here on out.
 479 
 480   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 481 
 482   // One less thread is executing
 483   // When the VMThread gets here, the main thread may have already exited
 484   // which frees the CodeHeap containing the Atomic::add code
 485   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 486     Atomic::dec(&os::win32::_os_thread_count);
 487   }
 488 
 489   // Thread must not return from exit_process_or_thread(), but if it does,
 490   // let it proceed to exit normally
 491   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 492 }
 493 
 494 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 495                                   int thread_id) {
 496   // Allocate the OSThread object
 497   OSThread* osthread = new OSThread(NULL, NULL);
 498   if (osthread == NULL) return NULL;
 499 
 500   // Initialize the JDK library's interrupt event.
 501   // This should really be done when OSThread is constructed,
 502   // but there is no way for a constructor to report failure to
 503   // allocate the event.
 504   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 505   if (interrupt_event == NULL) {
 506     delete osthread;
 507     return NULL;
 508   }
 509   osthread->set_interrupt_event(interrupt_event);
 510 
 511   // Store info on the Win32 thread into the OSThread
 512   osthread->set_thread_handle(thread_handle);
 513   osthread->set_thread_id(thread_id);
 514 
 515   if (UseNUMA) {
 516     int lgrp_id = os::numa_get_group_id();
 517     if (lgrp_id != -1) {
 518       thread->set_lgrp_id(lgrp_id);
 519     }
 520   }
 521 
 522   // Initial thread state is INITIALIZED, not SUSPENDED
 523   osthread->set_state(INITIALIZED);
 524 
 525   return osthread;
 526 }
 527 
 528 
 529 bool os::create_attached_thread(JavaThread* thread) {
 530 #ifdef ASSERT
 531   thread->verify_not_published();
 532 #endif
 533   HANDLE thread_h;
 534   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 535                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 536     fatal("DuplicateHandle failed\n");
 537   }
 538   OSThread* osthread = create_os_thread(thread, thread_h,
 539                                         (int)current_thread_id());
 540   if (osthread == NULL) {
 541     return false;
 542   }
 543 
 544   // Initial thread state is RUNNABLE
 545   osthread->set_state(RUNNABLE);
 546 
 547   thread->set_osthread(osthread);
 548 
 549   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 550     os::current_thread_id());
 551 
 552   return true;
 553 }
 554 
 555 bool os::create_main_thread(JavaThread* thread) {
 556 #ifdef ASSERT
 557   thread->verify_not_published();
 558 #endif
 559   if (_starting_thread == NULL) {
 560     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 561     if (_starting_thread == NULL) {
 562       return false;
 563     }
 564   }
 565 
 566   // The primordial thread is runnable from the start
 567   _starting_thread->set_state(RUNNABLE);
 568 
 569   thread->set_osthread(_starting_thread);
 570   return true;
 571 }
 572 
 573 // Helper function to trace _beginthreadex attributes,
 574 //  similar to os::Posix::describe_pthread_attr()
 575 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 576                                                size_t stacksize, unsigned initflag) {
 577   stringStream ss(buf, buflen);
 578   if (stacksize == 0) {
 579     ss.print("stacksize: default, ");
 580   } else {
 581     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 582   }
 583   ss.print("flags: ");
 584   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 585   #define ALL(X) \
 586     X(CREATE_SUSPENDED) \
 587     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 588   ALL(PRINT_FLAG)
 589   #undef ALL
 590   #undef PRINT_FLAG
 591   return buf;
 592 }
 593 
 594 // Allocate and initialize a new OSThread
 595 bool os::create_thread(Thread* thread, ThreadType thr_type,
 596                        size_t stack_size) {
 597   unsigned thread_id;
 598 
 599   // Allocate the OSThread object
 600   OSThread* osthread = new OSThread(NULL, NULL);
 601   if (osthread == NULL) {
 602     return false;
 603   }
 604 
 605   // Initialize the JDK library's interrupt event.
 606   // This should really be done when OSThread is constructed,
 607   // but there is no way for a constructor to report failure to
 608   // allocate the event.
 609   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 610   if (interrupt_event == NULL) {
 611     delete osthread;
 612     return false;
 613   }
 614   osthread->set_interrupt_event(interrupt_event);
 615   osthread->set_interrupted(false);
 616 
 617   thread->set_osthread(osthread);
 618 
 619   if (stack_size == 0) {
 620     switch (thr_type) {
 621     case os::java_thread:
 622       // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag
 623       if (JavaThread::stack_size_at_create() > 0) {
 624         stack_size = JavaThread::stack_size_at_create();
 625       }
 626       break;
 627     case os::compiler_thread:
 628       if (CompilerThreadStackSize > 0) {
 629         stack_size = (size_t)(CompilerThreadStackSize * K);
 630         break;
 631       } // else fall through:
 632         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 633     case os::vm_thread:
 634     case os::pgc_thread:
 635     case os::cgc_thread:
 636     case os::watcher_thread:
 637       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 638       break;
 639     }
 640   }
 641 
 642   // Create the Win32 thread
 643   //
 644   // Contrary to what the MSDN documentation says, "stack_size" in _beginthreadex()
 645   // does not specify stack size. Instead, it specifies the size of
 646   // initially committed space. The stack size is determined by
 647   // PE header in the executable. If the committed "stack_size" is larger
 648   // than default value in the PE header, the stack is rounded up to the
 649   // nearest multiple of 1MB. For example if the launcher has default
 650   // stack size of 320k, specifying any size less than 320k does not
 651   // affect the actual stack size at all, it only affects the initial
 652   // commitment. On the other hand, specifying 'stack_size' larger than
 653   // default value may cause significant increase in memory usage, because
 654   // not only the stack space will be rounded up to MB, but also the
 655   // entire space is committed upfront.
 656   //
 657   // Finally, Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
 658   // for CreateThread() that treats 'stack_size' as the stack size. However, we
 659   // are not supposed to call CreateThread() directly according to the MSDN
 660   // documentation, because the JVM uses the C runtime library. The good news is
 661   // that the flag appears to work with _beginthreadex() as well.
 662 
 663   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 664   HANDLE thread_handle =
 665     (HANDLE)_beginthreadex(NULL,
 666                            (unsigned)stack_size,
 667                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 668                            thread,
 669                            initflag,
 670                            &thread_id);
 671 
 672   char buf[64];
 673   if (thread_handle != NULL) {
 674     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 675       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 676   } else {
 677     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 678       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 679     // Log some OS information which might explain why creating the thread failed.
 680     log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
 681     LogStream st(Log(os, thread)::info());
 682     os::print_memory_info(&st);
 683   }
 684 
 685   if (thread_handle == NULL) {
 686     // Need to clean up stuff we've allocated so far
 687     CloseHandle(osthread->interrupt_event());
 688     thread->set_osthread(NULL);
 689     delete osthread;
 690     return false;
 691   }
 692 
 693   Atomic::inc(&os::win32::_os_thread_count);
 694 
 695   // Store info on the Win32 thread into the OSThread
 696   osthread->set_thread_handle(thread_handle);
 697   osthread->set_thread_id(thread_id);
 698 
 699   // Initial thread state is INITIALIZED, not SUSPENDED
 700   osthread->set_state(INITIALIZED);
 701 
 702   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 703   return true;
 704 }
 705 
 706 
 707 // Free Win32 resources related to the OSThread
 708 void os::free_thread(OSThread* osthread) {
 709   assert(osthread != NULL, "osthread not set");
 710 
 711   // We are told to free resources of the argument thread,
 712   // but we can only really operate on the current thread.
 713   assert(Thread::current()->osthread() == osthread,
 714          "os::free_thread but not current thread");
 715 
 716   CloseHandle(osthread->thread_handle());
 717   CloseHandle(osthread->interrupt_event());
 718   delete osthread;
 719 }
 720 
 721 static jlong first_filetime;
 722 static jlong initial_performance_count;
 723 static jlong performance_frequency;
 724 
 725 
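// Combine the two 32-bit halves of a LARGE_INTEGER into a single jlong value.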
 726 jlong as_long(LARGE_INTEGER x) {
 727   jlong result = 0; // initialization to avoid warning
 728   set_high(&result, x.HighPart);
 729   set_low(&result, x.LowPart);
 730   return result;
 731 }
 732 
 733 
 734 jlong os::elapsed_counter() {
 735   LARGE_INTEGER count;
 736   QueryPerformanceCounter(&count);
 737   return as_long(count) - initial_performance_count;
 738 }
 739 
 740 
 741 jlong os::elapsed_frequency() {
 742   return performance_frequency;
 743 }
 744 
 745 
 746 julong os::available_memory() {
 747   return win32::available_memory();
 748 }
 749 
 750 julong os::win32::available_memory() {
 751   // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an incorrect
 752   // value if total memory is larger than 4GB.
 753   MEMORYSTATUSEX ms;
 754   ms.dwLength = sizeof(ms);
 755   GlobalMemoryStatusEx(&ms);
 756 
 757   return (julong)ms.ullAvailPhys;
 758 }
 759 
 760 julong os::physical_memory() {
 761   return win32::physical_memory();
 762 }
 763 
 764 bool os::has_allocatable_memory_limit(julong* limit) {
 765   MEMORYSTATUSEX ms;
 766   ms.dwLength = sizeof(ms);
 767   GlobalMemoryStatusEx(&ms);
 768 #ifdef _LP64
 769   *limit = (julong)ms.ullAvailVirtual;
 770   return true;
 771 #else
 772   // Limit to 1400m because of the 2gb address space wall
 773   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 774   return true;
 775 #endif
 776 }
 777 
 778 int os::active_processor_count() {
 779   // User has overridden the number of active processors
 780   if (ActiveProcessorCount > 0) {
 781     log_trace(os)("active_processor_count: "
 782                   "active processor count set by user : %d",
 783                   ActiveProcessorCount);
 784     return ActiveProcessorCount;
 785   }
 786 
 787   DWORD_PTR lpProcessAffinityMask = 0;
 788   DWORD_PTR lpSystemAffinityMask = 0;
 789   int proc_count = processor_count();
 790   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 791       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
 792     // The number of active processors is the number of set bits in the process affinity mask
 793     int bitcount = 0;
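    // Kernighan's bit-count: clearing the lowest set bit on each iteration
    // counts the set bits in the affinity mask.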
 794     while (lpProcessAffinityMask != 0) {
 795       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 796       bitcount++;
 797     }
 798     return bitcount;
 799   } else {
 800     return proc_count;
 801   }
 802 }
 803 
 804 void os::set_native_thread_name(const char *name) {
 805 
 806   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 807   //
 808   // Note that unfortunately this only works if the process
 809   // is already attached to a debugger; the debugger must observe
 810   // the exception below to show the correct name.
 811 
 812   // If there is no debugger attached skip raising the exception
 813   if (!IsDebuggerPresent()) {
 814     return;
 815   }
 816 
 817   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 818   struct {
 819     DWORD dwType;     // must be 0x1000
 820     LPCSTR szName;    // pointer to name (in user addr space)
 821     DWORD dwThreadID; // thread ID (-1=caller thread)
 822     DWORD dwFlags;    // reserved for future use, must be zero
 823   } info;
 824 
 825   info.dwType = 0x1000;
 826   info.szName = name;
 827   info.dwThreadID = -1;
 828   info.dwFlags = 0;
 829 
 830   __try {
 831     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 832   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 833 }
 834 
 835 bool os::distribute_processes(uint length, uint* distribution) {
 836   // Not yet implemented.
 837   return false;
 838 }
 839 
 840 bool os::bind_to_processor(uint processor_id) {
 841   // Not yet implemented.
 842   return false;
 843 }
 844 
 845 void os::win32::initialize_performance_counter() {
 846   LARGE_INTEGER count;
 847   QueryPerformanceFrequency(&count);
 848   performance_frequency = as_long(count);
 849   QueryPerformanceCounter(&count);
 850   initial_performance_count = as_long(count);
 851 }
 852 
 853 
 854 double os::elapsedTime() {
 855   return (double) elapsed_counter() / (double) elapsed_frequency();
 856 }
 857 
 858 
 859 // Windows format:
 860 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 861 // Java format:
 862 //   Java standards require the number of milliseconds since 1/1/1970
 863 
 864 // Constant offset - calculated using offset()
 865 static jlong  _offset   = 116444736000000000;
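// A quick sanity check of the constant: 1601-01-01 to 1970-01-01 spans 369 years
// containing 89 leap days, i.e. 369 * 365 + 89 = 134774 days = 11644473600 seconds;
// multiplied by 10^7 (100 ns ticks per second) this gives 116444736000000000.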
 866 // Fake time counter for reproducible results when debugging
 867 static jlong  fake_time = 0;
 868 
 869 #ifdef ASSERT
 870 // Just to be safe, recalculate the offset in debug mode
 871 static jlong _calculated_offset = 0;
 872 static int   _has_calculated_offset = 0;
 873 
 874 jlong offset() {
 875   if (_has_calculated_offset) return _calculated_offset;
 876   SYSTEMTIME java_origin;
 877   java_origin.wYear          = 1970;
 878   java_origin.wMonth         = 1;
 879   java_origin.wDayOfWeek     = 0; // ignored
 880   java_origin.wDay           = 1;
 881   java_origin.wHour          = 0;
 882   java_origin.wMinute        = 0;
 883   java_origin.wSecond        = 0;
 884   java_origin.wMilliseconds  = 0;
 885   FILETIME jot;
 886   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 887     fatal("Error = %d\nWindows error", GetLastError());
 888   }
 889   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 890   _has_calculated_offset = 1;
 891   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 892   return _calculated_offset;
 893 }
 894 #else
 895 jlong offset() {
 896   return _offset;
 897 }
 898 #endif
 899 
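// Convert a FILETIME to Java milliseconds since the 1970 epoch: subtract the
// 1601-to-1970 offset, then divide by 10,000 (the number of 100 ns ticks per ms).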
 900 jlong windows_to_java_time(FILETIME wt) {
 901   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 902   return (a - offset()) / 10000;
 903 }
 904 
 905 // Returns time in ticks of 100 nanoseconds (i.e. tenths of a microsecond)
 906 jlong windows_to_time_ticks(FILETIME wt) {
 907   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 908   return (a - offset());
 909 }
 910 
 911 FILETIME java_to_windows_time(jlong l) {
 912   jlong a = (l * 10000) + offset();
 913   FILETIME result;
 914   result.dwHighDateTime = high(a);
 915   result.dwLowDateTime  = low(a);
 916   return result;
 917 }
 918 
 919 bool os::supports_vtime() { return true; }
 920 bool os::enable_vtime() { return false; }
 921 bool os::vtime_enabled() { return false; }
 922 
 923 double os::elapsedVTime() {
 924   FILETIME created;
 925   FILETIME exited;
 926   FILETIME kernel;
 927   FILETIME user;
 928   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 929     // the resolution of windows_to_java_time() should be sufficient (ms)
 930     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 931   } else {
 932     return elapsedTime();
 933   }
 934 }
 935 
 936 jlong os::javaTimeMillis() {
 937   FILETIME wt;
 938   GetSystemTimeAsFileTime(&wt);
 939   return windows_to_java_time(wt);
 940 }
 941 
 942 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 943   FILETIME wt;
 944   GetSystemTimeAsFileTime(&wt);
 945   jlong ticks = windows_to_time_ticks(wt); // 10th of micros
 946   jlong secs = jlong(ticks / 10000000); // 10,000,000 (100 ns) ticks per second
 947   seconds = secs;
 948   nanos = jlong(ticks - (secs*10000000)) * 100;
 949 }
 950 
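// Monotonic elapsed time: the raw QueryPerformanceCounter value scaled by the
// counter frequency to nanoseconds; unrelated to wall-clock time.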
 951 jlong os::javaTimeNanos() {
 952     LARGE_INTEGER current_count;
 953     QueryPerformanceCounter(&current_count);
 954     double current = as_long(current_count);
 955     double freq = performance_frequency;
 956     jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 957     return time;
 958 }
 959 
 960 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 961   jlong freq = performance_frequency;
 962   if (freq < NANOSECS_PER_SEC) {
 963     // the performance counter is 64 bits and we will
 964     // be multiplying it -- so no wrap in 64 bits
 965     info_ptr->max_value = ALL_64_BITS;
 966   } else if (freq > NANOSECS_PER_SEC) {
 967     // use the max value the counter can reach to
 968     // determine the max value which could be returned
 969     julong max_counter = (julong)ALL_64_BITS;
 970     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 971   } else {
 972     // the performance counter is 64 bits and we will
 973     // be using it directly -- so no wrap in 64 bits
 974     info_ptr->max_value = ALL_64_BITS;
 975   }
 976 
 977   // using a counter, so no skipping
 978   info_ptr->may_skip_backward = false;
 979   info_ptr->may_skip_forward = false;
 980 
 981   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 982 }
 983 
 984 char* os::local_time_string(char *buf, size_t buflen) {
 985   SYSTEMTIME st;
 986   GetLocalTime(&st);
 987   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 988                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 989   return buf;
 990 }
 991 
 992 bool os::getTimesSecs(double* process_real_time,
 993                       double* process_user_time,
 994                       double* process_system_time) {
 995   HANDLE h_process = GetCurrentProcess();
 996   FILETIME create_time, exit_time, kernel_time, user_time;
 997   BOOL result = GetProcessTimes(h_process,
 998                                 &create_time,
 999                                 &exit_time,
1000                                 &kernel_time,
1001                                 &user_time);
1002   if (result != 0) {
1003     FILETIME wt;
1004     GetSystemTimeAsFileTime(&wt);
1005     jlong rtc_millis = windows_to_java_time(wt);
1006     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
1007     *process_user_time =
1008       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
1009     *process_system_time =
1010       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
1011     return true;
1012   } else {
1013     return false;
1014   }
1015 }
1016 
1017 void os::shutdown() {
1018   // allow PerfMemory to attempt cleanup of any persistent resources
1019   perfMemory_exit();
1020 
1021   // flush buffered output, finish log files
1022   ostream_abort();
1023 
1024   // Check for abort hook
1025   abort_hook_t abort_hook = Arguments::abort_hook();
1026   if (abort_hook != NULL) {
1027     abort_hook();
1028   }
1029 }
1030 
1031 
1032 static HANDLE dumpFile = NULL;
1033 
1034 // Check if dump file can be created.
1035 void os::check_dump_limit(char* buffer, size_t buffsz) {
1036   bool status = true;
1037   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1038     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1039     status = false;
1040   }
1041 
1042 #ifndef ASSERT
1043   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1044     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1045     status = false;
1046   }
1047 #endif
1048 
1049   if (status) {
1050     const char* cwd = get_current_directory(NULL, 0);
1051     int pid = current_process_id();
1052     if (cwd != NULL) {
1053       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1054     } else {
1055       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1056     }
1057 
1058     if (dumpFile == NULL &&
1059        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1060                  == INVALID_HANDLE_VALUE) {
1061       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1062       status = false;
1063     }
1064   }
1065   VMError::record_coredump_status(buffer, status);
1066 }
1067 
1068 void os::abort(bool dump_core, void* siginfo, const void* context) {
1069   EXCEPTION_POINTERS ep;
1070   MINIDUMP_EXCEPTION_INFORMATION mei;
1071   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1072 
1073   HANDLE hProcess = GetCurrentProcess();
1074   DWORD processId = GetCurrentProcessId();
1075   MINIDUMP_TYPE dumpType;
1076 
1077   shutdown();
1078   if (!dump_core || dumpFile == NULL) {
1079     if (dumpFile != NULL) {
1080       CloseHandle(dumpFile);
1081     }
1082     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1083   }
1084 
1085   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1086     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1087 
1088   if (siginfo != NULL && context != NULL) {
1089     ep.ContextRecord = (PCONTEXT) context;
1090     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1091 
1092     mei.ThreadId = GetCurrentThreadId();
1093     mei.ExceptionPointers = &ep;
1094     pmei = &mei;
1095   } else {
1096     pmei = NULL;
1097   }
1098 
1099   // Older versions of dbghelp.dll (the one shipped with Win2003, for example) may not support all
1100   // the dump types we really want. If the first call fails, fall back to using just MiniDumpWithFullMemory.
1101   if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
1102       !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
1103     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1104   }
1105   CloseHandle(dumpFile);
1106   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1107 }
1108 
1109 // Die immediately, no exit hook, no abort hook, no cleanup.
1110 void os::die() {
1111   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1112 }
1113 
1114 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1115 //  * dirent_md.c       1.15 00/02/02
1116 //
1117 // The declarations for DIR and struct dirent are in jvm_win32.h.
1118 
1119 // Caller must have already run dirname through JVM_NativePath, which removes
1120 // duplicate slashes and converts all instances of '/' into '\\'.
1121 
1122 DIR * os::opendir(const char *dirname) {
1123   assert(dirname != NULL, "just checking");   // hotspot change
1124   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1125   DWORD fattr;                                // hotspot change
1126   char alt_dirname[4] = { 0, 0, 0, 0 };
1127 
1128   if (dirp == 0) {
1129     errno = ENOMEM;
1130     return 0;
1131   }
1132 
1133   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1134   // as a directory in FindFirstFile().  We detect this case here and
1135   // prepend the current drive name.
1136   //
1137   if (dirname[1] == '\0' && dirname[0] == '\\') {
1138     alt_dirname[0] = _getdrive() + 'A' - 1;
1139     alt_dirname[1] = ':';
1140     alt_dirname[2] = '\\';
1141     alt_dirname[3] = '\0';
1142     dirname = alt_dirname;
1143   }
1144 
1145   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1146   if (dirp->path == 0) {
1147     free(dirp);
1148     errno = ENOMEM;
1149     return 0;
1150   }
1151   strcpy(dirp->path, dirname);
1152 
1153   fattr = GetFileAttributes(dirp->path);
1154   if (fattr == 0xffffffff) {
1155     free(dirp->path);
1156     free(dirp);
1157     errno = ENOENT;
1158     return 0;
1159   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1160     free(dirp->path);
1161     free(dirp);
1162     errno = ENOTDIR;
1163     return 0;
1164   }
1165 
1166   // Append "*.*", or possibly "\\*.*", to path
1167   if (dirp->path[1] == ':' &&
1168       (dirp->path[2] == '\0' ||
1169       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1170     // No '\\' needed for cases like "Z:" or "Z:\"
1171     strcat(dirp->path, "*.*");
1172   } else {
1173     strcat(dirp->path, "\\*.*");
1174   }
1175 
1176   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1177   if (dirp->handle == INVALID_HANDLE_VALUE) {
1178     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1179       free(dirp->path);
1180       free(dirp);
1181       errno = EACCES;
1182       return 0;
1183     }
1184   }
1185   return dirp;
1186 }
1187 
1188 struct dirent * os::readdir(DIR *dirp) {
1189   assert(dirp != NULL, "just checking");      // hotspot change
1190   if (dirp->handle == INVALID_HANDLE_VALUE) {
1191     return NULL;
1192   }
1193 
1194   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1195 
1196   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1197     if (GetLastError() == ERROR_INVALID_HANDLE) {
1198       errno = EBADF;
1199       return NULL;
1200     }
1201     FindClose(dirp->handle);
1202     dirp->handle = INVALID_HANDLE_VALUE;
1203   }
1204 
1205   return &dirp->dirent;
1206 }
1207 
1208 int os::closedir(DIR *dirp) {
1209   assert(dirp != NULL, "just checking");      // hotspot change
1210   if (dirp->handle != INVALID_HANDLE_VALUE) {
1211     if (!FindClose(dirp->handle)) {
1212       errno = EBADF;
1213       return -1;
1214     }
1215     dirp->handle = INVALID_HANDLE_VALUE;
1216   }
1217   free(dirp->path);
1218   free(dirp);
1219   return 0;
1220 }
1221 
1222 // This must be hard coded because it's the system's temporary
1223 // directory, not the Java application's temp directory (a la java.io.tmpdir).
1224 const char* os::get_temp_directory() {
1225   static char path_buf[MAX_PATH];
1226   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1227     return path_buf;
1228   } else {
1229     path_buf[0] = '\0';
1230     return path_buf;
1231   }
1232 }
1233 
1234 // Needs to be in the OS-specific directory because Windows requires the
1235 // additional header file <direct.h>.
1236 const char* os::get_current_directory(char *buf, size_t buflen) {
1237   int n = static_cast<int>(buflen);
1238   if (buflen > INT_MAX)  n = INT_MAX;
1239   return _getcwd(buf, n);
1240 }
1241 
1242 //-----------------------------------------------------------
1243 // Helper functions for fatal error handler
1244 #ifdef _WIN64
1245 // Helper routine which returns true if the address is
1246 // within the NTDLL address space.
1247 //
1248 static bool _addr_in_ntdll(address addr) {
1249   HMODULE hmod;
1250   MODULEINFO minfo;
1251 
1252   hmod = GetModuleHandle("NTDLL.DLL");
1253   if (hmod == NULL) return false;
1254   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1255                                           &minfo, sizeof(MODULEINFO))) {
1256     return false;
1257   }
1258 
1259   if ((addr >= minfo.lpBaseOfDll) &&
1260       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1261     return true;
1262   } else {
1263     return false;
1264   }
1265 }
1266 #endif
1267 
1268 struct _modinfo {
1269   address addr;
1270   char*   full_path;   // point to a char buffer
1271   int     buflen;      // size of the buffer
1272   address base_addr;
1273 };
1274 
1275 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1276                                   address top_address, void * param) {
1277   struct _modinfo *pmod = (struct _modinfo *)param;
1278   if (!pmod) return -1;
1279 
1280   if (base_addr   <= pmod->addr &&
1281       top_address > pmod->addr) {
1282     // if a buffer is provided, copy path name to the buffer
1283     if (pmod->full_path) {
1284       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1285     }
1286     pmod->base_addr = base_addr;
1287     return 1;
1288   }
1289   return 0;
1290 }
1291 
1292 bool os::dll_address_to_library_name(address addr, char* buf,
1293                                      int buflen, int* offset) {
1294   // buf is not optional, but offset is optional
1295   assert(buf != NULL, "sanity check");
1296 
1297 // NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
1298 //       return the full path to the DLL file; sometimes it returns the path
1299 //       to the corresponding PDB file (debug info), and sometimes it only
1300 //       returns a partial path, which makes life painful.
1301 
1302   struct _modinfo mi;
1303   mi.addr      = addr;
1304   mi.full_path = buf;
1305   mi.buflen    = buflen;
1306   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1307     // buf already contains path name
1308     if (offset) *offset = addr - mi.base_addr;
1309     return true;
1310   }
1311 
1312   buf[0] = '\0';
1313   if (offset) *offset = -1;
1314   return false;
1315 }
1316 
1317 bool os::dll_address_to_function_name(address addr, char *buf,
1318                                       int buflen, int *offset,
1319                                       bool demangle) {
1320   // buf is not optional, but offset is optional
1321   assert(buf != NULL, "sanity check");
1322 
1323   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1324     return true;
1325   }
1326   if (offset != NULL)  *offset  = -1;
1327   buf[0] = '\0';
1328   return false;
1329 }
1330 
1331 // save the start and end address of jvm.dll into param[0] and param[1]
1332 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1333                            address top_address, void * param) {
1334   if (!param) return -1;
1335 
1336   if (base_addr   <= (address)_locate_jvm_dll &&
1337       top_address > (address)_locate_jvm_dll) {
1338     ((address*)param)[0] = base_addr;
1339     ((address*)param)[1] = top_address;
1340     return 1;
1341   }
1342   return 0;
1343 }
1344 
1345 address vm_lib_location[2];    // start and end address of jvm.dll
1346 
1347 // check if addr is inside jvm.dll
1348 bool os::address_is_in_vm(address addr) {
1349   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1350     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1351       assert(false, "Can't find jvm module.");
1352       return false;
1353     }
1354   }
1355 
1356   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1357 }
1358 
1359 // print module info; param is outputStream*
1360 static int _print_module(const char* fname, address base_address,
1361                          address top_address, void* param) {
1362   if (!param) return -1;
1363 
1364   outputStream* st = (outputStream*)param;
1365 
1366   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1367   return 0;
1368 }
1369 
1370 // Loads a .dll/.so and,
1371 // in case of error, checks whether the .dll/.so was built for the
1372 // same architecture that Hotspot is running on.
1373 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1374   log_info(os)("attempting shared library load of %s", name);
1375 
1376   void * result = LoadLibrary(name);
1377   if (result != NULL) {
1378     Events::log(NULL, "Loaded shared library %s", name);
1379     // Recalculate pdb search path if a DLL was loaded successfully.
1380     SymbolEngine::recalc_search_path();
1381     log_info(os)("shared library load of %s was successful", name);
1382     return result;
1383   }
1384   DWORD errcode = GetLastError();
1385   // Read system error message into ebuf
1386   // It may or may not be overwritten below (in the for loop and just above)
1387   lasterror(ebuf, (size_t) ebuflen);
1388   ebuf[ebuflen - 1] = '\0';
1389   Events::log(NULL, "Loading shared library %s failed, error code %lu", name, errcode);
1390   log_info(os)("shared library load of %s failed, error code %lu", name, errcode);
1391 
1392   if (errcode == ERROR_MOD_NOT_FOUND) {
1393     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1394     ebuf[ebuflen - 1] = '\0';
1395     return NULL;
1396   }
1397 
1398   // Parse the dll below.
1399   // If we can read the dll-info and find that the dll was built
1400   // for an architecture other than the one Hotspot is running on,
1401   // then print "DLL was built for a different architecture" to the buffer;
1402   // otherwise call os::lasterror to obtain the system error message.
1403   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1404   if (fd < 0) {
1405     return NULL;
1406   }
1407 
1408   uint32_t signature_offset;
1409   uint16_t lib_arch = 0;
1410   bool failed_to_get_lib_arch =
1411     ( // Go to position 3c in the dll
1412      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1413      ||
1414      // Read location of signature
1415      (sizeof(signature_offset) !=
1416      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1417      ||
1418      // Go to COFF File Header in dll
1419      // that is located after "signature" (4 bytes long)
1420      (os::seek_to_file_offset(fd,
1421      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1422      ||
1423      // Read field that contains code of architecture
1424      // that dll was built for
1425      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1426     );
1427 
1428   ::close(fd);
1429   if (failed_to_get_lib_arch) {
1430     // file i/o error - report os::lasterror(...) msg
1431     return NULL;
1432   }
1433 
1434   typedef struct {
1435     uint16_t arch_code;
1436     char* arch_name;
1437   } arch_t;
1438 
1439   static const arch_t arch_array[] = {
1440     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1441     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"}
1442   };
1443 #if (defined _M_AMD64)
1444   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1445 #elif (defined _M_IX86)
1446   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1447 #else
1448   #error Method os::dll_load requires that one of following \
1449          is defined :_M_AMD64 or _M_IX86
1450 #endif
1451 
1452 
1453   // Obtain strings for the printf operation:
1454   // lib_arch_str names the platform this .dll was built for,
1455   // running_arch_str names the platform Hotspot was built for.
1456   char *running_arch_str = NULL, *lib_arch_str = NULL;
1457   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1458     if (lib_arch == arch_array[i].arch_code) {
1459       lib_arch_str = arch_array[i].arch_name;
1460     }
1461     if (running_arch == arch_array[i].arch_code) {
1462       running_arch_str = arch_array[i].arch_name;
1463     }
1464   }
1465 
1466   assert(running_arch_str,
1467          "Didn't find running architecture code in arch_array");
1468 
1469   // If the architecture is right
1470   // but some other error took place - report os::lasterror(...) msg
1471   if (lib_arch == running_arch) {
1472     return NULL;
1473   }
1474 
1475   if (lib_arch_str != NULL) {
1476     ::_snprintf(ebuf, ebuflen - 1,
1477                 "Can't load %s-bit .dll on a %s-bit platform",
1478                 lib_arch_str, running_arch_str);
1479   } else {
1480     // don't know what architecture this dll was built for
1481     ::_snprintf(ebuf, ebuflen - 1,
1482                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1483                 lib_arch, running_arch_str);
1484   }
1485 
1486   return NULL;
1487 }
1488 
1489 void os::print_dll_info(outputStream *st) {
1490   st->print_cr("Dynamic libraries:");
1491   get_loaded_modules_info(_print_module, (void *)st);
1492 }
1493 
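// Enumerate the modules loaded into the current process via PSAPI
// (EnumProcessModules) and invoke 'callback' for each one, stopping early if
// the callback returns a non-zero value.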
1494 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1495   HANDLE   hProcess;
1496 
1497 # define MAX_NUM_MODULES 128
1498   HMODULE     modules[MAX_NUM_MODULES];
1499   static char filename[MAX_PATH];
1500   int         result = 0;
1501 
1502   int pid = os::current_process_id();
1503   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1504                          FALSE, pid);
1505   if (hProcess == NULL) return 0;
1506 
1507   DWORD size_needed;
1508   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1509     CloseHandle(hProcess);
1510     return 0;
1511   }
1512 
1513   // number of modules that are currently loaded
1514   int num_modules = size_needed / sizeof(HMODULE);
1515 
1516   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1517     // Get Full pathname:
1518     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1519       filename[0] = '\0';
1520     }
1521 
1522     MODULEINFO modinfo;
1523     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1524       modinfo.lpBaseOfDll = NULL;
1525       modinfo.SizeOfImage = 0;
1526     }
1527 
1528     // Invoke callback function
1529     result = callback(filename, (address)modinfo.lpBaseOfDll,
1530                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1531     if (result) break;
1532   }
1533 
1534   CloseHandle(hProcess);
1535   return result;
1536 }
1537 
1538 bool os::get_host_name(char* buf, size_t buflen) {
1539   DWORD size = (DWORD)buflen;
1540   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1541 }
1542 
1543 void os::get_summary_os_info(char* buf, size_t buflen) {
1544   stringStream sst(buf, buflen);
1545   os::win32::print_windows_version(&sst);
1546   // chop off newline character
1547   char* nl = strchr(buf, '\n');
1548   if (nl != NULL) *nl = '\0';
1549 }
1550 
1551 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1552 #if _MSC_VER >= 1900
1553   // Starting with Visual Studio 2015, vsnprintf is C99 compliant.
1554   int result = ::vsnprintf(buf, len, fmt, args);
1555   // If an encoding error occurred (result < 0) then it's not clear
1556   // whether the buffer is NUL terminated, so ensure it is.
1557   if ((result < 0) && (len > 0)) {
1558     buf[len - 1] = '\0';
1559   }
1560   return result;
1561 #else
1562   // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
1563   // _vsnprintf, whose behavior seems to be *mostly* consistent across
1564   // versions.  However, when len == 0, avoid _vsnprintf too, and just
1565   // go straight to _vscprintf.  The output is going to be truncated in
1566   // that case, except in the unusual case of empty output.  More
1567   // importantly, the documentation for various versions of Visual Studio
1568   // is inconsistent about the behavior of _vsnprintf when len == 0,
1569   // including it possibly being an error.
1570   int result = -1;
1571   if (len > 0) {
1572     result = _vsnprintf(buf, len, fmt, args);
1573     // If output (including NUL terminator) is truncated, the buffer
1574     // won't be NUL terminated.  Add the trailing NUL specified by C99.
1575     if ((result < 0) || ((size_t)result >= len)) {
1576       buf[len - 1] = '\0';
1577     }
1578   }
1579   if (result < 0) {
1580     result = _vscprintf(fmt, args);
1581   }
1582   return result;
1583 #endif // _MSC_VER dispatch
1584 }
1585 
1586 static inline time_t get_mtime(const char* filename) {
1587   struct stat st;
1588   int ret = os::stat(filename, &st);
1589   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
1590   return st.st_mtime;
1591 }
1592 
1593 int os::compare_file_modified_times(const char* file1, const char* file2) {
1594   time_t t1 = get_mtime(file1);
1595   time_t t2 = get_mtime(file2);
1596   return t1 - t2;
1597 }
1598 
1599 void os::print_os_info_brief(outputStream* st) {
1600   os::print_os_info(st);
1601 }
1602 
1603 void os::print_os_info(outputStream* st) {
1604 #ifdef ASSERT
1605   char buffer[1024];
1606   st->print("HostName: ");
1607   if (get_host_name(buffer, sizeof(buffer))) {
1608     st->print("%s ", buffer);
1609   } else {
1610     st->print("N/A ");
1611   }
1612 #endif
1613   st->print("OS:");
1614   os::win32::print_windows_version(st);
1615 
1616 #ifdef _LP64
1617   VM_Version::print_platform_virtualization_info(st);
1618 #endif
1619 }
1620 
1621 void os::win32::print_windows_version(outputStream* st) {
1622   OSVERSIONINFOEX osvi;
1623   VS_FIXEDFILEINFO *file_info;
1624   TCHAR kernel32_path[MAX_PATH];
1625   UINT len, ret;
1626 
1627   // Use the GetVersionEx information to see if we're on a server or
1628   // workstation edition of Windows. Starting with Windows 8.1 we can't
1629   // trust the OS version information returned by this API.
1630   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1631   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1632   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1633     st->print_cr("Call to GetVersionEx failed");
1634     return;
1635   }
1636   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1637 
1638   // Get the full path to \Windows\System32\kernel32.dll and use that for
1639   // determining what version of Windows we're running on.
1640   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1641   ret = GetSystemDirectory(kernel32_path, len);
1642   if (ret == 0 || ret > len) {
1643     st->print_cr("Call to GetSystemDirectory failed");
1644     return;
1645   }
1646   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1647 
1648   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1649   if (version_size == 0) {
1650     st->print_cr("Call to GetFileVersionInfoSize failed");
1651     return;
1652   }
1653 
1654   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1655   if (version_info == NULL) {
1656     st->print_cr("Failed to allocate version_info");
1657     return;
1658   }
1659 
1660   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1661     os::free(version_info);
1662     st->print_cr("Call to GetFileVersionInfo failed");
1663     return;
1664   }
1665 
1666   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1667     os::free(version_info);
1668     st->print_cr("Call to VerQueryValue failed");
1669     return;
1670   }
1671 
1672   int major_version = HIWORD(file_info->dwProductVersionMS);
1673   int minor_version = LOWORD(file_info->dwProductVersionMS);
1674   int build_number = HIWORD(file_info->dwProductVersionLS);
1675   int build_minor = LOWORD(file_info->dwProductVersionLS);
1676   int os_vers = major_version * 1000 + minor_version;
1677   os::free(version_info);
1678 
1679   st->print(" Windows ");
1680   switch (os_vers) {
1681 
1682   case 6000:
1683     if (is_workstation) {
1684       st->print("Vista");
1685     } else {
1686       st->print("Server 2008");
1687     }
1688     break;
1689 
1690   case 6001:
1691     if (is_workstation) {
1692       st->print("7");
1693     } else {
1694       st->print("Server 2008 R2");
1695     }
1696     break;
1697 
1698   case 6002:
1699     if (is_workstation) {
1700       st->print("8");
1701     } else {
1702       st->print("Server 2012");
1703     }
1704     break;
1705 
1706   case 6003:
1707     if (is_workstation) {
1708       st->print("8.1");
1709     } else {
1710       st->print("Server 2012 R2");
1711     }
1712     break;
1713 
1714   case 10000:
1715     if (is_workstation) {
1716       st->print("10");
1717     } else {
      // Distinguish Windows Server 2016 from 2019 by build number;
      // the Windows Server 2019 GA (10/2018) build number is 17763.
1720       if (build_number > 17762) {
1721         st->print("Server 2019");
1722       } else {
1723         st->print("Server 2016");
1724       }
1725     }
1726     break;
1727 
1728   default:
    // Unrecognized Windows version; print its major and minor versions.
1730     st->print("%d.%d", major_version, minor_version);
1731     break;
1732   }
1733 
  // Retrieve SYSTEM_INFO via GetNativeSystemInfo so we can determine
  // whether we are running on a 64-bit processor.
1736   SYSTEM_INFO si;
1737   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1738   GetNativeSystemInfo(&si);
1739   if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1740     st->print(" , 64 bit");
1741   }
1742 
1743   st->print(" Build %d", build_number);
1744   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1745   st->cr();
1746 }
1747 
1748 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1749   // Nothing to do for now.
1750 }
1751 
1752 void os::get_summary_cpu_info(char* buf, size_t buflen) {
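  // Read the processor name string from the registry; fall back to a generic
  // placeholder if the key or value cannot be read.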
1753   HKEY key;
1754   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1755                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1756   if (status == ERROR_SUCCESS) {
1757     DWORD size = (DWORD)buflen;
1758     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1759     if (status != ERROR_SUCCESS) {
1760         strncpy(buf, "## __CPU__", buflen);
1761     }
1762     RegCloseKey(key);
1763   } else {
    // Fall back to generic CPU info.
1765     strncpy(buf, "## __CPU__", buflen);
1766   }
1767 }
1768 
1769 void os::print_memory_info(outputStream* st) {
1770   st->print("Memory:");
1771   st->print(" %dk page", os::vm_page_size()>>10);
1772 
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
  // incorrect value if total memory is larger than 4GB.
1775   MEMORYSTATUSEX ms;
1776   ms.dwLength = sizeof(ms);
1777   int r1 = GlobalMemoryStatusEx(&ms);
1778 
1779   if (r1 != 0) {
1780     st->print(", system-wide physical " INT64_FORMAT "M ",
1781              (int64_t) ms.ullTotalPhys >> 20);
1782     st->print("(" INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20);
1783 
1784     st->print("TotalPageFile size " INT64_FORMAT "M ",
1785              (int64_t) ms.ullTotalPageFile >> 20);
1786     st->print("(AvailPageFile size " INT64_FORMAT "M)",
1787              (int64_t) ms.ullAvailPageFile >> 20);
1788 
    // On 32-bit, Total/AvailVirtual are interesting (they show how close we get to the 2-4 GB per-process limit)
1790 #if defined(_M_IX86)
1791     st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ",
1792              (int64_t) ms.ullTotalVirtual >> 20);
1793     st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
1794 #endif
1795   } else {
1796     st->print(", GlobalMemoryStatusEx did not succeed so we miss some memory values.");
1797   }
1798 
1799   // extended memory statistics for a process
1800   PROCESS_MEMORY_COUNTERS_EX pmex;
1801   ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
1802   pmex.cb = sizeof(pmex);
1803   int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));
1804 
1805   if (r2 != 0) {
1806     st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ",
1807              (int64_t) pmex.WorkingSetSize >> 20);
1808     st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);
1809 
1810     st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ",
1811              (int64_t) pmex.PrivateUsage >> 20);
1812     st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);
1813   } else {
1814     st->print("\nGetProcessMemoryInfo did not succeed so we miss some memory values.");
1815   }
1816 
1817   st->cr();
1818 }
1819 
1820 bool os::signal_sent_by_kill(const void* siginfo) {
1821   // TODO: Is this possible?
1822   return false;
1823 }
1824 
1825 void os::print_siginfo(outputStream *st, const void* siginfo) {
1826   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1827   st->print("siginfo:");
1828 
1829   char tmp[64];
1830   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1831     strcpy(tmp, "EXCEPTION_??");
1832   }
1833   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1834 
1835   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1836        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1837        er->NumberParameters >= 2) {
1838     switch (er->ExceptionInformation[0]) {
1839     case 0: st->print(", reading address"); break;
1840     case 1: st->print(", writing address"); break;
1841     case 8: st->print(", data execution prevention violation at address"); break;
1842     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1843                        er->ExceptionInformation[0]);
1844     }
1845     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1846   } else {
1847     int num = er->NumberParameters;
1848     if (num > 0) {
1849       st->print(", ExceptionInformation=");
1850       for (int i = 0; i < num; i++) {
1851         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1852       }
1853     }
1854   }
1855   st->cr();
1856 }
1857 
1858 bool os::signal_thread(Thread* thread, int sig, const char* reason) {
1859   // TODO: Can we kill thread?
1860   return false;
1861 }
1862 
1863 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1864   // do nothing
1865 }
1866 
1867 static char saved_jvm_path[MAX_PATH] = {0};
1868 
1869 // Find the full path to the current module, jvm.dll
1870 void os::jvm_path(char *buf, jint buflen) {
1871   // Error checking.
1872   if (buflen < MAX_PATH) {
1873     assert(false, "must use a large-enough buffer");
1874     buf[0] = '\0';
1875     return;
1876   }
1877   // Lazy resolve the path to current module.
1878   if (saved_jvm_path[0] != 0) {
1879     strcpy(buf, saved_jvm_path);
1880     return;
1881   }
1882 
1883   buf[0] = '\0';
1884   if (Arguments::sun_java_launcher_is_altjvm()) {
1885     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1886     // for a JAVA_HOME environment variable and fix up the path so it
1887     // looks like jvm.dll is installed there (append a fake suffix
1888     // hotspot/jvm.dll).
1889     char* java_home_var = ::getenv("JAVA_HOME");
1890     if (java_home_var != NULL && java_home_var[0] != 0 &&
1891         strlen(java_home_var) < (size_t)buflen) {
1892       strncpy(buf, java_home_var, buflen);
1893 
      // Determine if this is a legacy image or a modules image;
      // a modules image doesn't have a "jre" subdirectory.
1896       size_t len = strlen(buf);
1897       char* jrebin_p = buf + len;
1898       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1899       if (0 != _access(buf, 0)) {
1900         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1901       }
1902       len = strlen(buf);
1903       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1904     }
1905   }
1906 
1907   if (buf[0] == '\0') {
1908     GetModuleFileName(vm_lib_handle, buf, buflen);
1909   }
1910   strncpy(saved_jvm_path, buf, MAX_PATH);
1911   saved_jvm_path[MAX_PATH - 1] = '\0';
1912 }
1913 
1914 
1915 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1916 #ifndef _WIN64
1917   st->print("_");
1918 #endif
1919 }
1920 
1921 
1922 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1923 #ifndef _WIN64
1924   st->print("@%d", args_size  * sizeof(int));
1925 #endif
1926 }
1927 
1928 // This method is a copy of JDK's sysGetLastErrorString
1929 // from src/windows/hpi/src/system_md.c
1930 
1931 size_t os::lasterror(char* buf, size_t len) {
1932   DWORD errval;
1933 
1934   if ((errval = GetLastError()) != 0) {
1935     // DOS error
1936     size_t n = (size_t)FormatMessage(
1937                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1938                                      NULL,
1939                                      errval,
1940                                      0,
1941                                      buf,
1942                                      (DWORD)len,
1943                                      NULL);
1944     if (n > 3) {
1945       // Drop final '.', CR, LF
1946       if (buf[n - 1] == '\n') n--;
1947       if (buf[n - 1] == '\r') n--;
1948       if (buf[n - 1] == '.') n--;
1949       buf[n] = '\0';
1950     }
1951     return n;
1952   }
1953 
1954   if (errno != 0) {
1955     // C runtime error that has no corresponding DOS error code
1956     const char* s = os::strerror(errno);
1957     size_t n = strlen(s);
1958     if (n >= len) n = len - 1;
1959     strncpy(buf, s, n);
1960     buf[n] = '\0';
1961     return n;
1962   }
1963 
1964   return 0;
1965 }
1966 
1967 int os::get_last_error() {
1968   DWORD error = GetLastError();
1969   if (error == 0) {
1970     error = errno;
1971   }
1972   return (int)error;
1973 }
1974 
1975 // sun.misc.Signal
1976 // NOTE that this is a workaround for an apparent kernel bug where if
1977 // a signal handler for SIGBREAK is installed then that signal handler
1978 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1979 // See bug 4416763.
1980 static void (*sigbreakHandler)(int) = NULL;
1981 
1982 static void UserHandler(int sig, void *siginfo, void *context) {
1983   os::signal_notify(sig);
1984   // We need to reinstate the signal handler each time...
1985   os::signal(sig, (void*)UserHandler);
1986 }
1987 
1988 void* os::user_handler() {
1989   return (void*) UserHandler;
1990 }
1991 
1992 void* os::signal(int signal_number, void* handler) {
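  // For SIGBREAK (unless -Xrs is specified) the handler is recorded here and
  // invoked directly by consoleHandler on CTRL_BREAK_EVENT; all other signals
  // are installed through the CRT's ::signal.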
1993   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1994     void (*oldHandler)(int) = sigbreakHandler;
1995     sigbreakHandler = (void (*)(int)) handler;
1996     return (void*) oldHandler;
1997   } else {
1998     return (void*)::signal(signal_number, (void (*)(int))handler);
1999   }
2000 }
2001 
2002 void os::signal_raise(int signal_number) {
2003   raise(signal_number);
2004 }
2005 
2006 // The Win32 C runtime library maps all console control events other than ^C
2007 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2008 // logoff, and shutdown events.  We therefore install our own console handler
2009 // that raises SIGTERM for the latter cases.
2010 //
2011 static BOOL WINAPI consoleHandler(DWORD event) {
2012   switch (event) {
2013   case CTRL_C_EVENT:
2014     if (VMError::is_error_reported()) {
2015       // Ctrl-C is pressed during error reporting, likely because the error
2016       // handler fails to abort. Let VM die immediately.
2017       os::die();
2018     }
2019 
2020     os::signal_raise(SIGINT);
2021     return TRUE;
2022     break;
2023   case CTRL_BREAK_EVENT:
2024     if (sigbreakHandler != NULL) {
2025       (*sigbreakHandler)(SIGBREAK);
2026     }
2027     return TRUE;
2028     break;
2029   case CTRL_LOGOFF_EVENT: {
2030     // Don't terminate JVM if it is running in a non-interactive session,
2031     // such as a service process.
2032     USEROBJECTFLAGS flags;
2033     HANDLE handle = GetProcessWindowStation();
2034     if (handle != NULL &&
2035         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2036         sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let the next handler deal
2038       // with it.
2039       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2040         return FALSE;
2041       }
2042     }
2043   }
2044   case CTRL_CLOSE_EVENT:
2045   case CTRL_SHUTDOWN_EVENT:
2046     os::signal_raise(SIGTERM);
2047     return TRUE;
2048     break;
2049   default:
2050     break;
2051   }
2052   return FALSE;
2053 }
2054 
// The following code was moved here from os.cpp because it is
// platform-specific by its very nature.
2057 
2058 // Return maximum OS signal used + 1 for internal use only
2059 // Used as exit signal for signal_thread
2060 int os::sigexitnum_pd() {
2061   return NSIG;
2062 }
2063 
2064 // a counter for each possible signal value, including signal_thread exit signal
2065 static volatile jint pending_signals[NSIG+1] = { 0 };
2066 static Semaphore* sig_sem = NULL;
2067 
2068 static void jdk_misc_signal_init() {
2069   // Initialize signal structures
2070   memset((void*)pending_signals, 0, sizeof(pending_signals));
2071 
2072   // Initialize signal semaphore
2073   sig_sem = new Semaphore();
2074 
2075   // Programs embedding the VM do not want it to attempt to receive
2076   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2077   // shutdown hooks mechanism introduced in 1.3.  For example, when
2078   // the VM is run as part of a Windows NT service (i.e., a servlet
2079   // engine in a web server), the correct behavior is for any console
2080   // control handler to return FALSE, not TRUE, because the OS's
2081   // "final" handler for such events allows the process to continue if
2082   // it is a service (while terminating it if it is not a service).
2083   // To make this behavior uniform and the mechanism simpler, we
2084   // completely disable the VM's usage of these console events if -Xrs
2085   // (=ReduceSignalUsage) is specified.  This means, for example, that
2086   // the CTRL-BREAK thread dump mechanism is also disabled in this
2087   // case.  See bugs 4323062, 4345157, and related bugs.
2088 
2089   // Add a CTRL-C handler
2090   SetConsoleCtrlHandler(consoleHandler, TRUE);
2091 }
2092 
2093 void os::signal_notify(int sig) {
2094   if (sig_sem != NULL) {
2095     Atomic::inc(&pending_signals[sig]);
2096     sig_sem->signal();
2097   } else {
    // With ReduceSignalUsage, the signal thread is not created and
    // jdk_misc_signal_init() is never called.
2100     assert(ReduceSignalUsage, "signal semaphore should be created");
2101   }
2102 }
2103 
2104 static int check_pending_signals() {
2105   while (true) {
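    // Scan for a pending signal and atomically claim one occurrence via cmpxchg;
    // if none is pending, block on the semaphore and rescan.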
2106     for (int i = 0; i < NSIG + 1; i++) {
2107       jint n = pending_signals[i];
2108       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
2109         return i;
2110       }
2111     }
2112     JavaThread *thread = JavaThread::current();
2113 
2114     ThreadBlockInVM tbivm(thread);
2115 
2116     bool threadIsSuspended;
2117     do {
2118       thread->set_suspend_equivalent();
2119       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2120       sig_sem->wait();
2121 
2122       // were we externally suspended while we were waiting?
2123       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2124       if (threadIsSuspended) {
2125         // The semaphore has been incremented, but while we were waiting
2126         // another thread suspended us. We don't want to continue running
2127         // while suspended because that would surprise the thread that
2128         // suspended us.
2129         sig_sem->signal();
2130 
2131         thread->java_suspend_self();
2132       }
2133     } while (threadIsSuspended);
2134   }
2135 }
2136 
2137 int os::signal_wait() {
2138   return check_pending_signals();
2139 }
2140 
2141 // Implicit OS exception handling
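// Handle_Exception redirects the faulting thread to 'handler': it records the
// original exception pc in the thread and rewrites the instruction pointer in
// the exception context before resuming execution.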
2142 
2143 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2144                       address handler) {
2145   JavaThread* thread = (JavaThread*) Thread::current_or_null();
2146   // Save pc in thread
2147 #ifdef _M_AMD64
2148   // Do not blow up if no thread info available.
2149   if (thread) {
2150     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2151   }
2152   // Set pc to handler
2153   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2154 #else
2155   // Do not blow up if no thread info available.
2156   if (thread) {
2157     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2158   }
2159   // Set pc to handler
2160   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2161 #endif
2162 
2163   // Continue the execution
2164   return EXCEPTION_CONTINUE_EXECUTION;
2165 }
2166 
2167 
2168 // Used for PostMortemDump
2169 extern "C" void safepoints();
2170 extern "C" void find(int x);
2171 extern "C" void events();
2172 
// According to the Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001C exception code. However, real-world experience shows that occasionally
// executing an illegal instruction can generate the exception code 0xC000001E. This
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2177 
2178 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2179 
2180 // From "Execution Protection in the Windows Operating System" draft 0.35
2181 // Once a system header becomes available, the "real" define should be
2182 // included or copied here.
2183 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2184 
2185 // Windows Vista/2008 heap corruption check
2186 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2187 
2188 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2189 // C++ compiler contain this error code. Because this is a compiler-generated
2190 // error, the code is not listed in the Win32 API header files.
2191 // The code is actually a cryptic mnemonic device, with the initial "E"
2192 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2193 // ASCII values of "msc".
2194 
2195 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2196 
2197 #define def_excpt(val) { #val, (val) }
2198 
2199 static const struct { const char* name; uint number; } exceptlabels[] = {
2200     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2201     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2202     def_excpt(EXCEPTION_BREAKPOINT),
2203     def_excpt(EXCEPTION_SINGLE_STEP),
2204     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2205     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2206     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2207     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2208     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2209     def_excpt(EXCEPTION_FLT_OVERFLOW),
2210     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2211     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2212     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2213     def_excpt(EXCEPTION_INT_OVERFLOW),
2214     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2215     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2216     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2217     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2218     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2219     def_excpt(EXCEPTION_STACK_OVERFLOW),
2220     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2221     def_excpt(EXCEPTION_GUARD_PAGE),
2222     def_excpt(EXCEPTION_INVALID_HANDLE),
2223     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2224     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2225 };
2226 
2227 #undef def_excpt
2228 
2229 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2230   uint code = static_cast<uint>(exception_code);
2231   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2232     if (exceptlabels[i].number == code) {
2233       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2234       return buf;
2235     }
2236   }
2237 
2238   return NULL;
2239 }
2240 
2241 //-----------------------------------------------------------------------------
2242 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2243   // handle exception caused by idiv; should only happen for -MinInt/-1
2244   // (division by zero is handled explicitly)
2245 #ifdef  _M_AMD64
2246   PCONTEXT ctx = exceptionInfo->ContextRecord;
2247   address pc = (address)ctx->Rip;
2248   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
2249   assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2250   if (pc[0] == 0xF7) {
2251     // set correct result values and continue after idiv instruction
2252     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2253   } else {
2254     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2255   }
  // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation).
  // This is the case because the exception only happens for -MinValue/-1, and -MinValue is always in rax because of the
  // idiv opcode (0xF7).
2259   ctx->Rdx = (DWORD)0;             // remainder
2260   // Continue the execution
2261 #else
2262   PCONTEXT ctx = exceptionInfo->ContextRecord;
2263   address pc = (address)ctx->Eip;
2264   assert(pc[0] == 0xF7, "not an idiv opcode");
2265   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2266   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2267   // set correct result values and continue after idiv instruction
2268   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2269   ctx->Eax = (DWORD)min_jint;      // result
2270   ctx->Edx = (DWORD)0;             // remainder
2271   // Continue the execution
2272 #endif
2273   return EXCEPTION_CONTINUE_EXECUTION;
2274 }
2275 
2276 //-----------------------------------------------------------------------------
2277 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2278   PCONTEXT ctx = exceptionInfo->ContextRecord;
2279 #ifndef  _WIN64
2280   // handle exception caused by native method modifying control word
2281   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2282 
2283   switch (exception_code) {
2284   case EXCEPTION_FLT_DENORMAL_OPERAND:
2285   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2286   case EXCEPTION_FLT_INEXACT_RESULT:
2287   case EXCEPTION_FLT_INVALID_OPERATION:
2288   case EXCEPTION_FLT_OVERFLOW:
2289   case EXCEPTION_FLT_STACK_CHECK:
2290   case EXCEPTION_FLT_UNDERFLOW:
2291     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2292     if (fp_control_word != ctx->FloatSave.ControlWord) {
2293       // Restore FPCW and mask out FLT exceptions
2294       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2295       // Mask out pending FLT exceptions
2296       ctx->FloatSave.StatusWord &=  0xffffff00;
2297       return EXCEPTION_CONTINUE_EXECUTION;
2298     }
2299   }
2300 
2301   if (prev_uef_handler != NULL) {
2302     // We didn't handle this exception so pass it to the previous
2303     // UnhandledExceptionFilter.
2304     return (prev_uef_handler)(exceptionInfo);
2305   }
2306 #else // !_WIN64
2307   // On Windows, the mxcsr control bits are non-volatile across calls
2308   // See also CR 6192333
2309   //
2310   jint MxCsr = INITIAL_MXCSR;
2311   // we can't use StubRoutines::addr_mxcsr_std()
2312   // because in Win64 mxcsr is not saved there
2313   if (MxCsr != ctx->MxCsr) {
2314     ctx->MxCsr = MxCsr;
2315     return EXCEPTION_CONTINUE_EXECUTION;
2316   }
2317 #endif // !_WIN64
2318 
2319   return EXCEPTION_CONTINUE_SEARCH;
2320 }
2321 
2322 static inline void report_error(Thread* t, DWORD exception_code,
2323                                 address addr, void* siginfo, void* context) {
2324   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2325 
2326   // If UseOsErrorReporting, this will return here and save the error file
2327   // somewhere where we can find it in the minidump.
2328 }
2329 
2330 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2331         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2332   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2333   address addr = (address) exceptionRecord->ExceptionInformation[1];
2334   if (Interpreter::contains(pc)) {
2335     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2336     if (!fr->is_first_java_frame()) {
2337       // get_frame_at_stack_banging_point() is only called when we
2338       // have well defined stacks so java_sender() calls do not need
2339       // to assert safe_for_sender() first.
2340       *fr = fr->java_sender();
2341     }
2342   } else {
2343     // more complex code with compiled code
2344     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2345     CodeBlob* cb = CodeCache::find_blob(pc);
2346     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2347       // Not sure where the pc points to, fallback to default
2348       // stack overflow handling
2349       return false;
2350     } else {
2351       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2352       // in compiled code, the stack banging is performed just after the return pc
2353       // has been pushed on the stack
2354       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2355       if (!fr->is_java_frame()) {
2356         // See java_sender() comment above.
2357         *fr = fr->java_sender();
2358       }
2359     }
2360   }
2361   assert(fr->is_java_frame(), "Safety check");
2362   return true;
2363 }
2364 
2365 #if INCLUDE_AOT
2366 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2367   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2368   address addr = (address) exceptionRecord->ExceptionInformation[1];
2369   address pc = (address) exceptionInfo->ContextRecord->Rip;
2370 
2371   // Handle the case where we get an implicit exception in AOT generated
  // code.  Loaded AOT DLLs are not registered for structured exceptions.
2373   // If the exception occurred in the codeCache or AOT code, pass control
2374   // to our normal exception handler.
2375   CodeBlob* cb = CodeCache::find_blob(pc);
2376   if (cb != NULL) {
2377     return topLevelExceptionFilter(exceptionInfo);
2378   }
2379 
2380   return EXCEPTION_CONTINUE_SEARCH;
2381 }
2382 #endif
2383 
2384 //-----------------------------------------------------------------------------
2385 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
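  // With InterceptOSException, do not handle the exception here; let it
  // propagate to other handlers (e.g. an attached debugger).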
2386   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2387   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2388 #ifdef _M_AMD64
2389   address pc = (address) exceptionInfo->ContextRecord->Rip;
2390 #else
2391   address pc = (address) exceptionInfo->ContextRecord->Eip;
2392 #endif
2393   Thread* t = Thread::current_or_null_safe();
2394 
2395   // Handle SafeFetch32 and SafeFetchN exceptions.
2396   if (StubRoutines::is_safefetch_fault(pc)) {
2397     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2398   }
2399 
2400 #ifndef _WIN64
2401   // Execution protection violation - win32 running on AMD64 only
2402   // Handled first to avoid misdiagnosis as a "normal" access violation;
2403   // This is safe to do because we have a new/unique ExceptionInformation
2404   // code for this condition.
2405   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2406     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2407     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2408     address addr = (address) exceptionRecord->ExceptionInformation[1];
2409 
2410     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2411       int page_size = os::vm_page_size();
2412 
2413       // Make sure the pc and the faulting address are sane.
2414       //
2415       // If an instruction spans a page boundary, and the page containing
2416       // the beginning of the instruction is executable but the following
2417       // page is not, the pc and the faulting address might be slightly
2418       // different - we still want to unguard the 2nd page in this case.
2419       //
2420       // 15 bytes seems to be a (very) safe value for max instruction size.
2421       bool pc_is_near_addr =
2422         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2423       bool instr_spans_page_boundary =
2424         (align_down((intptr_t) pc ^ (intptr_t) addr,
2425                          (intptr_t) page_size) > 0);
2426 
2427       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2428         static volatile address last_addr =
2429           (address) os::non_memory_address_word();
2430 
2431         // In conservative mode, don't unguard unless the address is in the VM
2432         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2433             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2434 
2435           // Set memory to RWX and retry
2436           address page_start = align_down(addr, page_size);
2437           bool res = os::protect_memory((char*) page_start, page_size,
2438                                         os::MEM_PROT_RWX);
2439 
2440           log_debug(os)("Execution protection violation "
2441                         "at " INTPTR_FORMAT
2442                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2443                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2444 
2445           // Set last_addr so if we fault again at the same address, we don't
2446           // end up in an endless loop.
2447           //
2448           // There are two potential complications here.  Two threads trapping
2449           // at the same address at the same time could cause one of the
          // threads to think the page was already unguarded, and abort the VM.  Likely
2451           // very rare.
2452           //
2453           // The other race involves two threads alternately trapping at
2454           // different addresses and failing to unguard the page, resulting in
2455           // an endless loop.  This condition is probably even more unlikely
2456           // than the first.
2457           //
2458           // Although both cases could be avoided by using locks or thread
2459           // local last_addr, these solutions are unnecessary complication:
2460           // this handler is a best-effort safety net, not a complete solution.
2461           // It is disabled by default and should only be used as a workaround
2462           // in case we missed any no-execute-unsafe VM code.
2463 
2464           last_addr = addr;
2465 
2466           return EXCEPTION_CONTINUE_EXECUTION;
2467         }
2468       }
2469 
2470       // Last unguard failed or not unguarding
2471       tty->print_raw_cr("Execution protection violation");
2472       report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2473                    exceptionInfo->ContextRecord);
2474       return EXCEPTION_CONTINUE_SEARCH;
2475     }
2476   }
2477 #endif // _WIN64
2478 
2479   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2480       VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that the OS saves/restores AVX registers.
2482     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2483   }
2484 
2485   if (t != NULL && t->is_Java_thread()) {
2486     JavaThread* thread = (JavaThread*) t;
2487     bool in_java = thread->thread_state() == _thread_in_Java;
2488 
2489     // Handle potential stack overflows up front.
2490     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2491       if (thread->stack_guards_enabled()) {
2492         if (in_java) {
2493           frame fr;
2494           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2495           address addr = (address) exceptionRecord->ExceptionInformation[1];
2496           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2497             assert(fr.is_java_frame(), "Must be a Java frame");
2498             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2499           }
2500         }
2501         // Yellow zone violation.  The o/s has unprotected the first yellow
        // zone page for us.  Note:  must call disable_stack_yellow_reserved_zone to
2503         // update the enabled status, even if the zone contains only one page.
2504         assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
2505         thread->disable_stack_yellow_reserved_zone();
2506         // If not in java code, return and hope for the best.
2507         return in_java
2508             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2509             :  EXCEPTION_CONTINUE_EXECUTION;
2510       } else {
2511         // Fatal red zone violation.
2512         thread->disable_stack_red_zone();
2513         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2514         report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2515                       exceptionInfo->ContextRecord);
2516         return EXCEPTION_CONTINUE_SEARCH;
2517       }
2518     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2519       // Either stack overflow or null pointer exception.
2520       if (in_java) {
2521         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2522         address addr = (address) exceptionRecord->ExceptionInformation[1];
2523         address stack_end = thread->stack_end();
2524         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2525           // Stack overflow.
2526           assert(!os::uses_stack_guard_pages(),
2527                  "should be caught by red zone code above.");
2528           return Handle_Exception(exceptionInfo,
2529                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2530         }
        // Check for safepoint polling and implicit null.
        // We only expect null pointers in the stubs (vtable);
        // the rest are checked explicitly now.
2534         CodeBlob* cb = CodeCache::find_blob(pc);
2535         if (cb != NULL) {
2536           if (os::is_poll_address(addr)) {
2537             address stub = SharedRuntime::get_poll_stub(pc);
2538             return Handle_Exception(exceptionInfo, stub);
2539           }
2540         }
2541         {
2542 #ifdef _WIN64
          // If it's a legal stack address, map the entire region in.
2544           //
2545           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2546           address addr = (address) exceptionRecord->ExceptionInformation[1];
2547           if (addr > thread->stack_reserved_zone_base() && addr < thread->stack_base()) {
2548             addr = (address)((uintptr_t)addr &
2549                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2550             os::commit_memory((char *)addr, thread->stack_base() - addr,
2551                               !ExecMem);
2552             return EXCEPTION_CONTINUE_EXECUTION;
2553           } else
2554 #endif
2555           {
2556             // Null pointer exception.
2557             if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
2558               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2559               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2560             }
2561             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2562                          exceptionInfo->ContextRecord);
2563             return EXCEPTION_CONTINUE_SEARCH;
2564           }
2565         }
2566       }
2567 
2568 #ifdef _WIN64
2569       // Special care for fast JNI field accessors.
2570       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2571       // in and the heap gets shrunk before the field access.
2572       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2573         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2574         if (addr != (address)-1) {
2575           return Handle_Exception(exceptionInfo, addr);
2576         }
2577       }
2578 #endif
2579 
2580       // Stack overflow or null pointer exception in native code.
2581       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2582                    exceptionInfo->ContextRecord);
2583       return EXCEPTION_CONTINUE_SEARCH;
2584     } // /EXCEPTION_ACCESS_VIOLATION
2585     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2586 
2587     if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
2588       CompiledMethod* nm = NULL;
2589       JavaThread* thread = (JavaThread*)t;
2590       if (in_java) {
2591         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2592         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
2593       }
2594 
2595       bool is_unsafe_arraycopy = (thread->thread_state() == _thread_in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
2596       if (((thread->thread_state() == _thread_in_vm ||
2597            thread->thread_state() == _thread_in_native ||
2598            is_unsafe_arraycopy) &&
2599           thread->doing_unsafe_access()) ||
2600           (nm != NULL && nm->has_unsafe_access())) {
2601         address next_pc =  Assembler::locate_next_instruction(pc);
2602         if (is_unsafe_arraycopy) {
2603           next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
2604         }
2605         return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
2606       }
2607     }
2608 
2609     if (in_java) {
2610       switch (exception_code) {
2611       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2612         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2613 
2614       case EXCEPTION_INT_OVERFLOW:
2615         return Handle_IDiv_Exception(exceptionInfo);
2616 
2617       } // switch
2618     }
2619     if (((thread->thread_state() == _thread_in_Java) ||
2620          (thread->thread_state() == _thread_in_native)) &&
2621          exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2622       LONG result=Handle_FLT_Exception(exceptionInfo);
2623       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2624     }
2625   }
2626 
2627   if (exception_code != EXCEPTION_BREAKPOINT) {
2628     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2629                  exceptionInfo->ContextRecord);
2630   }
2631   return EXCEPTION_CONTINUE_SEARCH;
2632 }
2633 
2634 #ifndef _WIN64
2635 // Special care for fast JNI accessors.
2636 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2637 // the heap gets shrunk before the field access.
2638 // Need to install our own structured exception handler since native code may
2639 // install its own.
2640 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2641   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2642   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2643     address pc = (address) exceptionInfo->ContextRecord->Eip;
2644     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2645     if (addr != (address)-1) {
2646       return Handle_Exception(exceptionInfo, addr);
2647     }
2648   }
2649   return EXCEPTION_CONTINUE_SEARCH;
2650 }
2651 
2652 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2653   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2654                                                      jobject obj,           \
2655                                                      jfieldID fieldID) {    \
2656     __try {                                                                 \
2657       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2658                                                                  obj,       \
2659                                                                  fieldID);  \
2660     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2661                                               _exception_info())) {         \
2662     }                                                                       \
2663     return 0;                                                               \
2664   }
2665 
2666 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2667 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2668 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2669 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2670 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2671 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2672 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2673 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2674 
2675 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2676   switch (type) {
2677   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2678   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2679   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2680   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2681   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2682   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2683   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2684   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2685   default:        ShouldNotReachHere();
2686   }
2687   return (address)-1;
2688 }
2689 #endif
2690 
2691 // Virtual Memory
2692 
2693 int os::vm_page_size() { return os::win32::vm_page_size(); }
2694 int os::vm_allocation_granularity() {
2695   return os::win32::vm_allocation_granularity();
2696 }
2697 
2698 // Windows large page support is available on Windows 2003. In order to use
2699 // large page memory, the administrator must first assign additional privilege
2700 // to the user:
2701 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2702 //   + select Local Policies -> User Rights Assignment
2703 //   + double click "Lock pages in memory", add users and/or groups
2704 //   + reboot
2705 // Note the above steps are needed for administrator as well, as administrators
2706 // by default do not have the privilege to lock pages in memory.
2707 //
2708 // Note about Windows 2003: although the API supports committing large page
2709 // memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experimentation that it only uses large pages if the entire
2711 // memory region is reserved and committed in a single VirtualAlloc() call.
2712 // This makes Windows large page support more or less like Solaris ISM, in
2713 // that the entire heap must be committed upfront. This probably will change
2714 // in the future, if so the code below needs to be revisited.
2715 
2716 #ifndef MEM_LARGE_PAGES
2717   #define MEM_LARGE_PAGES 0x20000000
2718 #endif
2719 
2720 static HANDLE    _hProcess;
2721 static HANDLE    _hToken;
2722 
2723 // Container for NUMA node list info
2724 class NUMANodeListHolder {
2725  private:
2726   int *_numa_used_node_list;  // allocated below
2727   int _numa_used_node_count;
2728 
2729   void free_node_list() {
2730     FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2731   }
2732 
2733  public:
2734   NUMANodeListHolder() {
2735     _numa_used_node_count = 0;
2736     _numa_used_node_list = NULL;
2737     // do rest of initialization in build routine (after function pointers are set up)
2738   }
2739 
2740   ~NUMANodeListHolder() {
2741     free_node_list();
2742   }
2743 
2744   bool build() {
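    // Collect the NUMA nodes whose processor masks intersect this process's
    // affinity mask; interleaving is only worthwhile if more than one node is usable.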
2745     DWORD_PTR proc_aff_mask;
2746     DWORD_PTR sys_aff_mask;
2747     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2748     ULONG highest_node_number;
2749     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2750     free_node_list();
2751     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2752     for (unsigned int i = 0; i <= highest_node_number; i++) {
2753       ULONGLONG proc_mask_numa_node;
2754       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2755       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2756         _numa_used_node_list[_numa_used_node_count++] = i;
2757       }
2758     }
2759     return (_numa_used_node_count > 1);
2760   }
2761 
2762   int get_count() { return _numa_used_node_count; }
2763   int get_node_list_entry(int n) {
2764     // for indexes out of range, returns -1
2765     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2766   }
2767 
2768 } numa_node_list_holder;
2769 
2770 
2771 
2772 static size_t _large_page_size = 0;
2773 
2774 static bool request_lock_memory_privilege() {
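  // Enable SeLockMemoryPrivilege ("Lock pages in memory") in the process token;
  // VirtualAlloc with MEM_LARGE_PAGES requires this privilege.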
2775   _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2776                           os::current_process_id());
2777 
2778   LUID luid;
2779   if (_hProcess != NULL &&
2780       OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2781       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2782 
2783     TOKEN_PRIVILEGES tp;
2784     tp.PrivilegeCount = 1;
2785     tp.Privileges[0].Luid = luid;
2786     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2787 
2788     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2789     // privilege. Check GetLastError() too. See MSDN document.
2790     if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2791         (GetLastError() == ERROR_SUCCESS)) {
2792       return true;
2793     }
2794   }
2795 
2796   return false;
2797 }
2798 
2799 static void cleanup_after_large_page_init() {
2800   if (_hProcess) CloseHandle(_hProcess);
2801   _hProcess = NULL;
2802   if (_hToken) CloseHandle(_hToken);
2803   _hToken = NULL;
2804 }
2805 
2806 static bool numa_interleaving_init() {
2807   bool success = false;
2808   bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2809 
  // Print a warning if the UseNUMAInterleaving flag was specified on the command line.
2811   bool warn_on_failure = use_numa_interleaving_specified;
2812 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2813 
2814   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2815   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2816   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2817 
2818   if (numa_node_list_holder.build()) {
2819     if (log_is_enabled(Debug, os, cpu)) {
2820       Log(os, cpu) log;
2821       log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2822       for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2823         log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2824       }
2825     }
2826     success = true;
2827   } else {
2828     WARN("Process does not cover multiple NUMA nodes.");
2829   }
2830   if (!success) {
2831     if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2832   }
2833   return success;
2834 #undef WARN
2835 }
2836 
// This routine is used whenever we need to reserve a contiguous VA range
// but must make separate VirtualAlloc calls for each piece of the range.
2839 // Reasons for doing this:
2840 //  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
2841 //  * UseNUMAInterleaving requires a separate node for each piece
2842 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2843                                          DWORD prot,
2844                                          bool should_inject_error = false) {
2845   char * p_buf;
2846   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2847   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2848   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2849 
  // First reserve enough address space in advance, since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits, but WS2003 does not allow reserving large page space.
  // So we just use 4K pages for the reserve; this gives us a legal contiguous
  // address space. Then we deallocate that reservation and re-allocate
  // using large pages.
2856   const size_t size_of_reserve = bytes + chunk_size;
2857   if (bytes > size_of_reserve) {
2858     // Overflowed.
2859     return NULL;
2860   }
2861   p_buf = (char *) VirtualAlloc(addr,
2862                                 size_of_reserve,  // size of Reserve
2863                                 MEM_RESERVE,
2864                                 PAGE_READWRITE);
2865   // If reservation failed, return NULL
2866   if (p_buf == NULL) return NULL;
2867   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2868   os::release_memory(p_buf, bytes + chunk_size);
2869 
  // We still need to round up to a page boundary (in case we are using large pages),
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size);
  // instead we handle this in the bytes_to_rq computation below.
2873   p_buf = align_up(p_buf, page_size);
2874 
2875   // now go through and allocate one chunk at a time until all bytes are
2876   // allocated
2877   size_t  bytes_remaining = bytes;
2878   // An overflow of align_up() would have been caught above
2879   // in the calculation of size_of_reserve.
2880   char * next_alloc_addr = p_buf;
2881   HANDLE hProc = GetCurrentProcess();
2882 
2883 #ifdef ASSERT
2884   // Variable for the failure injection
2885   int ran_num = os::random();
2886   size_t fail_after = ran_num % bytes;
2887 #endif
2888 
2889   int count=0;
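  // Allocate and commit the range chunk by chunk; with UseNUMAInterleaving the
  // chunks are distributed round-robin across the usable NUMA nodes.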
2890   while (bytes_remaining) {
2891     // select bytes_to_rq to get to the next chunk_size boundary
2892 
2893     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
2894     // Note allocate and commit
2895     char * p_new;
2896 
2897 #ifdef ASSERT
2898     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
2899 #else
2900     const bool inject_error_now = false;
2901 #endif
2902 
2903     if (inject_error_now) {
2904       p_new = NULL;
2905     } else {
2906       if (!UseNUMAInterleaving) {
2907         p_new = (char *) VirtualAlloc(next_alloc_addr,
2908                                       bytes_to_rq,
2909                                       flags,
2910                                       prot);
2911       } else {
2912         // get the next node to use from the used_node_list
2913         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
2914         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
2915         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
2916       }
2917     }
2918 
2919     if (p_new == NULL) {
2920       // Free any allocated pages
2921       if (next_alloc_addr > p_buf) {
2922         // Some memory was committed so release it.
2923         size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so we
        // need to create a dummy 'reserve' record to match
2926         // the release.
2927         MemTracker::record_virtual_memory_reserve((address)p_buf,
2928                                                   bytes_to_release, CALLER_PC);
2929         os::release_memory(p_buf, bytes_to_release);
2930       }
2931 #ifdef ASSERT
2932       if (should_inject_error) {
2933         log_develop_debug(pagesize)("Reserving pages individually failed.");
2934       }
2935 #endif
2936       return NULL;
2937     }
2938 
2939     bytes_remaining -= bytes_to_rq;
2940     next_alloc_addr += bytes_to_rq;
2941     count++;
2942   }
2943   // Although the memory is allocated individually, it is returned as one.
2944   // NMT records it as one block.
2945   if ((flags & MEM_COMMIT) != 0) {
2946     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
2947   } else {
2948     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
2949   }
2950 
2951   // made it this far, success
2952   return p_buf;
2953 }
2954 
2955 
2956 
2957 void os::large_page_init() {
2958   if (!UseLargePages) return;
2959 
2960   // print a warning if any large page related flag is specified on command line
2961   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
2962                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
2963   bool success = false;
2964 
2965 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2966   if (request_lock_memory_privilege()) {
2967     size_t s = GetLargePageMinimum();
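         // GetLargePageMinimum() returns the smallest large page size supported by
         // the processor (typically 2M on x64), or 0 if large pages are unavailable.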
2968     if (s) {
2969 #if defined(IA32) || defined(AMD64)
2970       if (s > 4*M || LargePageSizeInBytes > 4*M) {
2971         WARN("JVM cannot use large pages bigger than 4mb.");
2972       } else {
2973 #endif
2974         if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
2975           _large_page_size = LargePageSizeInBytes;
2976         } else {
2977           _large_page_size = s;
2978         }
2979         success = true;
2980 #if defined(IA32) || defined(AMD64)
2981       }
2982 #endif
2983     } else {
2984       WARN("Large page is not supported by the processor.");
2985     }
2986   } else {
2987     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
2988   }
2989 #undef WARN
2990 
2991   const size_t default_page_size = (size_t) vm_page_size();
2992   if (success && _large_page_size > default_page_size) {
2993     _page_sizes[0] = _large_page_size;
2994     _page_sizes[1] = default_page_size;
2995     _page_sizes[2] = 0;
2996   }
2997 
2998   cleanup_after_large_page_init();
2999   UseLargePages = success;
3000 }
3001 
3002 int os::create_file_for_heap(const char* dir) {
3003 
3004   const char name_template[] = "/jvmheap.XXXXXX";
3005 
3006   size_t fullname_len = strlen(dir) + strlen(name_template);
3007   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
3008   if (fullname == NULL) {
3009     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
3010     return -1;
3011   }
3012   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
3013   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
3014 
3015   os::native_path(fullname);
3016 
3017   char *path = _mktemp(fullname);
3018   if (path == NULL) {
3019     warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
3020     os::free(fullname);
3021     return -1;
3022   }
3023 
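       // _O_TEMPORARY makes Windows delete the file once the last descriptor is
       // closed; _O_EXCL makes the open fail if the file unexpectedly exists already.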
3024   int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
3025 
3026   os::free(fullname);
3027   if (fd < 0) {
3028     warning("Problem opening file for heap (%s)", os::strerror(errno));
3029     return -1;
3030   }
3031   return fd;
3032 }
3033 
3034 // If 'base' is not NULL, the function will return NULL if it cannot get 'base'.
3035 char* os::map_memory_to_file(char* base, size_t size, int fd) {
3036   assert(fd != -1, "File descriptor is not valid");
3037 
3038   HANDLE fh = (HANDLE)_get_osfhandle(fd);
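       // CreateFileMapping takes the maximum mapping size as separate high and low
       // 32-bit halves, hence the explicit split on _LP64 below.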
3039 #ifdef _LP64
3040   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3041     (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3042 #else
3043   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3044     0, (DWORD)size, NULL);
3045 #endif
3046   if (fileMapping == NULL) {
3047     if (GetLastError() == ERROR_DISK_FULL) {
3048       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
3049     }
3050     else {
3051       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3052     }
3053 
3054     return NULL;
3055   }
3056 
3057   LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
3058 
3059   CloseHandle(fileMapping);
3060 
3061   return (char*)addr;
3062 }
3063 
3064 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
3065   assert(fd != -1, "File descriptor is not valid");
3066   assert(base != NULL, "Base address cannot be NULL");
3067 
3068   release_memory(base, size);
3069   return map_memory_to_file(base, size, fd);
3070 }
3071 
3072 // On win32, one cannot release just a part of reserved memory; it's an
3073 // all-or-nothing deal.  When we split a reservation, we must break the
3074 // reservation into two separate reservations.
3075 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3076                                   bool realloc) {
3077   if (size > 0) {
3078     release_memory(base, size);
3079     if (realloc) {
3080       reserve_memory(split, base);
3081     }
3082     if (size != split) {
3083       reserve_memory(size - split, base + split);
3084     }
3085   }
3086 }
3087 
3088 // Multiple threads can race in this code, and it is not possible to unmap small sections of
3089 // virtual space to get the requested alignment the way POSIX-like OSes can.
3090 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3091 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3092   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3093          "Alignment must be a multiple of allocation granularity (page size)");
3094   assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
3095 
3096   size_t extra_size = size + alignment;
3097   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3098 
3099   char* aligned_base = NULL;
3100 
3101   do {
3102     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3103     if (extra_base == NULL) {
3104       return NULL;
3105     }
3106     // Do manual alignment
3107     aligned_base = align_up(extra_base, alignment);
3108 
3109     if (file_desc != -1) {
3110       os::unmap_memory(extra_base, extra_size);
3111     } else {
3112       os::release_memory(extra_base, extra_size);
3113     }
3114 
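         // Try to re-reserve exactly [aligned_base, aligned_base + size). Another
         // thread may grab part of that range between the release above and this
         // call, in which case the reserve returns NULL and we retry.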
3115     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3116 
3117   } while (aligned_base == NULL);
3118 
3119   return aligned_base;
3120 }
3121 
3122 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3123   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3124          "reserve alignment");
3125   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3126   char* res;
3127   // note that if UseLargePages is on, all the areas that require interleaving
3128   // will go thru reserve_memory_special rather than thru here.
3129   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3130   if (!use_individual) {
3131     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3132   } else {
3133     elapsedTimer reserveTimer;
3134     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3135     // in numa interleaving, we have to allocate pages individually
3136     // (well really chunks of NUMAInterleaveGranularity size)
3137     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3138     if (res == NULL) {
3139       warning("NUMA page allocation failed");
3140     }
3141     if (Verbose && PrintMiscellaneous) {
3142       reserveTimer.stop();
3143       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3144                     reserveTimer.milliseconds(), reserveTimer.ticks());
3145     }
3146   }
3147   assert(res == NULL || addr == NULL || addr == res,
3148          "Unexpected address from reserve.");
3149 
3150   return res;
3151 }
3152 
3153 // Reserve memory at an arbitrary address, only if that area is
3154 // available (and not reserved for something else).
3155 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3156   // Windows os::reserve_memory() fails if the requested address range is
3157   // not available.
3158   return reserve_memory(bytes, requested_addr);
3159 }
3160 
3161 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
3162   assert(file_desc >= 0, "file_desc is not valid");
3163   return map_memory_to_file(requested_addr, bytes, file_desc);
3164 }
3165 
3166 size_t os::large_page_size() {
3167   return _large_page_size;
3168 }
3169 
3170 bool os::can_commit_large_page_memory() {
3171   // Windows only uses large page memory when the entire region is reserved
3172   // and committed in a single VirtualAlloc() call. This may change in the
3173   // future, but with Windows 2003 it's not possible to commit on demand.
3174   return false;
3175 }
3176 
3177 bool os::can_execute_large_page_memory() {
3178   return true;
3179 }
3180 
3181 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3182                                  bool exec) {
3183   assert(UseLargePages, "only for large pages");
3184 
3185   if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3186     return NULL; // Fallback to small pages.
3187   }
3188 
3189   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3190   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3191 
3192   // with large pages, there are two cases where we need to use Individual Allocation
3193   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3194   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3195   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3196     log_debug(pagesize)("Reserving large pages individually.");
3197 
3198     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3199     if (p_buf == NULL) {
3200       // give an appropriate warning message
3201       if (UseNUMAInterleaving) {
3202         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3203       }
3204       if (UseLargePagesIndividualAllocation) {
3205         warning("Individually allocated large pages failed, "
3206                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3207       }
3208       return NULL;
3209     }
3210 
3211     return p_buf;
3212 
3213   } else {
3214     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3215 
3216     // normal policy: just allocate it all at once, reusing the flags
3217     // (MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES) computed above
3218     char * res = (char *)VirtualAlloc(addr, bytes, flags, prot);
3219     if (res != NULL) {
3220       MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, CALLER_PC);
3221     }
3222 
3223     return res;
3224   }
3225 }
3226 
3227 bool os::release_memory_special(char* base, size_t bytes) {
3228   assert(base != NULL, "Sanity check");
3229   return release_memory(base, bytes);
3230 }
3231 
3232 void os::print_statistics() {
3233 }
3234 
3235 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3236   int err = os::get_last_error();
3237   char buf[256];
3238   size_t buf_len = os::lasterror(buf, sizeof(buf));
3239   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3240           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3241           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3242 }
3243 
3244 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3245   if (bytes == 0) {
3246     // Don't bother the OS with noops.
3247     return true;
3248   }
3249   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3250   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3251   // Don't attempt to print anything if the OS call fails. We're
3252   // probably low on resources, so the print itself may cause crashes.
3253 
3254   // unless we have NUMAInterleaving enabled, the range of a commit
3255   // is always within a reserve covered by a single VirtualAlloc;
3256   // in that case we can just do a single commit for the requested size
3257   if (!UseNUMAInterleaving) {
3258     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3259       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3260       return false;
3261     }
3262     if (exec) {
3263       DWORD oldprot;
3264       // Windows doc says to use VirtualProtect to get execute permissions
3265       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3266         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3267         return false;
3268       }
3269     }
3270     return true;
3271   } else {
3272 
3273     // when NUMAInterleaving is enabled, the commit might cover a range that
3274     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3275     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3276     // returns represents the number of bytes that can be committed in one step.
3277     size_t bytes_remaining = bytes;
3278     char * next_alloc_addr = addr;
3279     while (bytes_remaining > 0) {
3280       MEMORY_BASIC_INFORMATION alloc_info;
3281       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3282       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3283       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3284                        PAGE_READWRITE) == NULL) {
3285         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3286                                             exec);)
3287         return false;
3288       }
3289       if (exec) {
3290         DWORD oldprot;
3291         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3292                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3293           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3294                                               exec);)
3295           return false;
3296         }
3297       }
3298       bytes_remaining -= bytes_to_rq;
3299       next_alloc_addr += bytes_to_rq;
3300     }
3301   }
3302   // if we made it this far, return true
3303   return true;
3304 }
3305 
3306 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3307                           bool exec) {
3308   // alignment_hint is ignored on this OS
3309   return pd_commit_memory(addr, size, exec);
3310 }
3311 
3312 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3313                                   const char* mesg) {
3314   assert(mesg != NULL, "mesg must be specified");
3315   if (!pd_commit_memory(addr, size, exec)) {
3316     warn_fail_commit_memory(addr, size, exec);
3317     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3318   }
3319 }
3320 
3321 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3322                                   size_t alignment_hint, bool exec,
3323                                   const char* mesg) {
3324   // alignment_hint is ignored on this OS
3325   pd_commit_memory_or_exit(addr, size, exec, mesg);
3326 }
3327 
3328 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3329   if (bytes == 0) {
3330     // Don't bother the OS with noops.
3331     return true;
3332   }
3333   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3334   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
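       // MEM_DECOMMIT frees the committed storage but keeps the address range
       // reserved; MEM_RELEASE (see pd_release_memory below) frees the reservation itself.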
3335   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3336 }
3337 
3338 bool os::pd_release_memory(char* addr, size_t bytes) {
3339   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3340 }
3341 
3342 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3343   return os::commit_memory(addr, size, !ExecMem);
3344 }
3345 
3346 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3347   return os::uncommit_memory(addr, size);
3348 }
3349 
3350 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3351   uint count = 0;
3352   bool ret = false;
3353   size_t bytes_remaining = bytes;
3354   char * next_protect_addr = addr;
3355 
3356   // Use VirtualQuery() to get the chunk size.
3357   while (bytes_remaining) {
3358     MEMORY_BASIC_INFORMATION alloc_info;
3359     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3360       return false;
3361     }
3362 
3363     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3364     // We used a different API in allocate_pages_individually() depending on UseNUMAInterleaving,
3365     // but we don't need to distinguish here as both cases are protected by the same VirtualProtect call.
3366     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3367     if (!ret) {
3368       warning("Failed protecting pages individually for chunk #%u", count);
3369       return false;
3370     }
3371 
3372     bytes_remaining -= bytes_to_protect;
3373     next_protect_addr += bytes_to_protect;
3374     count++;
3375   }
3376   return ret;
3377 }
3378 
3379 // Set protections specified
3380 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3381                         bool is_committed) {
3382   unsigned int p = 0;
3383   switch (prot) {
3384   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3385   case MEM_PROT_READ: p = PAGE_READONLY; break;
3386   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3387   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3388   default:
3389     ShouldNotReachHere();
3390   }
3391 
3392   DWORD old_status;
3393 
3394   // Strangely enough, on Win32 one can change protection only for committed
3395   // memory. Not a big deal anyway, as 'bytes' here is at most 64K.
3396   if (!is_committed) {
3397     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3398                           "cannot commit protection page");
3399   }
3400   // One cannot use os::guard_memory() here, as on Win32 guard pages
3401   // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3402   //
3403   // Pages in the region become guard pages. Any attempt to access a guard page
3404   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3405   // the guard page status. Guard pages thus act as a one-time access alarm.
3406   bool ret;
3407   if (UseNUMAInterleaving) {
3408     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3409     // so we must protect the chunks individually.
3410     ret = protect_pages_individually(addr, bytes, p, &old_status);
3411   } else {
3412     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3413   }
3414 #ifdef ASSERT
3415   if (!ret) {
3416     int err = os::get_last_error();
3417     char buf[256];
3418     size_t buf_len = os::lasterror(buf, sizeof(buf));
3419     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3420           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3421           buf_len != 0 ? buf : "<no_error_string>", err);
3422   }
3423 #endif
3424   return ret;
3425 }
3426 
3427 bool os::guard_memory(char* addr, size_t bytes) {
3428   DWORD old_status;
3429   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3430 }
3431 
3432 bool os::unguard_memory(char* addr, size_t bytes) {
3433   DWORD old_status;
3434   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3435 }
3436 
3437 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3438 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3439 void os::numa_make_global(char *addr, size_t bytes)    { }
3440 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3441 bool os::numa_topology_changed()                       { return false; }
3442 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3443 int os::numa_get_group_id()                            { return 0; }
3444 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3445   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3446     // Provide an answer for UMA systems
3447     ids[0] = 0;
3448     return 1;
3449   } else {
3450     // check for size bigger than actual groups_num
3451     size = MIN2(size, numa_get_groups_num());
3452     for (int i = 0; i < (int)size; i++) {
3453       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3454     }
3455     return size;
3456   }
3457 }
3458 
3459 bool os::get_page_info(char *start, page_info* info) {
3460   return false;
3461 }
3462 
3463 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3464                      page_info* page_found) {
3465   return end;
3466 }
3467 
3468 char* os::non_memory_address_word() {
3469   // Must never look like an address returned by reserve_memory,
3470   // even in its subfields (as defined by the CPU immediate fields,
3471   // if the CPU splits constants across multiple instructions).
3472   return (char*)-1;
3473 }
3474 
3475 #define MAX_ERROR_COUNT 100
3476 #define SYS_THREAD_ERROR 0xffffffffUL
3477 
3478 void os::pd_start_thread(Thread* thread) {
3479   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3480   // Returns previous suspend state:
3481   // 0:  Thread was not suspended
3482   // 1:  Thread is running now
3483   // >1: Thread is still suspended.
3484   assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3485 }
3486 
3487 
3488 
3489 // Short sleep, direct OS call.
3490 //
3491 // ms = 0, means allow others (if any) to run.
3492 //
3493 void os::naked_short_sleep(jlong ms) {
3494   assert(ms < 1000, "Un-interruptable sleep, short time use only");
3495   Sleep(ms);
3496 }
3497 
3498 // Windows does not provide sleep functionality with nanosecond resolution, so we
3499 // try to approximate this with spinning combined with yielding if another thread
3500 // is ready to run on the current processor.
3501 void os::naked_short_nanosleep(jlong ns) {
3502   assert(ns > -1 && ns < NANOUNITS, "Un-interruptable sleep, short time use only");
3503 
3504   int64_t start = os::javaTimeNanos();
3505   do {
3506     if (SwitchToThread() == 0) {
3507       // Nothing else is ready to run on this cpu, spin a little
3508       SpinPause();
3509     }
3510   } while (os::javaTimeNanos() - start < ns);
3511 }
3512 
3513 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3514 void os::infinite_sleep() {
3515   while (true) {    // sleep forever ...
3516     Sleep(100000);  // ... 100 seconds at a time
3517   }
3518 }
3519 
3520 typedef BOOL (WINAPI * STTSignature)(void);
3521 
3522 void os::naked_yield() {
3523   // Consider passing back the return value from SwitchToThread().
3524   SwitchToThread();
3525 }
3526 
3527 // Win32 only gives you access to seven real priorities at a time,
3528 // so we compress Java's ten down to seven.  It would be better
3529 // if we dynamically adjusted relative priorities.
3530 
3531 int os::java_to_os_priority[CriticalPriority + 1] = {
3532   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3533   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3534   THREAD_PRIORITY_LOWEST,                       // 2
3535   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3536   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3537   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3538   THREAD_PRIORITY_NORMAL,                       // 6
3539   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3540   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3541   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3542   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3543   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3544 };
3545 
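     // Alternative mapping, installed by prio_init() when ThreadPriorityPolicy == 1;
     // it spreads Java priorities more aggressively, up to THREAD_PRIORITY_TIME_CRITICAL.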
3546 int prio_policy1[CriticalPriority + 1] = {
3547   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3548   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3549   THREAD_PRIORITY_LOWEST,                       // 2
3550   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3551   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3552   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3553   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3554   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3555   THREAD_PRIORITY_HIGHEST,                      // 8
3556   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3557   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3558   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3559 };
3560 
3561 static int prio_init() {
3562   // If ThreadPriorityPolicy is 1, switch tables
3563   if (ThreadPriorityPolicy == 1) {
3564     int i;
3565     for (i = 0; i < CriticalPriority + 1; i++) {
3566       os::java_to_os_priority[i] = prio_policy1[i];
3567     }
3568   }
3569   if (UseCriticalJavaThreadPriority) {
3570     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3571   }
3572   return 0;
3573 }
3574 
3575 OSReturn os::set_native_priority(Thread* thread, int priority) {
3576   if (!UseThreadPriorities) return OS_OK;
3577   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3578   return ret ? OS_OK : OS_ERR;
3579 }
3580 
3581 OSReturn os::get_native_priority(const Thread* const thread,
3582                                  int* priority_ptr) {
3583   if (!UseThreadPriorities) {
3584     *priority_ptr = java_to_os_priority[NormPriority];
3585     return OS_OK;
3586   }
3587   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3588   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3589     assert(false, "GetThreadPriority failed");
3590     return OS_ERR;
3591   }
3592   *priority_ptr = os_prio;
3593   return OS_OK;
3594 }
3595 
3596 void os::interrupt(Thread* thread) {
3597   debug_only(Thread::check_for_dangling_thread_pointer(thread);)
3598 
3599   OSThread* osthread = thread->osthread();
3600   osthread->set_interrupted(true);
3601   // More than one thread can get here with the same value of osthread,
3602   // resulting in multiple notifications.  We do, however, want the store
3603   // to interrupted() to be visible to other threads before we post
3604   // the interrupt event.
3605   OrderAccess::release();
3606   SetEvent(osthread->interrupt_event());
3607   // For JSR166:  unpark after setting status
3608   if (thread->is_Java_thread()) {
3609     ((JavaThread*)thread)->parker()->unpark();
3610   }
3611 
3612   ParkEvent * ev = thread->_ParkEvent;
3613   if (ev != NULL) ev->unpark();
3614 
3615   ev = thread->_SleepEvent;
3616   if (ev != NULL) ev->unpark();
3617 }
3618 
3619 
3620 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3621   debug_only(Thread::check_for_dangling_thread_pointer(thread);)
3622 
3623   OSThread* osthread = thread->osthread();
3624   // There is no synchronization between the setting of the interrupt
3625   // and it being cleared here. It is critical - see 6535709 - that
3626   // we only clear the interrupt state, and reset the interrupt event,
3627   // if we are going to report that we were indeed interrupted - else
3628   // an interrupt can be "lost", leading to spurious wakeups or lost wakeups
3629   // depending on the timing. By also checking the thread's interrupt event we
3630   // verify that the thread really was interrupted, which prevents spurious wakeups.
3631   bool interrupted = osthread->interrupted() && (WaitForSingleObject(osthread->interrupt_event(), 0) == WAIT_OBJECT_0);
3632   if (interrupted && clear_interrupted) {
3633     osthread->set_interrupted(false);
3634     ResetEvent(osthread->interrupt_event());
3635   } // Otherwise leave the interrupted state alone
3636 
3637   return interrupted;
3638 }
3639 
3640 // GetCurrentThreadId() returns DWORD
3641 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3642 
3643 static int _initial_pid = 0;
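     // _initial_pid is set in os::init(); the _getpid() fallback covers any calls
     // made before initialization.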
3644 
3645 int os::current_process_id() {
3646   return (_initial_pid ? _initial_pid : _getpid());
3647 }
3648 
3649 int    os::win32::_vm_page_size              = 0;
3650 int    os::win32::_vm_allocation_granularity = 0;
3651 int    os::win32::_processor_type            = 0;
3652 // Processor level is not available on non-NT systems, use vm_version instead
3653 int    os::win32::_processor_level           = 0;
3654 julong os::win32::_physical_memory           = 0;
3655 size_t os::win32::_default_stack_size        = 0;
3656 
3657 intx          os::win32::_os_thread_limit    = 0;
3658 volatile intx os::win32::_os_thread_count    = 0;
3659 
3660 bool   os::win32::_is_windows_server         = false;
3661 
3662 // 6573254
3663 // Currently, the bug is observed across all the supported Windows releases,
3664 // including the latest one (as of this writing - Windows Server 2012 R2)
3665 bool   os::win32::_has_exit_bug              = true;
3666 
3667 void os::win32::initialize_system_info() {
3668   SYSTEM_INFO si;
3669   GetSystemInfo(&si);
3670   _vm_page_size    = si.dwPageSize;
3671   _vm_allocation_granularity = si.dwAllocationGranularity;
3672   _processor_type  = si.dwProcessorType;
3673   _processor_level = si.wProcessorLevel;
3674   set_processor_count(si.dwNumberOfProcessors);
3675 
3676   MEMORYSTATUSEX ms;
3677   ms.dwLength = sizeof(ms);
3678 
3679   // also returns ullAvailPhys (free physical memory bytes), ullTotalVirtual, ullAvailVirtual,
3680   // and dwMemoryLoad (% of memory in use)
3681   GlobalMemoryStatusEx(&ms);
3682   _physical_memory = ms.ullTotalPhys;
3683 
3684   if (FLAG_IS_DEFAULT(MaxRAM)) {
3685     // Adjust MaxRAM according to the maximum virtual address space available.
3686     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3687   }
3688 
3689   OSVERSIONINFOEX oi;
3690   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3691   GetVersionEx((OSVERSIONINFO*)&oi);
3692   switch (oi.dwPlatformId) {
3693   case VER_PLATFORM_WIN32_NT:
3694     {
3695       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3696       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3697           oi.wProductType == VER_NT_SERVER) {
3698         _is_windows_server = true;
3699       }
3700     }
3701     break;
3702   default: fatal("Unknown platform");
3703   }
3704 
3705   _default_stack_size = os::current_stack_size();
3706   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3707   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3708          "stack size not a multiple of page size");
3709 
3710   initialize_performance_counter();
3711 }
3712 
3713 
3714 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3715                                       int ebuflen) {
3716   char path[MAX_PATH];
3717   DWORD size;
3718   DWORD pathLen = (DWORD)sizeof(path);
3719   HINSTANCE result = NULL;
3720 
3721   // only allow library name without path component
3722   assert(strchr(name, '\\') == NULL, "path not allowed");
3723   assert(strchr(name, ':') == NULL, "path not allowed");
3724   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3725     jio_snprintf(ebuf, ebuflen,
3726                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3727     return NULL;
3728   }
3729 
3730   // search system directory
3731   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3732     if (size >= pathLen) {
3733       return NULL; // truncated
3734     }
3735     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3736       return NULL; // truncated
3737     }
3738     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3739       return result;
3740     }
3741   }
3742 
3743   // try Windows directory
3744   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3745     if (size >= pathLen) {
3746       return NULL; // truncated
3747     }
3748     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3749       return NULL; // truncated
3750     }
3751     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3752       return result;
3753     }
3754   }
3755 
3756   jio_snprintf(ebuf, ebuflen,
3757                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3758   return NULL;
3759 }
3760 
3761 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3762 #define EXIT_TIMEOUT 300000 /* 5 minutes */
3763 
3764 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3765   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3766   return TRUE;
3767 }
3768 
3769 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3770   // Basic approach:
3771   //  - Each exiting thread registers its intent to exit and then does so.
3772   //  - A thread trying to terminate the process must wait for all
3773   //    threads currently exiting to complete their exit.
3774 
3775   if (os::win32::has_exit_bug()) {
3776     // The array holds handles of the threads that have started exiting by calling
3777     // _endthreadex().
3778     // Should be large enough to avoid blocking the exiting thread due to lack of
3779     // a free slot.
3780     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3781     static int handle_count = 0;
3782 
3783     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3784     static CRITICAL_SECTION crit_sect;
3785     static volatile DWORD process_exiting = 0;
3786     int i, j;
3787     DWORD res;
3788     HANDLE hproc, hthr;
3789 
3790     // We only attempt to register threads until a process exiting
3791     // thread manages to set the process_exiting flag. Any threads
3792     // that come through here after the process_exiting flag is set
3793     // are unregistered and will be caught in the SuspendThread()
3794     // infinite loop below.
3795     bool registered = false;
3796 
3797     // The first thread that reached this point, initializes the critical section.
3798     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3799       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3800     } else if (OrderAccess::load_acquire(&process_exiting) == 0) {
3801       if (what != EPT_THREAD) {
3802         // Atomically set process_exiting before the critical section
3803         // to increase the visibility between racing threads.
3804         Atomic::cmpxchg(GetCurrentThreadId(), &process_exiting, (DWORD)0);
3805       }
3806       EnterCriticalSection(&crit_sect);
3807 
3808       if (what == EPT_THREAD && OrderAccess::load_acquire(&process_exiting) == 0) {
3809         // Remove from the array those handles of the threads that have completed exiting.
3810         for (i = 0, j = 0; i < handle_count; ++i) {
3811           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3812           if (res == WAIT_TIMEOUT) {
3813             handles[j++] = handles[i];
3814           } else {
3815             if (res == WAIT_FAILED) {
3816               warning("WaitForSingleObject failed (%u) in %s: %d\n",
3817                       GetLastError(), __FILE__, __LINE__);
3818             }
3819             // Don't keep the handle, if we failed waiting for it.
3820             CloseHandle(handles[i]);
3821           }
3822         }
3823 
3824         // If there's no free slot in the array of the kept handles, we'll have to
3825         // wait until at least one thread completes exiting.
3826         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3827           // Raise the priority of the oldest exiting thread to increase its chances
3828           // to complete sooner.
3829           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3830           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3831           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3832             i = (res - WAIT_OBJECT_0);
3833             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3834             for (; i < handle_count; ++i) {
3835               handles[i] = handles[i + 1];
3836             }
3837           } else {
3838             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3839                     (res == WAIT_FAILED ? "failed" : "timed out"),
3840                     GetLastError(), __FILE__, __LINE__);
3841             // Don't keep handles, if we failed waiting for them.
3842             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3843               CloseHandle(handles[i]);
3844             }
3845             handle_count = 0;
3846           }
3847         }
3848 
3849         // Store a duplicate of the current thread handle in the array of handles.
3850         hproc = GetCurrentProcess();
3851         hthr = GetCurrentThread();
3852         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3853                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
3854           warning("DuplicateHandle failed (%u) in %s: %d\n",
3855                   GetLastError(), __FILE__, __LINE__);
3856 
3857           // We can't register this thread (no more handles) so this thread
3858           // may be racing with a thread that is calling exit(). If the thread
3859           // that is calling exit() has managed to set the process_exiting
3860           // flag, then this thread will be caught in the SuspendThread()
3861           // infinite loop below which closes that race. A small timing
3862           // window remains before the process_exiting flag is set, but it
3863           // is only exposed when we are out of handles.
3864         } else {
3865           ++handle_count;
3866           registered = true;
3867 
3868           // The current exiting thread has stored its handle in the array, and now
3869           // should leave the critical section before calling _endthreadex().
3870         }
3871 
3872       } else if (what != EPT_THREAD && handle_count > 0) {
3873         jlong start_time, finish_time, timeout_left;
3874         // Before ending the process, make sure all the threads that had called
3875         // _endthreadex() completed.
3876 
3877         // Set the priority level of the current thread to the same value as
3878         // the priority level of exiting threads.
3879         // This is to ensure it will be given a fair chance to execute if
3880         // the timeout expires.
3881         hthr = GetCurrentThread();
3882         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
3883         start_time = os::javaTimeNanos();
3884         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
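             // javaTimeNanos() is in nanoseconds while EXIT_TIMEOUT is in
             // milliseconds, hence the 1000000L scaling here and below.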
3885         for (i = 0; ; ) {
3886           int portion_count = handle_count - i;
3887           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
3888             portion_count = MAXIMUM_WAIT_OBJECTS;
3889           }
3890           for (j = 0; j < portion_count; ++j) {
3891             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
3892           }
3893           timeout_left = (finish_time - start_time) / 1000000L;
3894           if (timeout_left < 0) {
3895             timeout_left = 0;
3896           }
3897           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
3898           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
3899             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3900                     (res == WAIT_FAILED ? "failed" : "timed out"),
3901                     GetLastError(), __FILE__, __LINE__);
3902             // Reset portion_count so we close the remaining
3903             // handles due to this error.
3904             portion_count = handle_count - i;
3905           }
3906           for (j = 0; j < portion_count; ++j) {
3907             CloseHandle(handles[i + j]);
3908           }
3909           if ((i += portion_count) >= handle_count) {
3910             break;
3911           }
3912           start_time = os::javaTimeNanos();
3913         }
3914         handle_count = 0;
3915       }
3916 
3917       LeaveCriticalSection(&crit_sect);
3918     }
3919 
3920     if (!registered &&
3921         OrderAccess::load_acquire(&process_exiting) != 0 &&
3922         process_exiting != GetCurrentThreadId()) {
3923       // Some other thread is about to call exit(), so we don't let
3924       // the current unregistered thread proceed to exit() or _endthreadex()
3925       while (true) {
3926         SuspendThread(GetCurrentThread());
3927         // Avoid busy-wait loop, if SuspendThread() failed.
3928         Sleep(EXIT_TIMEOUT);
3929       }
3930     }
3931   }
3932 
3933   // We are here if either
3934   // - there's no 'race at exit' bug on this OS release;
3935   // - initialization of the critical section failed (unlikely);
3936   // - the current thread has registered itself and left the critical section;
3937   // - the process-exiting thread has raised the flag and left the critical section.
3938   if (what == EPT_THREAD) {
3939     _endthreadex((unsigned)exit_code);
3940   } else if (what == EPT_PROCESS) {
3941     ::exit(exit_code);
3942   } else {
3943     _exit(exit_code);
3944   }
3945 
3946   // Should not reach here
3947   return exit_code;
3948 }
3949 
3950 #undef EXIT_TIMEOUT
3951 
3952 void os::win32::setmode_streams() {
3953   _setmode(_fileno(stdin), _O_BINARY);
3954   _setmode(_fileno(stdout), _O_BINARY);
3955   _setmode(_fileno(stderr), _O_BINARY);
3956 }
3957 
3958 
3959 bool os::is_debugger_attached() {
3960   return IsDebuggerPresent() ? true : false;
3961 }
3962 
3963 
3964 void os::wait_for_keypress_at_exit(void) {
3965   if (PauseAtExit) {
3966     fprintf(stderr, "Press any key to continue...\n");
3967     fgetc(stdin);
3968   }
3969 }
3970 
3971 
3972 bool os::message_box(const char* title, const char* message) {
3973   int result = MessageBox(NULL, message, title,
3974                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
3975   return result == IDYES;
3976 }
3977 
3978 #ifndef PRODUCT
3979 #ifndef _WIN64
3980 // Helpers to check whether NX protection is enabled
3981 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
3982   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
3983       pex->ExceptionRecord->NumberParameters > 0 &&
3984       pex->ExceptionRecord->ExceptionInformation[0] ==
3985       EXCEPTION_INFO_EXEC_VIOLATION) {
3986     return EXCEPTION_EXECUTE_HANDLER;
3987   }
3988   return EXCEPTION_CONTINUE_SEARCH;
3989 }
3990 
3991 void nx_check_protection() {
3992   // If NX is enabled we'll get an exception calling into code on the stack
3993   char code[] = { (char)0xC3 }; // ret
3994   void *code_ptr = (void *)code;
3995   __try {
3996     __asm call code_ptr
3997   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
3998     tty->print_raw_cr("NX protection detected.");
3999   }
4000 }
4001 #endif // _WIN64
4002 #endif // PRODUCT
4003 
4004 // This is called _before_ the global arguments have been parsed
4005 void os::init(void) {
4006   _initial_pid = _getpid();
4007 
4008   init_random(1234567);
4009 
4010   win32::initialize_system_info();
4011   win32::setmode_streams();
4012   init_page_sizes((size_t) win32::vm_page_size());
4013 
4014   // This may be overridden later when argument processing is done.
4015   FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);
4016 
4017   // Initialize main_process and main_thread
4018   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
4019   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
4020                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
4021     fatal("DuplicateHandle failed\n");
4022   }
4023   main_thread_id = (int) GetCurrentThreadId();
4024 
4025   // initialize fast thread access - only used for 32-bit
4026   win32::initialize_thread_ptr_offset();
4027 }
4028 
4029 // To install functions for atexit processing
4030 extern "C" {
4031   static void perfMemory_exit_helper() {
4032     perfMemory_exit();
4033   }
4034 }
4035 
4036 static jint initSock();
4037 
4038 // this is called _after_ the global arguments have been parsed
4039 jint os::init_2(void) {
4040 
4041   // This could be set any time but all platforms
4042   // have to set it the same so we have to mirror Solaris.
4043   DEBUG_ONLY(os::set_mutex_init_done();)
4044 
4045   // Setup Windows Exceptions
4046 
4047 #if INCLUDE_AOT
4048   // If AOT is enabled we need to install a vectored exception handler
4049   // in order to forward implicit exceptions from code in AOT
4050   // generated DLLs.  This is necessary since these DLLs are not
4051   // registered for structured exceptions like codecache methods are.
4052   if (AOTLibrary != NULL && (UseAOT || FLAG_IS_DEFAULT(UseAOT))) {
4053     topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelVectoredExceptionFilter);
4054   }
4055 #endif
4056 
4057   // for debugging float code generation bugs
4058   if (ForceFloatExceptions) {
4059 #ifndef  _WIN64
4060     static long fp_control_word = 0;
4061     __asm { fstcw fp_control_word }
4062     // see Intel PPro Manual, Vol. 2, p 7-16
4063     const long precision = 0x20;
4064     const long underflow = 0x10;
4065     const long overflow  = 0x08;
4066     const long zero_div  = 0x04;
4067     const long denorm    = 0x02;
4068     const long invalid   = 0x01;
4069     fp_control_word |= invalid;
4070     __asm { fldcw fp_control_word }
4071 #endif
4072   }
4073 
4074   // If stack_commit_size is 0, windows will reserve the default size,
4075   // but only commit a small portion of it.
4076   size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
4077   size_t default_reserve_size = os::win32::default_stack_size();
4078   size_t actual_reserve_size = stack_commit_size;
4079   if (stack_commit_size < default_reserve_size) {
4080     // If stack_commit_size == 0, we want this too
4081     actual_reserve_size = default_reserve_size;
4082   }
4083 
4084   // Check minimum allowable stack size for thread creation and to initialize
4085   // the java system classes, including StackOverflowError - depends on page
4086   // size.  Add two 4K pages for compiler2 recursion in main thread.
4087   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4088   // class initialization depending on 32 or 64 bit VM.
4089   size_t min_stack_allowed =
4090             (size_t)(JavaThread::stack_guard_zone_size() +
4091                      JavaThread::stack_shadow_zone_size() +
4092                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4093 
4094   min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
4095 
4096   if (actual_reserve_size < min_stack_allowed) {
4097     tty->print_cr("\nThe Java thread stack size specified is too small. "
4098                   "Specify at least %dk",
4099                   min_stack_allowed / K);
4100     return JNI_ERR;
4101   }
4102 
4103   JavaThread::set_stack_size_at_create(stack_commit_size);
4104 
4105   // Calculate theoretical max. size of Threads to guard against artificial
4106   // out-of-memory situations, where all available address-space has been
4107   // reserved by thread stacks.
4108   assert(actual_reserve_size != 0, "Must have a stack");
4109 
4110   // Calculate the thread limit when we should start doing Virtual Memory
4111   // banging. Currently this is when the threads have used all but 200MB of space.
4112   //
4113   // TODO: consider performing a similar calculation for commit size instead
4114   // as reserve size, since on a 64-bit platform we'll run into that more
4115   // often than running out of virtual memory space.  We can use the
4116   // lower value of the two calculations as the os_thread_limit.
4117   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
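       // e.g. on a 64-bit VM this evaluates to 2^63 - 200M; on 32-bit, 2G - 200M.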
4118   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
4119 
4120   // at exit methods are called in the reverse order of their registration.
4121   // there is no limit to the number of functions registered. atexit does
4122   // not set errno.
4123 
4124   if (PerfAllowAtExitRegistration) {
4125     // only register atexit functions if PerfAllowAtExitRegistration is set.
4126     // atexit functions can be delayed until process exit time, which
4127     // can be problematic for embedded VM situations. Embedded VMs should
4128     // call DestroyJavaVM() to assure that VM resources are released.
4129 
4130     // note: perfMemory_exit_helper atexit function may be removed in
4131     // the future if the appropriate cleanup code can be added to the
4132     // VM_Exit VMOperation's doit method.
4133     if (atexit(perfMemory_exit_helper) != 0) {
4134       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4135     }
4136   }
4137 
4138 #ifndef _WIN64
4139   // Print something if NX is enabled (win32 on AMD64)
4140   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4141 #endif
4142 
4143   // initialize thread priority policy
4144   prio_init();
4145 
4146   if (UseNUMA && !ForceNUMA) {
4147     UseNUMA = false; // We don't fully support this yet
4148   }
4149 
4150   if (UseNUMAInterleaving) {
4151     // first check whether this Windows OS supports VirtualAllocExNuma, if not ignore this flag
4152     bool success = numa_interleaving_init();
4153     if (!success) UseNUMAInterleaving = false;
4154   }
4155 
4156   if (initSock() != JNI_OK) {
4157     return JNI_ERR;
4158   }
4159 
4160   SymbolEngine::recalc_search_path();
4161 
4162   // Initialize data for jdk.internal.misc.Signal
4163   if (!ReduceSignalUsage) {
4164     jdk_misc_signal_init();
4165   }
4166 
4167   return JNI_OK;
4168 }
4169 
4170 // Mark the polling page as unreadable
4171 void os::make_polling_page_unreadable(void) {
4172   DWORD old_status;
4173   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4174                       PAGE_NOACCESS, &old_status)) {
4175     fatal("Could not disable polling page");
4176   }
4177 }
4178 
4179 // Mark the polling page as readable
4180 void os::make_polling_page_readable(void) {
4181   DWORD old_status;
4182   if (!VirtualProtect((char *)_polling_page, os::vm_page_size(),
4183                       PAGE_READONLY, &old_status)) {
4184     fatal("Could not enable polling page");
4185   }
4186 }
4187 
4188 // combine the high and low DWORD into a ULONGLONG
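     // e.g. make_double_word(0x00000001, 0x00000002) == 0x0000000100000002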
4189 static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
4190   ULONGLONG value = high_word;
4191   value <<= sizeof(high_word) * 8;
4192   value |= low_word;
4193   return value;
4194 }
4195 
4196 // Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
4197 static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
4198   ::memset((void*)sbuf, 0, sizeof(struct stat));
4199   sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
4200   sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
4201                                   file_data.ftLastWriteTime.dwLowDateTime);
4202   sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
4203                                   file_data.ftCreationTime.dwLowDateTime);
4204   sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
4205                                   file_data.ftLastAccessTime.dwLowDateTime);
4206   if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
4207     sbuf->st_mode |= S_IFDIR;
4208   } else {
4209     sbuf->st_mode |= S_IFREG;
4210   }
4211 }
4212 
4213 // The following function is adapted from java.base/windows/native/libjava/canonicalize_md.c
4214 // Creates a UNC path from a single-byte path. The return buffer is
4215 // allocated in C heap and needs to be freed by the caller.
4216 // Returns NULL on error.
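     // The "\\?\" prefix added below tells the wide-character Windows APIs to bypass
     // the MAX_PATH limit, allowing paths of up to roughly 32K characters.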
4217 static wchar_t* create_unc_path(const char* path, errno_t &err) {
4218   wchar_t* wpath = NULL;
4219   size_t converted_chars = 0;
4220   size_t path_len = strlen(path) + 1; // includes the terminating NULL
4221   if (path[0] == '\\' && path[1] == '\\') {
4222     if (path[2] == '?' && path[3] == '\\'){
4223       // if it already has a \\?\ don't do the prefix
4224       wpath = (wchar_t*)os::malloc(path_len * sizeof(wchar_t), mtInternal);
4225       if (wpath != NULL) {
4226         err = ::mbstowcs_s(&converted_chars, wpath, path_len, path, path_len);
4227       } else {
4228         err = ENOMEM;
4229       }
4230     } else {
4231       // only UNC pathname includes double slashes here
4232       wpath = (wchar_t*)os::malloc((path_len + 7) * sizeof(wchar_t), mtInternal);
4233       if (wpath != NULL) {
4234         ::wcscpy(wpath, L"\\\\?\\UNC\0");
4235         err = ::mbstowcs_s(&converted_chars, &wpath[7], path_len, path, path_len);
4236       } else {
4237         err = ENOMEM;
4238       }
4239     }
4240   } else {
4241     wpath = (wchar_t*)os::malloc((path_len + 4) * sizeof(wchar_t), mtInternal);
4242     if (wpath != NULL) {
4243       ::wcscpy(wpath, L"\\\\?\\\0");
4244       err = ::mbstowcs_s(&converted_chars, &wpath[4], path_len, path, path_len);
4245     } else {
4246       err = ENOMEM;
4247     }
4248   }
4249   return wpath;
4250 }
4251 
4252 static void destroy_unc_path(wchar_t* wpath) {
4253   os::free(wpath);
4254 }
4255 
4256 int os::stat(const char *path, struct stat *sbuf) {
4257   char* pathbuf = (char*)os::strdup(path, mtInternal);
4258   if (pathbuf == NULL) {
4259     errno = ENOMEM;
4260     return -1;
4261   }
4262   os::native_path(pathbuf);
4263   int ret;
4264   WIN32_FILE_ATTRIBUTE_DATA file_data;
4265   // Not using stat() to avoid the problem described in JDK-6539723
4266   if (strlen(path) < MAX_PATH) {
4267     BOOL bret = ::GetFileAttributesExA(pathbuf, GetFileExInfoStandard, &file_data);
4268     if (!bret) {
4269       errno = ::GetLastError();
4270       ret = -1;
4271     }
4272     else {
4273       file_attribute_data_to_stat(sbuf, file_data);
4274       ret = 0;
4275     }
4276   } else {
4277     errno_t err = ERROR_SUCCESS;
4278     wchar_t* wpath = create_unc_path(pathbuf, err);
4279     if (err != ERROR_SUCCESS) {
4280       if (wpath != NULL) {
4281         destroy_unc_path(wpath);
4282       }
4283       os::free(pathbuf);
4284       errno = err;
4285       return -1;
4286     }
4287     BOOL bret = ::GetFileAttributesExW(wpath, GetFileExInfoStandard, &file_data);
4288     if (!bret) {
4289       errno = ::GetLastError();
4290       ret = -1;
4291     } else {
4292       file_attribute_data_to_stat(sbuf, file_data);
4293       ret = 0;
4294     }
4295     destroy_unc_path(wpath);
4296   }
4297   os::free(pathbuf);
4298   return ret;
4299 }
4300 
4301 static HANDLE create_read_only_file_handle(const char* file) {
4302   if (file == NULL) {
4303     return INVALID_HANDLE_VALUE;
4304   }
4305 
4306   char* nativepath = (char*)os::strdup(file, mtInternal);
4307   if (nativepath == NULL) {
4308     errno = ENOMEM;
4309     return INVALID_HANDLE_VALUE;
4310   }
4311   os::native_path(nativepath);
4312 
4313   size_t len = strlen(nativepath);
4314   HANDLE handle = INVALID_HANDLE_VALUE;
4315 
4316   if (len < MAX_PATH) {
4317     handle = ::CreateFile(nativepath, 0, FILE_SHARE_READ,
4318                           NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4319   } else {
4320     errno_t err = ERROR_SUCCESS;
4321     wchar_t* wfile = create_unc_path(nativepath, err);
4322     if (err != ERROR_SUCCESS) {
4323       if (wfile != NULL) {
4324         destroy_unc_path(wfile);
4325       }
4326       os::free(nativepath);
4327       return INVALID_HANDLE_VALUE;
4328     }
4329     handle = ::CreateFileW(wfile, 0, FILE_SHARE_READ,
4330                            NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4331     destroy_unc_path(wfile);
4332   }
4333 
4334   os::free(nativepath);
4335   return handle;
4336 }
4337 
4338 bool os::same_files(const char* file1, const char* file2) {
4339 
4340   if (file1 == NULL && file2 == NULL) {
4341     return true;
4342   }
4343 
4344   if (file1 == NULL || file2 == NULL) {
4345     return false;
4346   }
4347 
4348   if (strcmp(file1, file2) == 0) {
4349     return true;
4350   }
4351 
4352   HANDLE handle1 = create_read_only_file_handle(file1);
4353   HANDLE handle2 = create_read_only_file_handle(file2);
4354   bool result = false;
4355 
4356   // if we could open both paths...
4357   if (handle1 != INVALID_HANDLE_VALUE && handle2 != INVALID_HANDLE_VALUE) {
4358     BY_HANDLE_FILE_INFORMATION fileInfo1;
4359     BY_HANDLE_FILE_INFORMATION fileInfo2;
4360     if (::GetFileInformationByHandle(handle1, &fileInfo1) &&
4361       ::GetFileInformationByHandle(handle2, &fileInfo2)) {
4362       // the paths are the same if they refer to the same file (fileindex) on the same volume (volume serial number)
4363       if (fileInfo1.dwVolumeSerialNumber == fileInfo2.dwVolumeSerialNumber &&
4364         fileInfo1.nFileIndexHigh == fileInfo2.nFileIndexHigh &&
4365         fileInfo1.nFileIndexLow == fileInfo2.nFileIndexLow) {
4366         result = true;
4367       }
4368     }
4369   }
4370 
4371   //free the handles
4372   if (handle1 != INVALID_HANDLE_VALUE) {
4373     ::CloseHandle(handle1);
4374   }
4375 
4376   if (handle2 != INVALID_HANDLE_VALUE) {
4377     ::CloseHandle(handle2);
4378   }
4379 
4380   return result;
4381 }
4382 
4383 
4384 #define FT2INT64(ft) \
4385   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
4386 
4387 
4388 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4389 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4390 // of a thread.
4391 //
4392 // current_thread_cpu_time() and thread_cpu_time(Thread*) return
4393 // the fast estimate available on the platform.
4394 
4395 // current_thread_cpu_time() is not optimized for Windows yet
4396 jlong os::current_thread_cpu_time() {
4397   // return user + sys since the cost is the same
4398   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4399 }
4400 
4401 jlong os::thread_cpu_time(Thread* thread) {
4402   // consistent with what current_thread_cpu_time() returns.
4403   return os::thread_cpu_time(thread, true /* user+sys */);
4404 }
4405 
4406 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4407   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4408 }
4409 
4410 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4411   // This code is a copy from the classic VM -> hpi::sysThreadCPUTime
4412   // If this function changes, os::is_thread_cpu_time_supported() should too
4413   FILETIME CreationTime;
4414   FILETIME ExitTime;
4415   FILETIME KernelTime;
4416   FILETIME UserTime;
4417 
4418   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4419                       &ExitTime, &KernelTime, &UserTime) == 0) {
4420     return -1;
4421   } else if (user_sys_cpu_time) {
4422     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4423   } else {
4424     return FT2INT64(UserTime) * 100;
4425   }
4426 }
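
// Units, for reference: GetThreadTimes() reports FILETIMEs in 100ns ticks,
// so FT2INT64(UserTime) counts 100ns intervals and the "* 100" above converts
// the result to the nanoseconds expected from thread_cpu_time().
// Worked example (illustrative only): a FILETIME of
//   { dwHighDateTime = 0, dwLowDateTime = 10000000 }
// gives FT2INT64(ft) == 10000000 ticks, i.e. 10000000 * 100 ns = 1 second.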
4427 
4428 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4429   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4430   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4431   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4432   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4433 }
4434 
4435 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4436   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4437   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4438   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4439   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4440 }
4441 
4442 bool os::is_thread_cpu_time_supported() {
4443   // see os::thread_cpu_time
4444   FILETIME CreationTime;
4445   FILETIME ExitTime;
4446   FILETIME KernelTime;
4447   FILETIME UserTime;
4448 
4449   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4450                       &KernelTime, &UserTime) == 0) {
4451     return false;
4452   } else {
4453     return true;
4454   }
4455 }
4456 
4457 // Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4458 // It does have primitives (PDH API) to get CPU usage and run queue length.
4459 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4460 // If we wanted to implement loadavg on Windows, we have a few options:
4461 //
4462 // a) Query CPU usage and run queue length and "fake" an answer by
4463 //    returning the CPU usage if it's under 100%, and the run queue
4464 //    length otherwise.  It turns out that querying is pretty slow
4465 //    on Windows, on the order of 200 microseconds on a fast machine.
4466 //    Note that on Windows the CPU usage value is the % usage
4467 //    since the last time the API was called (and the first call
4468 //    returns 100%), so we'd have to deal with that as well.
4469 //
4470 // b) Sample the "fake" answer using a sampling thread and store
4471 //    the answer in a global variable.  The call to loadavg would
4472 //    just return the value of the global, avoiding the slow query.
4473 //
4474 // c) Sample a better answer using exponential decay to smooth the
4475 //    value.  This is basically the algorithm used by UNIX kernels.
4476 //
4477 // Note that sampling thread starvation could affect both (b) and (c).
4478 int os::loadavg(double loadavg[], int nelem) {
4479   return -1;
4480 }
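
// A rough sketch of option (a) above using the PDH API (illustrative only,
// not part of the build; error handling elided, and the English counter path
// is assumed to be available):
//
//   #include <pdh.h>                    // link with pdh.lib
//
//   static double sample_cpu_usage() {
//     PDH_HQUERY query;
//     PDH_HCOUNTER counter;
//     PDH_FMT_COUNTERVALUE value;
//     PdhOpenQuery(NULL, 0, &query);
//     PdhAddEnglishCounterA(query, "\\Processor(_Total)\\% Processor Time",
//                           0, &counter);
//     PdhCollectQueryData(query);       // first sample; rate counters need two
//     Sleep(100);
//     PdhCollectQueryData(query);       // second sample
//     PdhGetFormattedCounterValue(counter, PDH_FMT_DOUBLE, NULL, &value);
//     PdhCloseQuery(query);
//     return value.doubleValue;         // % CPU used since the previous sample
//   }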
4481 
4482 
4483 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4484 bool os::dont_yield() {
4485   return DontYieldALot;
4486 }
4487 
4488 // This method is a slightly reworked copy of JDK's sysOpen
4489 // from src/windows/hpi/src/sys_api_md.c
4490 
4491 int os::open(const char *path, int oflag, int mode) {
4492   char* pathbuf = (char*)os::strdup(path, mtInternal);
4493   if (pathbuf == NULL) {
4494     errno = ENOMEM;
4495     return -1;
4496   }
4497   os::native_path(pathbuf);
4498   int ret;
4499   if (strlen(path) < MAX_PATH) {
4500     ret = ::open(pathbuf, oflag | O_BINARY | O_NOINHERIT, mode);
4501   } else {
4502     errno_t err = ERROR_SUCCESS;
4503     wchar_t* wpath = create_unc_path(pathbuf, err);
4504     if (err != ERROR_SUCCESS) {
4505       if (wpath != NULL) {
4506         destroy_unc_path(wpath);
4507       }
4508       os::free(pathbuf);
4509       errno = err;
4510       return -1;
4511     }
4512     ret = ::_wopen(wpath, oflag | O_BINARY | O_NOINHERIT, mode);
4513     if (ret == -1) {
4514       errno = ::GetLastError();
4515     }
4516     destroy_unc_path(wpath);
4517   }
4518   os::free(pathbuf);
4519   return ret;
4520 }
4521 
4522 FILE* os::open(int fd, const char* mode) {
4523   return ::_fdopen(fd, mode);
4524 }
4525 
4526 // Is a (classpath) directory empty?
4527 bool os::dir_is_empty(const char* path) {
4528   char* search_path = (char*)os::malloc(strlen(path) + 3, mtInternal);
4529   if (search_path == NULL) {
4530     errno = ENOMEM;
4531     return false;
4532   }
4533   strcpy(search_path, path);
4534   os::native_path(search_path);
4535   // Append "*", or possibly "\\*", to path
4536   if (search_path[1] == ':' &&
4537        (search_path[2] == '\0' ||
4538          (search_path[2] == '\\' && search_path[3] == '\0'))) {
4539     // No '\\' needed for cases like "Z:" or "Z:\"
4540     strcat(search_path, "*");
4541   }
4542   else {
4543     strcat(search_path, "\\*");
4544   }
4545   errno_t err = ERROR_SUCCESS;
4546   wchar_t* wpath = create_unc_path(search_path, err);
4547   if (err != ERROR_SUCCESS) {
4548     if (wpath != NULL) {
4549       destroy_unc_path(wpath);
4550     }
4551     os::free(search_path);
4552     errno = err;
4553     return false;
4554   }
4555   WIN32_FIND_DATAW fd;
4556   HANDLE f = ::FindFirstFileW(wpath, &fd);
4557   destroy_unc_path(wpath);
4558   bool is_empty = true;
4559   if (f != INVALID_HANDLE_VALUE) {
4560     while (is_empty && ::FindNextFileW(f, &fd)) {
4561       // An empty directory contains only the current directory entry ('.')
4562       // and the parent directory entry ('..').
4563       if ((wcscmp(fd.cFileName, L".") != 0) &&
4564           (wcscmp(fd.cFileName, L"..") != 0)) {
4565         is_empty = false;
4566       }
4567     }
4568     FindClose(f);
4569   }
4570   os::free(search_path);
4571   return is_empty;
4572 }
4573 
4574 // create binary file, rewriting existing file if required
4575 int os::create_binary_file(const char* path, bool rewrite_existing) {
4576   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4577   if (!rewrite_existing) {
4578     oflags |= _O_EXCL;
4579   }
4580   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4581 }
4582 
4583 // return current position of file pointer
4584 jlong os::current_file_offset(int fd) {
4585   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4586 }
4587 
4588 // move file pointer to the specified offset
4589 jlong os::seek_to_file_offset(int fd, jlong offset) {
4590   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4591 }
4592 
4593 
4594 jlong os::lseek(int fd, jlong offset, int whence) {
4595   return (jlong) ::_lseeki64(fd, offset, whence);
4596 }
4597 
4598 ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4599   OVERLAPPED ov;
4600   DWORD nread;
4601   BOOL result;
4602 
4603   ZeroMemory(&ov, sizeof(ov));
4604   ov.Offset = (DWORD)offset;
4605   ov.OffsetHigh = (DWORD)(offset >> 32);
4606 
4607   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4608 
4609   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4610 
4611   return result ? nread : 0;
4612 }
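
// For reference, the OVERLAPPED split above places the low 32 bits of the
// jlong offset in ov.Offset and the high 32 bits in ov.OffsetHigh, so
// ReadFile() reads at that absolute position (roughly a pread()).
// Illustrative example: offset 0x123456789 gives
//   ov.Offset     == 0x23456789
//   ov.OffsetHigh == 0x1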
4613 
4614 
4615 // This method is a slightly reworked copy of JDK's sysNativePath
4616 // from src/windows/hpi/src/path_md.c
4617 
4618 // Convert a pathname to native format.  On win32, this involves forcing all
4619 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4620 // sometimes rejects '/') and removing redundant separators.  The input path is
4621 // assumed to have been converted into the character encoding used by the local
4622 // system.  Because this might be a double-byte encoding, care is taken to
4623 // treat double-byte lead characters correctly.
4624 //
4625 // This procedure modifies the given path in place, as the result is never
4626 // longer than the original.  There is no error return; this operation always
4627 // succeeds.
4628 char * os::native_path(char *path) {
4629   char *src = path, *dst = path, *end = path;
4630   char *colon = NULL;  // If a drive specifier is found, this will
4631                        // point to the colon following the drive letter
4632 
4633   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4634   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4635           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4636 
4637   // Check for leading separators
4638 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4639   while (isfilesep(*src)) {
4640     src++;
4641   }
4642 
4643   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4644     // Remove leading separators if followed by drive specifier.  This
4645     // hack is necessary to support file URLs containing drive
4646     // specifiers (e.g., "file://c:/path").  As a side effect,
4647     // "/c:/path" can be used as an alternative to "c:/path".
4648     *dst++ = *src++;
4649     colon = dst;
4650     *dst++ = ':';
4651     src++;
4652   } else {
4653     src = path;
4654     if (isfilesep(src[0]) && isfilesep(src[1])) {
4655       // UNC pathname: Retain first separator; leave src pointed at
4656       // second separator so that further separators will be collapsed
4657       // into the second separator.  The result will be a pathname
4658       // beginning with "\\\\" followed (most likely) by a host name.
4659       src = dst = path + 1;
4660       path[0] = '\\';     // Force first separator to '\\'
4661     }
4662   }
4663 
4664   end = dst;
4665 
4666   // Remove redundant separators from remainder of path, forcing all
4667   // separators to be '\\' rather than '/'. Also, single byte space
4668   // characters are removed from the end of the path because those
4669   // are not legal ending characters on this operating system.
4670   //
4671   while (*src != '\0') {
4672     if (isfilesep(*src)) {
4673       *dst++ = '\\'; src++;
4674       while (isfilesep(*src)) src++;
4675       if (*src == '\0') {
4676         // Check for trailing separator
4677         end = dst;
4678         if (colon == dst - 2) break;  // "z:\\"
4679         if (dst == path + 1) break;   // "\\"
4680         if (dst == path + 2 && isfilesep(path[0])) {
4681           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4682           // beginning of a UNC pathname.  Even though it is not, by
4683           // itself, a valid UNC pathname, we leave it as is in order
4684           // to be consistent with the path canonicalizer as well
4685           // as the win32 APIs, which treat this case as an invalid
4686           // UNC pathname rather than as an alias for the root
4687           // directory of the current drive.
4688           break;
4689         }
4690         end = --dst;  // Path does not denote a root directory, so
4691                       // remove trailing separator
4692         break;
4693       }
4694       end = dst;
4695     } else {
4696       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4697         *dst++ = *src++;
4698         if (*src) *dst++ = *src++;
4699         end = dst;
4700       } else {  // Copy a single-byte character
4701         char c = *src++;
4702         *dst++ = c;
4703         // Space is not a legal ending character
4704         if (c != ' ') end = dst;
4705       }
4706     }
4707   }
4708 
4709   *end = '\0';
4710 
4711   // For "z:", add "." to work around a bug in the C runtime library
4712   if (colon == dst - 1) {
4713     path[2] = '.';
4714     path[3] = '\0';
4715   }
4716 
4717   return path;
4718 }
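
// Illustrative examples of the conversions performed above:
//   "/c:/path//to/file/"   becomes   "c:\path\to\file"   (drive URL form accepted)
//   "//server//share/"     becomes   "\\server\share"    (UNC; leading "\\" kept)
//   "z:/"                  becomes   "z:\"               (root separator retained)
// Trailing single-byte spaces are also dropped, since they are not legal at
// the end of a Windows path.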
4719 
4720 // This code is a copy of JDK's sysSetLength
4721 // from src/windows/hpi/src/sys_api_md.c
4722 
4723 int os::ftruncate(int fd, jlong length) {
4724   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4725   long high = (long)(length >> 32);
4726   DWORD ret;
4727 
4728   if (h == (HANDLE)(-1)) {
4729     return -1;
4730   }
4731 
4732   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4733   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4734     return -1;
4735   }
4736 
4737   if (::SetEndOfFile(h) == FALSE) {
4738     return -1;
4739   }
4740 
4741   return 0;
4742 }
4743 
4744 int os::get_fileno(FILE* fp) {
4745   return _fileno(fp);
4746 }
4747 
4748 // This code is a copy of JDK's sysSync
4749 // from src/windows/hpi/src/sys_api_md.c
4750 // except for the legacy workaround for a bug in Win 98
4751 
4752 int os::fsync(int fd) {
4753   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4754 
4755   if ((!::FlushFileBuffers(handle)) &&
4756       (GetLastError() != ERROR_ACCESS_DENIED)) {
4757     // from winerror.h
4758     return -1;
4759   }
4760   return 0;
4761 }
4762 
4763 static int nonSeekAvailable(int, long *);
4764 static int stdinAvailable(int, long *);
4765 
4766 // This code is a copy of JDK's sysAvailable
4767 // from src/windows/hpi/src/sys_api_md.c
4768 
4769 int os::available(int fd, jlong *bytes) {
4770   jlong cur, end;
4771   struct _stati64 stbuf64;
4772 
4773   if (::_fstati64(fd, &stbuf64) >= 0) {
4774     int mode = stbuf64.st_mode;
4775     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4776       int ret;
4777       long lpbytes;
4778       if (fd == 0) {
4779         ret = stdinAvailable(fd, &lpbytes);
4780       } else {
4781         ret = nonSeekAvailable(fd, &lpbytes);
4782       }
4783       (*bytes) = (jlong)(lpbytes);
4784       return ret;
4785     }
4786     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4787       return FALSE;
4788     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4789       return FALSE;
4790     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4791       return FALSE;
4792     }
4793     *bytes = end - cur;
4794     return TRUE;
4795   } else {
4796     return FALSE;
4797   }
4798 }
4799 
4800 void os::flockfile(FILE* fp) {
4801   _lock_file(fp);
4802 }
4803 
4804 void os::funlockfile(FILE* fp) {
4805   _unlock_file(fp);
4806 }
4807 
4808 // This code is a copy of JDK's nonSeekAvailable
4809 // from src/windows/hpi/src/sys_api_md.c
4810 
4811 static int nonSeekAvailable(int fd, long *pbytes) {
4812   // This is used for available on non-seekable devices
4813   // (like both named and anonymous pipes, such as pipes
4814   //  connected to an exec'd process).
4815   // Standard Input is a special case.
4816   HANDLE han;
4817 
4818   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4819     return FALSE;
4820   }
4821 
4822   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4823     // PeekNamedPipe fails when at EOF.  In that case we
4824     // simply make *pbytes = 0 which is consistent with the
4825     // behavior we get on Solaris when an fd is at EOF.
4826     // The only alternative is to raise an Exception,
4827     // which isn't really warranted.
4828     //
4829     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4830       return FALSE;
4831     }
4832     *pbytes = 0;
4833   }
4834   return TRUE;
4835 }
4836 
4837 #define MAX_INPUT_EVENTS 2000
4838 
4839 // This code is a copy of JDK's stdinAvailable
4840 // from src/windows/hpi/src/sys_api_md.c
4841 
4842 static int stdinAvailable(int fd, long *pbytes) {
4843   HANDLE han;
4844   DWORD numEventsRead = 0;  // Number of events read from buffer
4845   DWORD numEvents = 0;      // Number of events in buffer
4846   DWORD i = 0;              // Loop index
4847   DWORD curLength = 0;      // Position marker
4848   DWORD actualLength = 0;   // Number of bytes readable
4849   BOOL error = FALSE;       // Error holder
4850   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4851 
4852   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4853     return FALSE;
4854   }
4855 
4856   // Construct an array of input records in the console buffer
4857   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4858   if (error == 0) {
4859     return nonSeekAvailable(fd, pbytes);
4860   }
4861 
4862   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4863   if (numEvents > MAX_INPUT_EVENTS) {
4864     numEvents = MAX_INPUT_EVENTS;
4865   }
4866 
4867   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4868   if (lpBuffer == NULL) {
4869     return FALSE;
4870   }
4871 
4872   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4873   if (error == 0) {
4874     os::free(lpBuffer);
4875     return FALSE;
4876   }
4877 
4878   // Examine input records for the number of bytes available
4879   for (i=0; i<numEvents; i++) {
4880     if (lpBuffer[i].EventType == KEY_EVENT) {
4881 
4882       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4883                                       &(lpBuffer[i].Event);
4884       if (keyRecord->bKeyDown == TRUE) {
4885         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4886         curLength++;
4887         if (*keyPressed == '\r') {
4888           actualLength = curLength;
4889         }
4890       }
4891     }
4892   }
4893 
4894   if (lpBuffer != NULL) {
4895     os::free(lpBuffer);
4896   }
4897 
4898   *pbytes = (long) actualLength;
4899   return TRUE;
4900 }
4901 
4902 // Map a block of memory.
4903 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4904                         char *addr, size_t bytes, bool read_only,
4905                         bool allow_exec) {
4906   HANDLE hFile;
4907   char* base;
4908 
4909   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4910                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4911   if (hFile == INVALID_HANDLE_VALUE) {
4912     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4913     return NULL;
4914   }
4915 
4916   if (allow_exec) {
4917     // CreateFileMapping/MapViewOfFileEx can't map executable memory
4918     // unless it comes from a PE image (which the shared archive is not.)
4919     // Even VirtualProtect refuses to give execute access to mapped memory
4920     // that was not previously executable.
4921     //
4922     // Instead, stick the executable region in anonymous memory.  Yuck.
4923     // Penalty is that ~4 pages will not be shareable - in the future
4924     // we might consider DLLizing the shared archive with a proper PE
4925     // header so that mapping executable + sharing is possible.
4926 
4927     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4928                                 PAGE_READWRITE);
4929     if (base == NULL) {
4930       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
4931       CloseHandle(hFile);
4932       return NULL;
4933     }
4934 
4935     // Record virtual memory allocation
4936     MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
4937 
4938     DWORD bytes_read;
4939     OVERLAPPED overlapped;
4940     overlapped.Offset = (DWORD)file_offset;
4941     overlapped.OffsetHigh = 0;
4942     overlapped.hEvent = NULL;
4943     // ReadFile guarantees that if the return value is true, the requested
4944     // number of bytes were read before returning.
4945     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4946     if (!res) {
4947       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
4948       release_memory(base, bytes);
4949       CloseHandle(hFile);
4950       return NULL;
4951     }
4952   } else {
4953     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4954                                     NULL /* file_name */);
4955     if (hMap == NULL) {
4956       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
4957       CloseHandle(hFile);
4958       return NULL;
4959     }
4960 
4961     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4962     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4963                                   (DWORD)bytes, addr);
4964     if (base == NULL) {
4965       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
4966       CloseHandle(hMap);
4967       CloseHandle(hFile);
4968       return NULL;
4969     }
4970 
4971     if (CloseHandle(hMap) == 0) {
4972       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
4973       CloseHandle(hFile);
4974       return base;
4975     }
4976   }
4977 
4978   if (allow_exec) {
4979     DWORD old_protect;
4980     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4981     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4982 
4983     if (!res) {
4984       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4985       // Don't consider this a hard error, on IA32 even if the
4986       // VirtualProtect fails, we should still be able to execute
4987       CloseHandle(hFile);
4988       return base;
4989     }
4990   }
4991 
4992   if (CloseHandle(hFile) == 0) {
4993     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4994     return base;
4995   }
4996 
4997   return base;
4998 }
4999 
5000 
5001 // Remap a block of memory.
5002 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
5003                           char *addr, size_t bytes, bool read_only,
5004                           bool allow_exec) {
5005   // This OS does not allow existing memory maps to be remapped so we
5006   // would have to unmap the memory before we remap it.
5007 
5008   // Because there is a small window between unmapping memory and mapping
5009   // it in again with different protections, CDS archives are mapped RW
5010   // on windows, so this function isn't called.
5011   ShouldNotReachHere();
5012   return NULL;
5013 }
5014 
5015 
5016 // Unmap a block of memory.
5017 // Returns true=success, otherwise false.
5018 
5019 bool os::pd_unmap_memory(char* addr, size_t bytes) {
5020   MEMORY_BASIC_INFORMATION mem_info;
5021   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
5022     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
5023     return false;
5024   }
5025 
5026   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
5027   // Instead, executable region was allocated using VirtualAlloc(). See
5028   // pd_map_memory() above.
5029   //
5030   // The following flags should match the 'exec_access' flags used for
5031   // VirtualProtect() in pd_map_memory().
5032   if (mem_info.Protect == PAGE_EXECUTE_READ ||
5033       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
5034     return pd_release_memory(addr, bytes);
5035   }
5036 
5037   BOOL result = UnmapViewOfFile(addr);
5038   if (result == 0) {
5039     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
5040     return false;
5041   }
5042   return true;
5043 }
5044 
5045 void os::pause() {
5046   char filename[MAX_PATH];
5047   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
5048     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
5049   } else {
5050     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
5051   }
5052 
5053   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
5054   if (fd != -1) {
5055     struct stat buf;
5056     ::close(fd);
5057     while (::stat(filename, &buf) == 0) {
5058       Sleep(100);
5059     }
5060   } else {
5061     jio_fprintf(stderr,
5062                 "Could not open pause file '%s', continuing immediately.\n", filename);
5063   }
5064 }
5065 
5066 Thread* os::ThreadCrashProtection::_protected_thread = NULL;
5067 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
5068 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;
5069 
5070 os::ThreadCrashProtection::ThreadCrashProtection() {
5071 }
5072 
5073 // See the caveats for this class in os_windows.hpp
5074 // Protects the callback call so that a raised OS EXCEPTION causes a jump back
5075 // into this method and returns false. If no OS EXCEPTION was raised, returns
5076 // true.
5077 // The callback is supposed to provide the method that should be protected.
5078 //
5079 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
5080 
5081   Thread::muxAcquire(&_crash_mux, "CrashProtection");
5082 
5083   _protected_thread = Thread::current_or_null();
5084   assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");
5085 
5086   bool success = true;
5087   __try {
5088     _crash_protection = this;
5089     cb.call();
5090   } __except(EXCEPTION_EXECUTE_HANDLER) {
5091     // only for protection, nothing to do
5092     success = false;
5093   }
5094   _crash_protection = NULL;
5095   _protected_thread = NULL;
5096   Thread::muxRelease(&_crash_mux);
5097   return success;
5098 }
5099 
5100 
5101 class HighResolutionInterval : public CHeapObj<mtThread> {
5102   // The default timer resolution seems to be 10 milliseconds.
5103   // (Where is this written down?)
5104   // If someone wants to sleep for only a fraction of the default,
5105   // then we set the timer resolution down to 1 millisecond for
5106   // the duration of their interval.
5107   // We carefully set the resolution back, since otherwise we
5108   // seem to incur an overhead (3%?) that we don't need.
5109   // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
5110   // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
5111   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
5112   // timeBeginPeriod() if the relative error exceeded some threshold.
5113   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
5114   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
5115   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
5116   // resolution timers running.
5117  private:
5118   jlong resolution;
5119  public:
5120   HighResolutionInterval(jlong ms) {
5121     resolution = ms % 10L;
5122     if (resolution != 0) {
5123       MMRESULT result = timeBeginPeriod(1L);
5124     }
5125   }
5126   ~HighResolutionInterval() {
5127     if (resolution != 0) {
5128       MMRESULT result = timeEndPeriod(1L);
5129     }
5130     resolution = 0L;
5131   }
5132 };
5133 
5134 // An Event wraps a win32 "CreateEvent" kernel handle.
5135 //
5136 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5137 //
5138 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5139 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5140 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5141 //     In addition, an unpark() operation might fetch the handle field, but the
5142 //     event could recycle between the fetch and the SetEvent() operation.
5143 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5144 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
5145 //     on a stale but recycled handle would be harmless, but in practice this might
5146 //     confuse other non-Sun code, so it's not a viable approach.
5147 //
5148 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5149 //     with the Event.  The event handle is never closed.  This could be construed
5150 //     as handle leakage, but only up to the maximum # of threads that have been extant
5151 //     at any one time.  This shouldn't be an issue, as windows platforms typically
5152 //     permit a process to have hundreds of thousands of open handles.
5153 //
5154 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5155 //     and release unused handles.
5156 //
5157 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5158 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5159 //
5160 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5161 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5162 //
5163 // We use (2).
5164 //
5165 // TODO-FIXME:
5166 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5167 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5168 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5169 // 3.  Collapse the JSR166 parker event, and the objectmonitor ParkEvent
5170 //     into a single win32 CreateEvent() handle.
5171 //
5172 // Assumption:
5173 //    Only one parker can exist on an event, which is why we allocate
5174 //    them per-thread. Multiple unparkers can coexist.
5175 //
5176 // _Event transitions in park()
5177 //   -1 => -1 : illegal
5178 //    1 =>  0 : pass - return immediately
5179 //    0 => -1 : block; then set _Event to 0 before returning
5180 //
5181 // _Event transitions in unpark()
5182 //    0 => 1 : just return
5183 //    1 => 1 : just return
5184 //   -1 => either 0 or 1; must signal target thread
5185 //         That is, we can safely transition _Event from -1 to either
5186 //         0 or 1.
5187 //
5188 // _Event serves as a restricted-range semaphore.
5189 //   -1 : thread is blocked, i.e. there is a waiter
5190 //    0 : neutral: thread is running or ready,
5191 //        could have been signaled after a wait started
5192 //    1 : signaled - thread is running or ready
5193 //
5194 // Another possible encoding of _Event would be with
5195 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5196 //
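// A short worked trace of the protocol above (illustrative only):
//   1. Thread T calls park() with _Event == 0: the CAS lowers it to -1 and T
//      blocks in WaitForSingleObject() on _ParkHandle.
//   2. Another thread calls unpark(): Atomic::xchg(1, &_Event) returns the old
//      value -1, so it proceeds to ::SetEvent(_ParkHandle) and wakes T.
//   3. T sees _Event >= 0, stores 0 back into _Event and returns; a subsequent
//      park() that finds _Event == 1 returns immediately (the 1 => 0 case).
//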
5197 
5198 int os::PlatformEvent::park(jlong Millis) {
5199   // Transitions for _Event:
5200   //   -1 => -1 : illegal
5201   //    1 =>  0 : pass - return immediately
5202   //    0 => -1 : block; then set _Event to 0 before returning
5203 
5204   guarantee(_ParkHandle != NULL , "Invariant");
5205   guarantee(Millis > 0          , "Invariant");
5206 
5207   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5208   // the initial park() operation.
5209   // Consider: use atomic decrement instead of CAS-loop
5210 
5211   int v;
5212   for (;;) {
5213     v = _Event;
5214     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5215   }
5216   guarantee((v == 0) || (v == 1), "invariant");
5217   if (v != 0) return OS_OK;
5218 
5219   // Do this the hard way by blocking ...
5220   // TODO: consider a brief spin here, gated on the success of recent
5221   // spin attempts by this thread.
5222   //
5223   // We decompose long timeouts into series of shorter timed waits.
5224   // Evidently large timeout values passed in WaitForSingleObject() are problematic on some
5225   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5226   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5227   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5228   // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
5229   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5230   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5231   // for the already waited time.  This policy does not admit any new outcomes.
5232   // In the future, however, we might want to track the accumulated wait time and
5233   // adjust Millis accordingly if we encounter a spurious wakeup.
5234 
5235   const int MAXTIMEOUT = 0x10000000;
5236   DWORD rv = WAIT_TIMEOUT;
5237   while (_Event < 0 && Millis > 0) {
5238     DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5239     if (Millis > MAXTIMEOUT) {
5240       prd = MAXTIMEOUT;
5241     }
5242     HighResolutionInterval *phri = NULL;
5243     if (!ForceTimeHighResolution) {
5244       phri = new HighResolutionInterval(prd);
5245     }
5246     rv = ::WaitForSingleObject(_ParkHandle, prd);
5247     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5248     if (rv == WAIT_TIMEOUT) {
5249       Millis -= prd;
5250     }
5251     delete phri; // if it is NULL, harmless
5252   }
5253   v = _Event;
5254   _Event = 0;
5255   // see comment at end of os::PlatformEvent::park() below:
5256   OrderAccess::fence();
5257   // If we encounter a nearly simultaneous timeout expiry and unpark()
5258   // we return OS_OK indicating we awoke via unpark().
5259   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5260   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5261 }
5262 
5263 void os::PlatformEvent::park() {
5264   // Transitions for _Event:
5265   //   -1 => -1 : illegal
5266   //    1 =>  0 : pass - return immediately
5267   //    0 => -1 : block; then set _Event to 0 before returning
5268 
5269   guarantee(_ParkHandle != NULL, "Invariant");
5270   // Invariant: Only the thread associated with the Event/PlatformEvent
5271   // may call park().
5272   // Consider: use atomic decrement instead of CAS-loop
5273   int v;
5274   for (;;) {
5275     v = _Event;
5276     if (Atomic::cmpxchg(v-1, &_Event, v) == v) break;
5277   }
5278   guarantee((v == 0) || (v == 1), "invariant");
5279   if (v != 0) return;
5280 
5281   // Do this the hard way by blocking ...
5282   // TODO: consider a brief spin here, gated on the success of recent
5283   // spin attempts by this thread.
5284   while (_Event < 0) {
5285     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5286     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5287   }
5288 
5289   // Usually we'll find _Event == 0 at this point, but as
5290   // an optional optimization we clear it anyway, just in case
5291   // multiple unpark() operations drove _Event up to 1.
5292   _Event = 0;
5293   OrderAccess::fence();
5294   guarantee(_Event >= 0, "invariant");
5295 }
5296 
5297 void os::PlatformEvent::unpark() {
5298   guarantee(_ParkHandle != NULL, "Invariant");
5299 
5300   // Transitions for _Event:
5301   //    0 => 1 : just return
5302   //    1 => 1 : just return
5303   //   -1 => either 0 or 1; must signal target thread
5304   //         That is, we can safely transition _Event from -1 to either
5305   //         0 or 1.
5306   // See also: "Semaphores in Plan 9" by Mullender & Cox
5307   //
5308   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5309   // that it will take two back-to-back park() calls for the owning
5310   // thread to block. This has the benefit of forcing a spurious return
5311   // from the first park() call after an unpark() call which will help
5312   // shake out uses of park() and unpark() without condition variables.
5313 
5314   if (Atomic::xchg(1, &_Event) >= 0) return;
5315 
5316   ::SetEvent(_ParkHandle);
5317 }
5318 
5319 
5320 // JSR166
5321 // -------------------------------------------------------
5322 
5323 // The Windows implementation of Park is very straightforward: Basic
5324 // operations on Win32 Events turn out to have the right semantics to
5325 // use them directly. We opportunistically reuse the event inherited
5326 // from Monitor.
5327 
5328 void Parker::park(bool isAbsolute, jlong time) {
5329   guarantee(_ParkEvent != NULL, "invariant");
5330   // First, demultiplex/decode time arguments
5331   if (time < 0) { // don't wait
5332     return;
5333   } else if (time == 0 && !isAbsolute) {
5334     time = INFINITE;
5335   } else if (isAbsolute) {
5336     time -= os::javaTimeMillis(); // convert to relative time
5337     if (time <= 0) {  // already elapsed
5338       return;
5339     }
5340   } else { // relative
5341     time /= 1000000;  // Must coarsen from nanos to millis
5342     if (time == 0) {  // Wait for the minimal time unit if zero
5343       time = 1;
5344     }
5345   }
5346 
5347   JavaThread* thread = JavaThread::current();
5348 
5349   // Don't wait if interrupted or already triggered
5350   if (Thread::is_interrupted(thread, false) ||
5351       WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5352     ResetEvent(_ParkEvent);
5353     return;
5354   } else {
5355     ThreadBlockInVM tbivm(thread);
5356     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5357     thread->set_suspend_equivalent();
5358 
5359     WaitForSingleObject(_ParkEvent, time);
5360     ResetEvent(_ParkEvent);
5361 
5362     // If externally suspended while waiting, re-suspend
5363     if (thread->handle_special_suspend_equivalent_condition()) {
5364       thread->java_suspend_self();
5365     }
5366   }
5367 }
5368 
5369 void Parker::unpark() {
5370   guarantee(_ParkEvent != NULL, "invariant");
5371   SetEvent(_ParkEvent);
5372 }
5373 
5374 // Platform Monitor implementation
5375 
5376 // Must already be locked
5377 int os::PlatformMonitor::wait(jlong millis) {
5378   assert(millis >= 0, "negative timeout");
5379   int ret = OS_TIMEOUT;
5380   int status = SleepConditionVariableCS(&_cond, &_mutex,
5381                                         millis == 0 ? INFINITE : millis);
5382   if (status != 0) {
5383     ret = OS_OK;
5384   }
5385   #ifndef PRODUCT
5386   else {
5387     DWORD err = GetLastError();
5388     assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
5389   }
5390   #endif
5391   return ret;
5392 }
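
// Usage sketch for the 0-means-wait-forever convention above (illustrative
// only; assumes the usual lock()/unlock()/notify() members declared for
// PlatformMonitor in os_windows.hpp):
//
//   os::PlatformMonitor mon;
//   mon.lock();
//   int r = mon.wait(0);      // blocks until notified; returns OS_OK
//   r = mon.wait(250);        // waits at most ~250 ms; OS_TIMEOUT on expiry
//   mon.unlock();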
5393 
5394 // Run the specified command in a separate process. Return its exit value,
5395 // or -1 on failure (e.g. can't create a new process).
5396 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
5397   STARTUPINFO si;
5398   PROCESS_INFORMATION pi;
5399   DWORD exit_code;
5400 
5401   char * cmd_string;
5402   const char * cmd_prefix = "cmd /C ";
5403   size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
5404   cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
5405   if (cmd_string == NULL) {
5406     return -1;
5407   }
5408   cmd_string[0] = '\0';
5409   strcat(cmd_string, cmd_prefix);
5410   strcat(cmd_string, cmd);
5411 
5412   // now replace all '\n' with '&'
5413   char * substring = cmd_string;
5414   while ((substring = strchr(substring, '\n')) != NULL) {
5415     substring[0] = '&';
5416     substring++;
5417   }
5418   memset(&si, 0, sizeof(si));
5419   si.cb = sizeof(si);
5420   memset(&pi, 0, sizeof(pi));
5421   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5422                             cmd_string,    // command line
5423                             NULL,   // process security attribute
5424                             NULL,   // thread security attribute
5425                             TRUE,   // inherits system handles
5426                             0,      // no creation flags
5427                             NULL,   // use parent's environment block
5428                             NULL,   // use parent's starting directory
5429                             &si,    // (in) startup information
5430                             &pi);   // (out) process information
5431 
5432   if (rslt) {
5433     // Wait until child process exits.
5434     WaitForSingleObject(pi.hProcess, INFINITE);
5435 
5436     GetExitCodeProcess(pi.hProcess, &exit_code);
5437 
5438     // Close process and thread handles.
5439     CloseHandle(pi.hProcess);
5440     CloseHandle(pi.hThread);
5441   } else {
5442     exit_code = -1;
5443   }
5444 
5445   FREE_C_HEAP_ARRAY(char, cmd_string);
5446   return (int)exit_code;
5447 }
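
// Usage sketch (illustrative only): the command is run via "cmd /C", and any
// embedded '\n' is rewritten to '&' above so a multi-line command executes as
// a single cmd.exe command line.
//
//   char cmd[] = "echo hello\necho world";  // runs as "cmd /C echo hello&echo world"
//   int status = os::fork_and_exec(cmd, false);
//   // status is the child's exit code, or -1 if the process could not be created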
5448 
5449 bool os::find(address addr, outputStream* st) {
5450   int offset = -1;
5451   bool result = false;
5452   char buf[256];
5453   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5454     st->print(PTR_FORMAT " ", addr);
5455     if (strlen(buf) < sizeof(buf) - 1) {
5456       char* p = strrchr(buf, '\\');
5457       if (p) {
5458         st->print("%s", p + 1);
5459       } else {
5460         st->print("%s", buf);
5461       }
5462     } else {
5463         // The library name is probably truncated. Let's omit the library name.
5464         // See also JDK-8147512.
5465     }
5466     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5467       st->print("::%s + 0x%x", buf, offset);
5468     }
5469     st->cr();
5470     result = true;
5471   }
5472   return result;
5473 }
5474 
5475 static jint initSock() {
5476   WSADATA wsadata;
5477 
5478   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5479     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5480                 ::GetLastError());
5481     return JNI_ERR;
5482   }
5483   return JNI_OK;
5484 }
5485 
5486 struct hostent* os::get_host_by_name(char* name) {
5487   return (struct hostent*)gethostbyname(name);
5488 }
5489 
5490 int os::socket_close(int fd) {
5491   return ::closesocket(fd);
5492 }
5493 
5494 int os::socket(int domain, int type, int protocol) {
5495   return ::socket(domain, type, protocol);
5496 }
5497 
5498 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5499   return ::connect(fd, him, len);
5500 }
5501 
5502 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5503   return ::recv(fd, buf, (int)nBytes, flags);
5504 }
5505 
5506 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5507   return ::send(fd, buf, (int)nBytes, flags);
5508 }
5509 
5510 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5511   return ::send(fd, buf, (int)nBytes, flags);
5512 }
5513 
5514 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5515 #if defined(IA32)
5516   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5517 #elif defined (AMD64)
5518   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5519 #endif
5520 
5521 // returns true if thread could be suspended,
5522 // false otherwise
5523 static bool do_suspend(HANDLE* h) {
5524   if (h != NULL) {
5525     if (SuspendThread(*h) != ~0) {
5526       return true;
5527     }
5528   }
5529   return false;
5530 }
5531 
5532 // resume the thread
5533 // calling resume on an active thread is a no-op
5534 static void do_resume(HANDLE* h) {
5535   if (h != NULL) {
5536     ResumeThread(*h);
5537   }
5538 }
5539 
5540 // retrieve a suspend/resume context capable handle
5541 // from the tid. Caller validates handle return value.
5542 void get_thread_handle_for_extended_context(HANDLE* h,
5543                                             OSThread::thread_id_t tid) {
5544   if (h != NULL) {
5545     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5546   }
5547 }
5548 
5549 // Thread sampling implementation
5550 //
5551 void os::SuspendedThreadTask::internal_do_task() {
5552   CONTEXT    ctxt;
5553   HANDLE     h = NULL;
5554 
5555   // get context capable handle for thread
5556   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5557 
5558   // sanity
5559   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5560     return;
5561   }
5562 
5563   // suspend the thread
5564   if (do_suspend(&h)) {
5565     ctxt.ContextFlags = sampling_context_flags;
5566     // get thread context
5567     GetThreadContext(h, &ctxt);
5568     SuspendedThreadTaskContext context(_thread, &ctxt);
5569     // pass context to Thread Sampling impl
5570     do_task(context);
5571     // resume thread
5572     do_resume(&h);
5573   }
5574 
5575   // close handle
5576   CloseHandle(h);
5577 }
5578 
5579 bool os::start_debugging(char *buf, int buflen) {
5580   int len = (int)strlen(buf);
5581   char *p = &buf[len];
5582 
5583   jio_snprintf(p, buflen-len,
5584              "\n\n"
5585              "Do you want to debug the problem?\n\n"
5586              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5587              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5588              "Otherwise, select 'No' to abort...",
5589              os::current_process_id(), os::current_thread_id());
5590 
5591   bool yes = os::message_box("Unexpected Error", buf);
5592 
5593   if (yes) {
5594     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5595     // exception. If VM is running inside a debugger, the debugger will
5596     // catch the exception. Otherwise, the breakpoint exception will reach
5597     // the default windows exception handler, which can spawn a debugger and
5598     // automatically attach to the dying VM.
5599     os::breakpoint();
5600     yes = false;
5601   }
5602   return yes;
5603 }
5604 
5605 void* os::get_default_process_handle() {
5606   return (void*)GetModuleHandle(NULL);
5607 }
5608 
5609 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5610 // which is used to find statically linked in agents.
5611 // Additionally for windows, takes into account __stdcall names.
5612 // Parameters:
5613 //            sym_name: Symbol in library we are looking for
5614 //            lib_name: Name of library to look in, NULL for shared libs.
5615 //            is_absolute_path == true if lib_name is absolute path to agent
5616 //                                     such as "C:/a/b/L.dll"
5617 //            == false if only the base name of the library is passed in
5618 //               such as "L"
5619 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5620                                     bool is_absolute_path) {
5621   char *agent_entry_name;
5622   size_t len;
5623   size_t name_len;
5624   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5625   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5626   const char *start;
5627 
5628   if (lib_name != NULL) {
5629     len = name_len = strlen(lib_name);
5630     if (is_absolute_path) {
5631       // Need to strip path, prefix and suffix
5632       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5633         lib_name = ++start;
5634       } else {
5635         // Need to check for drive prefix
5636         if ((start = strchr(lib_name, ':')) != NULL) {
5637           lib_name = ++start;
5638         }
5639       }
5640       if (len <= (prefix_len + suffix_len)) {
5641         return NULL;
5642       }
5643       lib_name += prefix_len;
5644       name_len = strlen(lib_name) - suffix_len;
5645     }
5646   }
5647   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5648   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5649   if (agent_entry_name == NULL) {
5650     return NULL;
5651   }
5652   if (lib_name != NULL) {
5653     const char *p = strrchr(sym_name, '@');
5654     if (p != NULL && p != sym_name) {
5655       // sym_name == _Agent_OnLoad@XX
5656       strncpy(agent_entry_name, sym_name, (p - sym_name));
5657       agent_entry_name[(p-sym_name)] = '\0';
5658       // agent_entry_name == _Agent_OnLoad
5659       strcat(agent_entry_name, "_");
5660       strncat(agent_entry_name, lib_name, name_len);
5661       strcat(agent_entry_name, p);
5662       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5663     } else {
5664       strcpy(agent_entry_name, sym_name);
5665       strcat(agent_entry_name, "_");
5666       strncat(agent_entry_name, lib_name, name_len);
5667     }
5668   } else {
5669     strcpy(agent_entry_name, sym_name);
5670   }
5671   return agent_entry_name;
5672 }
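
// Examples of the names produced above (illustrative only):
//   sym_name "Agent_OnLoad",     lib_name "L"                    -> "Agent_OnLoad_L"
//   sym_name "_Agent_OnLoad@16", lib_name "C:\a\b\L.dll"
//                                with is_absolute_path == true   -> "_Agent_OnLoad_L@16"
//   sym_name "Agent_OnAttach",   lib_name NULL                   -> "Agent_OnAttach"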
5673 
5674 #ifndef PRODUCT
5675 
5676 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5677 // contiguous memory block at a particular address.
5678 // The test first tries to find a good approximate address to allocate at by using the same
5679 // method to allocate some memory at any address. The test then tries to allocate memory in
5680 // the vicinity (not directly after it, to avoid possible by-chance use of that location).
5681 // This is of course only a dodgy assumption; there is no guarantee that the vicinity of
5682 // the previously allocated memory is available for allocation. The only actual failure
5683 // that is reported is when the test tries to allocate at a particular location but gets a
5684 // different valid one. A NULL return value at this point is not considered an error but may
5685 // be legitimate.
5686 void TestReserveMemorySpecial_test() {
5687   if (!UseLargePages) {
5688     return;
5689   }
5690   // save current value of globals
5691   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5692   bool old_use_numa_interleaving = UseNUMAInterleaving;
5693 
5694   // set globals to make sure we hit the correct code path
5695   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5696 
5697   // do an allocation at an address selected by the OS to get a good one.
5698   const size_t large_allocation_size = os::large_page_size() * 4;
5699   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5700   if (result == NULL) {
5701   } else {
5702     os::release_memory_special(result, large_allocation_size);
5703 
5704     // allocate another page within the recently allocated memory area which seems to be a good location. At least
5705     // we managed to get it once.
5706     const size_t expected_allocation_size = os::large_page_size();
5707     char* expected_location = result + os::large_page_size();
5708     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5709     if (actual_location == NULL) {
5710     } else {
5711       // release memory
5712       os::release_memory_special(actual_location, expected_allocation_size);
5713       // only now check, after releasing any memory to avoid any leaks.
5714       assert(actual_location == expected_location,
5715              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5716              expected_location, expected_allocation_size, actual_location);
5717     }
5718   }
5719 
5720   // restore globals
5721   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5722   UseNUMAInterleaving = old_use_numa_interleaving;
5723 }
5724 #endif // PRODUCT
5725 
5726 /*
5727   All the defined signal names for Windows.
5728 
5729   NOTE that not all of these names are accepted by FindSignal!
5730 
5731   For various reasons some of these may be rejected at runtime.
5732 
5733   Here are the names currently accepted by a user of sun.misc.Signal with
5734   1.4.1 (ignoring potential interaction with use of chaining, etc):
5735 
5736      (LIST TBD)
5737 
5738 */
5739 int os::get_signal_number(const char* name) {
5740   static const struct {
5741     const char* name;
5742     int         number;
5743   } siglabels [] =
5744     // derived from version 6.0 VC98/include/signal.h
5745   {"ABRT",      SIGABRT,        // abnormal termination triggered by abort call
5746   "FPE",        SIGFPE,         // floating point exception
5747   "SEGV",       SIGSEGV,        // segment violation
5748   "INT",        SIGINT,         // interrupt
5749   "TERM",       SIGTERM,        // software term signal from kill
5750   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5751   "ILL",        SIGILL};        // illegal instruction
5752   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5753     if (strcmp(name, siglabels[i].name) == 0) {
5754       return siglabels[i].number;
5755     }
5756   }
5757   return -1;
5758 }
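
// Usage sketch (illustrative only): mapping the names used by sun.misc.Signal
// to VC runtime signal numbers.
//   os::get_signal_number("TERM")   // returns SIGTERM
//   os::get_signal_number("HUP")    // returns -1; SIGHUP is not defined here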
5759 
5760 // Fast current thread access
5761 
5762 int os::win32::_thread_ptr_offset = 0;
5763 
5764 static void call_wrapper_dummy() {}
5765 
5766 // We need to call the os_exception_wrapper once so that it sets
5767 // up the offset from FS of the thread pointer.
5768 void os::win32::initialize_thread_ptr_offset() {
5769   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5770                            NULL, NULL, NULL, NULL);
5771 }
5772 
5773 bool os::supports_map_sync() {
5774   return false;
5775 }