1 /*
   2  * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 // Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
  26 #define _WIN32_WINNT 0x0600
  27 
  28 // no precompiled headers
  29 #include "jvm.h"
  30 #include "classfile/classLoader.hpp"
  31 #include "classfile/systemDictionary.hpp"
  32 #include "classfile/vmSymbols.hpp"
  33 #include "code/icBuffer.hpp"
  34 #include "code/vtableStubs.hpp"
  35 #include "compiler/compileBroker.hpp"
  36 #include "compiler/disassembler.hpp"
  37 #include "interpreter/interpreter.hpp"
  38 #include "logging/log.hpp"
  39 #include "logging/logStream.hpp"
  40 #include "memory/allocation.inline.hpp"
  41 #include "memory/filemap.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "os_share_windows.hpp"
  44 #include "os_windows.inline.hpp"
  45 #include "prims/jniFastGetField.hpp"
  46 #include "prims/jvm_misc.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/atomic.hpp"
  49 #include "runtime/extendedPC.hpp"
  50 #include "runtime/globals.hpp"
  51 #include "runtime/interfaceSupport.inline.hpp"
  52 #include "runtime/java.hpp"
  53 #include "runtime/javaCalls.hpp"
  54 #include "runtime/mutexLocker.hpp"
  55 #include "runtime/objectMonitor.hpp"
  56 #include "runtime/orderAccess.hpp"
  57 #include "runtime/osThread.hpp"
  58 #include "runtime/perfMemory.hpp"
  59 #include "runtime/safepointMechanism.hpp"
  60 #include "runtime/sharedRuntime.hpp"
  61 #include "runtime/statSampler.hpp"
  62 #include "runtime/stubRoutines.hpp"
  63 #include "runtime/thread.inline.hpp"
  64 #include "runtime/threadCritical.hpp"
  65 #include "runtime/timer.hpp"
  66 #include "runtime/vm_version.hpp"
  67 #include "services/attachListener.hpp"
  68 #include "services/memTracker.hpp"
  69 #include "services/runtimeService.hpp"
  70 #include "utilities/align.hpp"
  71 #include "utilities/decoder.hpp"
  72 #include "utilities/defaultStream.hpp"
  73 #include "utilities/events.hpp"
  74 #include "utilities/growableArray.hpp"
  75 #include "utilities/macros.hpp"
  76 #include "utilities/vmError.hpp"
  77 #include "symbolengine.hpp"
  78 #include "windbghelp.hpp"
  79 
  80 
  81 #ifdef _DEBUG
  82 #include <crtdbg.h>
  83 #endif
  84 
  85 
  86 #include <windows.h>
  87 #include <sys/types.h>
  88 #include <sys/stat.h>
  89 #include <sys/timeb.h>
  90 #include <objidl.h>
  91 #include <shlobj.h>
  92 
  93 #include <malloc.h>
  94 #include <signal.h>
  95 #include <direct.h>
  96 #include <errno.h>
  97 #include <fcntl.h>
  98 #include <io.h>
  99 #include <process.h>              // For _beginthreadex(), _endthreadex()
 100 #include <imagehlp.h>             // For os::dll_address_to_function_name
 101 // for enumerating dll libraries
 102 #include <vdmdbg.h>
 103 #include <psapi.h>
 104 #include <mmsystem.h>
 105 #include <winsock2.h>
 106 
 107 // for timer info max values which include all bits
 108 #define ALL_64_BITS CONST64(-1)
 109 
 110 // For DLL loading/load error detection
// Constants from the PE/COFF file format
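// IMAGE_FILE_PTR_TO_SIGNATURE is the fixed offset of the DOS header field
// (e_lfanew) holding the file offset of the 4-byte "PE\0\0" signature; the
// COFF file header, whose Machine field identifies the target architecture,
// follows immediately after that signature.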
 112 #define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
 113 #define IMAGE_FILE_SIGNATURE_LENGTH 4
 114 
 115 static HANDLE main_process;
 116 static HANDLE main_thread;
 117 static int    main_thread_id;
 118 
 119 static FILETIME process_creation_time;
 120 static FILETIME process_exit_time;
 121 static FILETIME process_user_time;
 122 static FILETIME process_kernel_time;
 123 
 124 #ifdef _M_AMD64
 125   #define __CPU__ amd64
 126 #else
 127   #define __CPU__ i486
 128 #endif
 129 
 130 #if INCLUDE_AOT
 131 PVOID  topLevelVectoredExceptionHandler = NULL;
 132 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 133 #endif
 134 
 135 // save DLL module handle, used by GetModuleFileName
 136 
 137 HINSTANCE vm_lib_handle;
 138 
 139 BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
 140   switch (reason) {
 141   case DLL_PROCESS_ATTACH:
 142     vm_lib_handle = hinst;
 143     if (ForceTimeHighResolution) {
 144       timeBeginPeriod(1L);
 145     }
 146     WindowsDbgHelp::pre_initialize();
 147     SymbolEngine::pre_initialize();
 148     break;
 149   case DLL_PROCESS_DETACH:
 150     if (ForceTimeHighResolution) {
 151       timeEndPeriod(1L);
 152     }
 153 #if INCLUDE_AOT
 154     if (topLevelVectoredExceptionHandler != NULL) {
 155       RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
 156       topLevelVectoredExceptionHandler = NULL;
 157     }
 158 #endif
 159     break;
 160   default:
 161     break;
 162   }
 163   return true;
 164 }
 165 
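// Convert a FILETIME (a 64-bit count of 100 ns intervals, split across two
// 32-bit words) to seconds as a double. 'split' is 1e7, the number of 100 ns
// ticks per second; 'high' (0xFFFFFFFF) approximates the 2^32 weight of the
// upper word.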
 166 static inline double fileTimeAsDouble(FILETIME* time) {
 167   const double high  = (double) ((unsigned int) ~0);
 168   const double split = 10000000.0;
 169   double result = (time->dwLowDateTime / split) +
 170                    time->dwHighDateTime * (high/split);
 171   return result;
 172 }
 173 
 174 // Implementation of os
 175 
 176 bool os::unsetenv(const char* name) {
 177   assert(name != NULL, "Null pointer");
 178   return (SetEnvironmentVariable(name, NULL) == TRUE);
 179 }
 180 
 181 // No setuid programs under Windows.
 182 bool os::have_special_privileges() {
 183   return false;
 184 }
 185 
 186 
// This method is a periodic task to check for misbehaving JNI applications
// under CheckJNI; we can add any periodic checks here.
// On Windows it currently does nothing.
 190 void os::run_periodic_checks() {
 191   return;
 192 }
 193 
 194 // previous UnhandledExceptionFilter, if there is one
 195 static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
 196 
 197 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
 198 
 199 void os::init_system_properties_values() {
 200   // sysclasspath, java_home, dll_dir
 201   {
 202     char *home_path;
 203     char *dll_path;
 204     char *pslash;
 205     const char *bin = "\\bin";
 206     char home_dir[MAX_PATH + 1];
 207     char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");
 208 
 209     if (alt_home_dir != NULL)  {
 210       strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
 211       home_dir[MAX_PATH] = '\0';
 212     } else {
 213       os::jvm_path(home_dir, sizeof(home_dir));
 214       // Found the full path to jvm.dll.
      // Now cut the path down to <java_home> if we can.
 216       *(strrchr(home_dir, '\\')) = '\0';  // get rid of \jvm.dll
 217       pslash = strrchr(home_dir, '\\');
 218       if (pslash != NULL) {
 219         *pslash = '\0';                   // get rid of \{client|server}
 220         pslash = strrchr(home_dir, '\\');
 221         if (pslash != NULL) {
 222           *pslash = '\0';                 // get rid of \bin
 223         }
 224       }
 225     }
 226 
 227     home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
 228     strcpy(home_path, home_dir);
 229     Arguments::set_java_home(home_path);
 230     FREE_C_HEAP_ARRAY(char, home_path);
 231 
 232     dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
 233                                 mtInternal);
 234     strcpy(dll_path, home_dir);
 235     strcat(dll_path, bin);
 236     Arguments::set_dll_dir(dll_path);
 237     FREE_C_HEAP_ARRAY(char, dll_path);
 238 
 239     if (!set_boot_path('\\', ';')) {
 240       vm_exit_during_initialization("Failed setting boot class path.", NULL);
 241     }
 242   }
 243 
 244 // library_path
 245 #define EXT_DIR "\\lib\\ext"
 246 #define BIN_DIR "\\bin"
 247 #define PACKAGE_DIR "\\Sun\\Java"
 248   {
 249     // Win32 library search order (See the documentation for LoadLibrary):
 250     //
 251     // 1. The directory from which application is loaded.
 252     // 2. The system wide Java Extensions directory (Java only)
 253     // 3. System directory (GetSystemDirectory)
 254     // 4. Windows directory (GetWindowsDirectory)
 255     // 5. The PATH environment variable
 256     // 6. The current directory
 257 
 258     char *library_path;
 259     char tmp[MAX_PATH];
 260     char *path_str = ::getenv("PATH");
 261 
 262     library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
 263                                     sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);
 264 
 265     library_path[0] = '\0';
 266 
 267     GetModuleFileName(NULL, tmp, sizeof(tmp));
 268     *(strrchr(tmp, '\\')) = '\0';
 269     strcat(library_path, tmp);
 270 
 271     GetWindowsDirectory(tmp, sizeof(tmp));
 272     strcat(library_path, ";");
 273     strcat(library_path, tmp);
 274     strcat(library_path, PACKAGE_DIR BIN_DIR);
 275 
 276     GetSystemDirectory(tmp, sizeof(tmp));
 277     strcat(library_path, ";");
 278     strcat(library_path, tmp);
 279 
 280     GetWindowsDirectory(tmp, sizeof(tmp));
 281     strcat(library_path, ";");
 282     strcat(library_path, tmp);
 283 
 284     if (path_str) {
 285       strcat(library_path, ";");
 286       strcat(library_path, path_str);
 287     }
 288 
 289     strcat(library_path, ";.");
 290 
 291     Arguments::set_library_path(library_path);
 292     FREE_C_HEAP_ARRAY(char, library_path);
 293   }
 294 
 295   // Default extensions directory
 296   {
 297     char path[MAX_PATH];
 298     char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
 299     GetWindowsDirectory(path, MAX_PATH);
 300     sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
 301             path, PACKAGE_DIR, EXT_DIR);
 302     Arguments::set_ext_dirs(buf);
 303   }
 304   #undef EXT_DIR
 305   #undef BIN_DIR
 306   #undef PACKAGE_DIR
 307 
 308 #ifndef _WIN64
 309   // set our UnhandledExceptionFilter and save any previous one
 310   prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
 311 #endif
 312 
 313   // Done
 314   return;
 315 }
 316 
 317 void os::breakpoint() {
 318   DebugBreak();
 319 }
 320 
 321 // Invoked from the BREAKPOINT Macro
 322 extern "C" void breakpoint() {
 323   os::breakpoint();
 324 }
 325 
// The RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
 327 // So far, this method is only used by Native Memory Tracking, which is
 328 // only supported on Windows XP or later.
 329 //
 330 int os::get_native_stack(address* stack, int frames, int toSkip) {
 331   int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
 332   for (int index = captured; index < frames; index ++) {
 333     stack[index] = NULL;
 334   }
 335   return captured;
 336 }
 337 
 338 
 339 // os::current_stack_base()
 340 //
 341 //   Returns the base of the stack, which is the stack's
 342 //   starting address.  This function must be called
 343 //   while running on the stack of the thread being queried.
 344 
 345 address os::current_stack_base() {
 346   MEMORY_BASIC_INFORMATION minfo;
 347   address stack_bottom;
 348   size_t stack_size;
 349 
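
  // Query the region containing a local variable (&minfo lives on the current
  // thread's stack); AllocationBase is then the bottom of the stack reservation.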
 350   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 351   stack_bottom =  (address)minfo.AllocationBase;
 352   stack_size = minfo.RegionSize;
 353 
 354   // Add up the sizes of all the regions with the same
 355   // AllocationBase.
 356   while (1) {
 357     VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
 358     if (stack_bottom == (address)minfo.AllocationBase) {
 359       stack_size += minfo.RegionSize;
 360     } else {
 361       break;
 362     }
 363   }
 364   return stack_bottom + stack_size;
 365 }
 366 
 367 size_t os::current_stack_size() {
 368   size_t sz;
 369   MEMORY_BASIC_INFORMATION minfo;
 370   VirtualQuery(&minfo, &minfo, sizeof(minfo));
 371   sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
 372   return sz;
 373 }
 374 
 375 bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
 376   MEMORY_BASIC_INFORMATION minfo;
 377   committed_start = NULL;
 378   committed_size = 0;
 379   address top = start + size;
 380   const address start_addr = start;
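  // Walk the range region by region with VirtualQuery: remember the first
  // committed address encountered and accumulate the sizes of committed
  // regions until an uncommitted gap is hit.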
 381   while (start < top) {
 382     VirtualQuery(start, &minfo, sizeof(minfo));
 383     if ((minfo.State & MEM_COMMIT) == 0) {  // not committed
 384       if (committed_start != NULL) {
 385         break;
 386       }
 387     } else {  // committed
 388       if (committed_start == NULL) {
 389         committed_start = start;
 390       }
 391       size_t offset = start - (address)minfo.BaseAddress;
 392       committed_size += minfo.RegionSize - offset;
 393     }
 394     start = (address)minfo.BaseAddress + minfo.RegionSize;
 395   }
 396 
 397   if (committed_start == NULL) {
 398     assert(committed_size == 0, "Sanity");
 399     return false;
 400   } else {
 401     assert(committed_start >= start_addr && committed_start < top, "Out of range");
 402     // current region may go beyond the limit, trim to the limit
 403     committed_size = MIN2(committed_size, size_t(top - committed_start));
 404     return true;
 405   }
 406 }
 407 
 408 struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
 409   const struct tm* time_struct_ptr = localtime(clock);
 410   if (time_struct_ptr != NULL) {
 411     *res = *time_struct_ptr;
 412     return res;
 413   }
 414   return NULL;
 415 }
 416 
 417 struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
 418   const struct tm* time_struct_ptr = gmtime(clock);
 419   if (time_struct_ptr != NULL) {
 420     *res = *time_struct_ptr;
 421     return res;
 422   }
 423   return NULL;
 424 }
 425 
 426 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 427 
 428 // Thread start routine for all newly created threads
 429 static unsigned __stdcall thread_native_entry(Thread* thread) {
 430 
 431   thread->record_stack_base_and_size();
 432 
 433   // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
 435   // cache lines. The threads can be either from the same JVM instance, or
 436   // from different JVM instances. The benefit is especially true for
 437   // processors with hyperthreading technology.
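  // The _alloca below offsets the initial stack pointer by 0..896 bytes in
  // 128-byte (cache-line-sized) steps derived from the pid and a counter.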
 438   static int counter = 0;
 439   int pid = os::current_process_id();
 440   _alloca(((pid ^ counter++) & 7) * 128);
 441 
 442   thread->initialize_thread_current();
 443 
 444   OSThread* osthr = thread->osthread();
 445   assert(osthr->get_state() == RUNNABLE, "invalid os thread state");
 446 
 447   if (UseNUMA) {
 448     int lgrp_id = os::numa_get_group_id();
 449     if (lgrp_id != -1) {
 450       thread->set_lgrp_id(lgrp_id);
 451     }
 452   }
 453 
 454   // Diagnostic code to investigate JDK-6573254
 455   int res = 30115;  // non-java thread
 456   if (thread->is_Java_thread()) {
 457     res = 20115;    // java thread
 458   }
 459 
 460   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());
 461 
  // Install a win32 structured exception handler around every thread created
  // by the VM, so the VM can generate an error dump when an exception occurs
  // in a non-Java thread (e.g. the VM thread).
 465   __try {
 466     thread->call_run();
 467   } __except(topLevelExceptionFilter(
 468                                      (_EXCEPTION_POINTERS*)_exception_info())) {
 469     // Nothing to do.
 470   }
 471 
 472   // Note: at this point the thread object may already have deleted itself.
 473   // Do not dereference it from here on out.
 474 
 475   log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());
 476 
 477   // One less thread is executing
 478   // When the VMThread gets here, the main thread may have already exited
 479   // which frees the CodeHeap containing the Atomic::add code
 480   if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
 481     Atomic::dec(&os::win32::_os_thread_count);
 482   }
 483 
 484   // Thread must not return from exit_process_or_thread(), but if it does,
 485   // let it proceed to exit normally
 486   return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
 487 }
 488 
 489 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
 490                                   int thread_id) {
 491   // Allocate the OSThread object
 492   OSThread* osthread = new OSThread(NULL, NULL);
 493   if (osthread == NULL) return NULL;
 494 
 495   // Initialize the JDK library's interrupt event.
 496   // This should really be done when OSThread is constructed,
 497   // but there is no way for a constructor to report failure to
 498   // allocate the event.
 499   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 500   if (interrupt_event == NULL) {
 501     delete osthread;
 502     return NULL;
 503   }
 504   osthread->set_interrupt_event(interrupt_event);
 505 
 506   // Store info on the Win32 thread into the OSThread
 507   osthread->set_thread_handle(thread_handle);
 508   osthread->set_thread_id(thread_id);
 509 
 510   if (UseNUMA) {
 511     int lgrp_id = os::numa_get_group_id();
 512     if (lgrp_id != -1) {
 513       thread->set_lgrp_id(lgrp_id);
 514     }
 515   }
 516 
 517   // Initial thread state is INITIALIZED, not SUSPENDED
 518   osthread->set_state(INITIALIZED);
 519 
 520   return osthread;
 521 }
 522 
 523 
 524 bool os::create_attached_thread(JavaThread* thread) {
 525 #ifdef ASSERT
 526   thread->verify_not_published();
 527 #endif
 528   HANDLE thread_h;
 529   if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
 530                        &thread_h, THREAD_ALL_ACCESS, false, 0)) {
 531     fatal("DuplicateHandle failed\n");
 532   }
 533   OSThread* osthread = create_os_thread(thread, thread_h,
 534                                         (int)current_thread_id());
 535   if (osthread == NULL) {
 536     return false;
 537   }
 538 
 539   // Initial thread state is RUNNABLE
 540   osthread->set_state(RUNNABLE);
 541 
 542   thread->set_osthread(osthread);
 543 
 544   log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
 545     os::current_thread_id());
 546 
 547   return true;
 548 }
 549 
 550 bool os::create_main_thread(JavaThread* thread) {
 551 #ifdef ASSERT
 552   thread->verify_not_published();
 553 #endif
 554   if (_starting_thread == NULL) {
 555     _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
 556     if (_starting_thread == NULL) {
 557       return false;
 558     }
 559   }
 560 
  // The primordial thread is runnable from the start
 562   _starting_thread->set_state(RUNNABLE);
 563 
 564   thread->set_osthread(_starting_thread);
 565   return true;
 566 }
 567 
 568 // Helper function to trace _beginthreadex attributes,
 569 //  similar to os::Posix::describe_pthread_attr()
 570 static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
 571                                                size_t stacksize, unsigned initflag) {
 572   stringStream ss(buf, buflen);
 573   if (stacksize == 0) {
 574     ss.print("stacksize: default, ");
 575   } else {
 576     ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
 577   }
 578   ss.print("flags: ");
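  // X-macro: ALL lists the flags of interest and expands PRINT_FLAG once for
  // each, printing the name of every flag that is set in initflag.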
 579   #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
 580   #define ALL(X) \
 581     X(CREATE_SUSPENDED) \
 582     X(STACK_SIZE_PARAM_IS_A_RESERVATION)
 583   ALL(PRINT_FLAG)
 584   #undef ALL
 585   #undef PRINT_FLAG
 586   return buf;
 587 }
 588 
 589 // Allocate and initialize a new OSThread
 590 bool os::create_thread(Thread* thread, ThreadType thr_type,
 591                        size_t stack_size) {
 592   unsigned thread_id;
 593 
 594   // Allocate the OSThread object
 595   OSThread* osthread = new OSThread(NULL, NULL);
 596   if (osthread == NULL) {
 597     return false;
 598   }
 599 
 600   // Initialize the JDK library's interrupt event.
 601   // This should really be done when OSThread is constructed,
 602   // but there is no way for a constructor to report failure to
 603   // allocate the event.
 604   HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
 605   if (interrupt_event == NULL) {
 606     delete osthread;
 607     return false;
 608   }
 609   osthread->set_interrupt_event(interrupt_event);
 610   // We don't call set_interrupted(false) as it will trip the assert in there
 611   // as we are not operating on the current thread. We don't need to call it
 612   // because the initial state is already correct.
 613 
 614   thread->set_osthread(osthread);
 615 
 616   if (stack_size == 0) {
 617     switch (thr_type) {
 618     case os::java_thread:
      // Java threads use ThreadStackSize, whose default value can be changed with the -Xss flag
 620       if (JavaThread::stack_size_at_create() > 0) {
 621         stack_size = JavaThread::stack_size_at_create();
 622       }
 623       break;
 624     case os::compiler_thread:
 625       if (CompilerThreadStackSize > 0) {
 626         stack_size = (size_t)(CompilerThreadStackSize * K);
 627         break;
 628       } // else fall through:
 629         // use VMThreadStackSize if CompilerThreadStackSize is not defined
 630     case os::vm_thread:
 631     case os::pgc_thread:
 632     case os::cgc_thread:
 633     case os::watcher_thread:
 634       if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
 635       break;
 636     }
 637   }
 638 
 639   // Create the Win32 thread
 640   //
  // Contrary to what the MSDN documentation says, "stack_size" in _beginthreadex()
  // does not specify the stack size. Instead, it specifies the size of the
  // initially committed space. The stack size is determined by the
  // PE header in the executable. If the committed "stack_size" is larger
  // than the default value in the PE header, the stack is rounded up to the
  // nearest multiple of 1MB. For example, if the launcher has a default
  // stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all; it only affects the initial
  // commitment. On the other hand, specifying a 'stack_size' larger than the
  // default value may cause a significant increase in memory usage, because
  // not only is the stack space rounded up to a multiple of 1MB, but the
  // entire space is also committed upfront.
  //
  // Finally, Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
  // for CreateThread() that can treat 'stack_size' as the stack size. However,
  // we are not supposed to call CreateThread() directly according to the MSDN
  // documentation, because the JVM uses the C runtime library. The good news is
  // that the flag appears to work with _beginthreadex() as well.
 659 
 660   const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
 661   HANDLE thread_handle =
 662     (HANDLE)_beginthreadex(NULL,
 663                            (unsigned)stack_size,
 664                            (unsigned (__stdcall *)(void*)) thread_native_entry,
 665                            thread,
 666                            initflag,
 667                            &thread_id);
 668 
 669   char buf[64];
 670   if (thread_handle != NULL) {
 671     log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
 672       thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 673   } else {
 674     log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
 675       os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
 676     // Log some OS information which might explain why creating the thread failed.
 677     log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
 678     LogStream st(Log(os, thread)::info());
 679     os::print_memory_info(&st);
 680   }
 681 
 682   if (thread_handle == NULL) {
 683     // Need to clean up stuff we've allocated so far
 684     thread->set_osthread(NULL);
 685     delete osthread;
 686     return false;
 687   }
 688 
 689   Atomic::inc(&os::win32::_os_thread_count);
 690 
 691   // Store info on the Win32 thread into the OSThread
 692   osthread->set_thread_handle(thread_handle);
 693   osthread->set_thread_id(thread_id);
 694 
 695   // Initial thread state is INITIALIZED, not SUSPENDED
 696   osthread->set_state(INITIALIZED);
 697 
 698   // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
 699   return true;
 700 }
 701 
 702 
 703 // Free Win32 resources related to the OSThread
 704 void os::free_thread(OSThread* osthread) {
 705   assert(osthread != NULL, "osthread not set");
 706 
 707   // We are told to free resources of the argument thread,
 708   // but we can only really operate on the current thread.
 709   assert(Thread::current()->osthread() == osthread,
 710          "os::free_thread but not current thread");
 711 
 712   CloseHandle(osthread->thread_handle());
 713   delete osthread;
 714 }
 715 
 716 static jlong first_filetime;
 717 static jlong initial_performance_count;
 718 static jlong performance_frequency;
 719 
 720 
 721 jlong as_long(LARGE_INTEGER x) {
 722   jlong result = 0; // initialization to avoid warning
 723   set_high(&result, x.HighPart);
 724   set_low(&result, x.LowPart);
 725   return result;
 726 }
 727 
 728 
 729 jlong os::elapsed_counter() {
 730   LARGE_INTEGER count;
 731   QueryPerformanceCounter(&count);
 732   return as_long(count) - initial_performance_count;
 733 }
 734 
 735 
 736 jlong os::elapsed_frequency() {
 737   return performance_frequency;
 738 }
 739 
 740 
 741 julong os::available_memory() {
 742   return win32::available_memory();
 743 }
 744 
 745 julong os::win32::available_memory() {
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
  // incorrect value if total memory is larger than 4GB.
 748   MEMORYSTATUSEX ms;
 749   ms.dwLength = sizeof(ms);
 750   GlobalMemoryStatusEx(&ms);
 751 
 752   return (julong)ms.ullAvailPhys;
 753 }
 754 
 755 julong os::physical_memory() {
 756   return win32::physical_memory();
 757 }
 758 
 759 bool os::has_allocatable_memory_limit(julong* limit) {
 760   MEMORYSTATUSEX ms;
 761   ms.dwLength = sizeof(ms);
 762   GlobalMemoryStatusEx(&ms);
 763 #ifdef _LP64
 764   *limit = (julong)ms.ullAvailVirtual;
 765   return true;
 766 #else
 767   // Limit to 1400m because of the 2gb address space wall
 768   *limit = MIN2((julong)1400*M, (julong)ms.ullAvailVirtual);
 769   return true;
 770 #endif
 771 }
 772 
 773 int os::active_processor_count() {
 774   // User has overridden the number of active processors
 775   if (ActiveProcessorCount > 0) {
 776     log_trace(os)("active_processor_count: "
 777                   "active processor count set by user : %d",
 778                   ActiveProcessorCount);
 779     return ActiveProcessorCount;
 780   }
 781 
 782   DWORD_PTR lpProcessAffinityMask = 0;
 783   DWORD_PTR lpSystemAffinityMask = 0;
 784   int proc_count = processor_count();
 785   if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
 786       GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // The number of active processors is the number of set bits in the
    // process affinity mask.
 788     int bitcount = 0;
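    // Count the set bits by clearing the lowest set bit on each iteration
    // (Kernighan's method); the loop runs once per set bit.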
 789     while (lpProcessAffinityMask != 0) {
 790       lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
 791       bitcount++;
 792     }
 793     return bitcount;
 794   } else {
 795     return proc_count;
 796   }
 797 }
 798 
 799 uint os::processor_id() {
 800   return (uint)GetCurrentProcessorNumber();
 801 }
 802 
 803 void os::set_native_thread_name(const char *name) {
 804 
 805   // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
 806   //
 807   // Note that unfortunately this only works if the process
  // is already attached to a debugger; the debugger must observe
 809   // the exception below to show the correct name.
 810 
 811   // If there is no debugger attached skip raising the exception
 812   if (!IsDebuggerPresent()) {
 813     return;
 814   }
 815 
 816   const DWORD MS_VC_EXCEPTION = 0x406D1388;
 817   struct {
 818     DWORD dwType;     // must be 0x1000
 819     LPCSTR szName;    // pointer to name (in user addr space)
 820     DWORD dwThreadID; // thread ID (-1=caller thread)
 821     DWORD dwFlags;    // reserved for future use, must be zero
 822   } info;
 823 
 824   info.dwType = 0x1000;
 825   info.szName = name;
 826   info.dwThreadID = -1;
 827   info.dwFlags = 0;
 828 
 829   __try {
 830     RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
 831   } __except(EXCEPTION_EXECUTE_HANDLER) {}
 832 }
 833 
 834 bool os::bind_to_processor(uint processor_id) {
 835   // Not yet implemented.
 836   return false;
 837 }
 838 
 839 void os::win32::initialize_performance_counter() {
 840   LARGE_INTEGER count;
 841   QueryPerformanceFrequency(&count);
 842   performance_frequency = as_long(count);
 843   QueryPerformanceCounter(&count);
 844   initial_performance_count = as_long(count);
 845 }
 846 
 847 
 848 double os::elapsedTime() {
 849   return (double) elapsed_counter() / (double) elapsed_frequency();
 850 }
 851 
 852 
 853 // Windows format:
 854 //   The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
 855 // Java format:
 856 //   Java standards require the number of milliseconds since 1/1/1970
 857 
 858 // Constant offset - calculated using offset()
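// 116444736000000000 is the span from 1601-01-01 to 1970-01-01 expressed in
// 100 ns units: 134774 days * 86400 s/day * 10^7 ticks/s.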
 859 static jlong  _offset   = 116444736000000000;
 860 // Fake time counter for reproducible results when debugging
 861 static jlong  fake_time = 0;
 862 
 863 #ifdef ASSERT
 864 // Just to be safe, recalculate the offset in debug mode
 865 static jlong _calculated_offset = 0;
 866 static int   _has_calculated_offset = 0;
 867 
 868 jlong offset() {
 869   if (_has_calculated_offset) return _calculated_offset;
 870   SYSTEMTIME java_origin;
 871   java_origin.wYear          = 1970;
 872   java_origin.wMonth         = 1;
 873   java_origin.wDayOfWeek     = 0; // ignored
 874   java_origin.wDay           = 1;
 875   java_origin.wHour          = 0;
 876   java_origin.wMinute        = 0;
 877   java_origin.wSecond        = 0;
 878   java_origin.wMilliseconds  = 0;
 879   FILETIME jot;
 880   if (!SystemTimeToFileTime(&java_origin, &jot)) {
 881     fatal("Error = %d\nWindows error", GetLastError());
 882   }
 883   _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
 884   _has_calculated_offset = 1;
 885   assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
 886   return _calculated_offset;
 887 }
 888 #else
 889 jlong offset() {
 890   return _offset;
 891 }
 892 #endif
 893 
 894 jlong windows_to_java_time(FILETIME wt) {
 895   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 896   return (a - offset()) / 10000;
 897 }
 898 
// Returns time ticks in 100-nanosecond units (tenths of a microsecond)
 900 jlong windows_to_time_ticks(FILETIME wt) {
 901   jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
 902   return (a - offset());
 903 }
 904 
 905 FILETIME java_to_windows_time(jlong l) {
 906   jlong a = (l * 10000) + offset();
 907   FILETIME result;
 908   result.dwHighDateTime = high(a);
 909   result.dwLowDateTime  = low(a);
 910   return result;
 911 }
 912 
 913 bool os::supports_vtime() { return true; }
 914 
 915 double os::elapsedVTime() {
 916   FILETIME created;
 917   FILETIME exited;
 918   FILETIME kernel;
 919   FILETIME user;
 920   if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
 921     // the resolution of windows_to_java_time() should be sufficient (ms)
 922     return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
 923   } else {
 924     return elapsedTime();
 925   }
 926 }
 927 
 928 jlong os::javaTimeMillis() {
 929   FILETIME wt;
 930   GetSystemTimeAsFileTime(&wt);
 931   return windows_to_java_time(wt);
 932 }
 933 
 934 void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
 935   FILETIME wt;
 936   GetSystemTimeAsFileTime(&wt);
 937   jlong ticks = windows_to_time_ticks(wt); // 10th of micros
 938   jlong secs = jlong(ticks / 10000000); // 10000 * 1000
 939   seconds = secs;
 940   nanos = jlong(ticks - (secs*10000000)) * 100;
 941 }
 942 
 943 jlong os::javaTimeNanos() {
 944     LARGE_INTEGER current_count;
 945     QueryPerformanceCounter(&current_count);
 946     double current = as_long(current_count);
 947     double freq = performance_frequency;
 948     jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
 949     return time;
 950 }
 951 
 952 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
 953   jlong freq = performance_frequency;
 954   if (freq < NANOSECS_PER_SEC) {
 955     // the performance counter is 64 bits and we will
 956     // be multiplying it -- so no wrap in 64 bits
 957     info_ptr->max_value = ALL_64_BITS;
 958   } else if (freq > NANOSECS_PER_SEC) {
 959     // use the max value the counter can reach to
 960     // determine the max value which could be returned
 961     julong max_counter = (julong)ALL_64_BITS;
 962     info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
 963   } else {
 964     // the performance counter is 64 bits and we will
 965     // be using it directly -- so no wrap in 64 bits
 966     info_ptr->max_value = ALL_64_BITS;
 967   }
 968 
 969   // using a counter, so no skipping
 970   info_ptr->may_skip_backward = false;
 971   info_ptr->may_skip_forward = false;
 972 
 973   info_ptr->kind = JVMTI_TIMER_ELAPSED;                // elapsed not CPU time
 974 }
 975 
 976 char* os::local_time_string(char *buf, size_t buflen) {
 977   SYSTEMTIME st;
 978   GetLocalTime(&st);
 979   jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
 980                st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
 981   return buf;
 982 }
 983 
 984 bool os::getTimesSecs(double* process_real_time,
 985                       double* process_user_time,
 986                       double* process_system_time) {
 987   HANDLE h_process = GetCurrentProcess();
 988   FILETIME create_time, exit_time, kernel_time, user_time;
 989   BOOL result = GetProcessTimes(h_process,
 990                                 &create_time,
 991                                 &exit_time,
 992                                 &kernel_time,
 993                                 &user_time);
 994   if (result != 0) {
 995     FILETIME wt;
 996     GetSystemTimeAsFileTime(&wt);
 997     jlong rtc_millis = windows_to_java_time(wt);
 998     *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
 999     *process_user_time =
1000       (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
1001     *process_system_time =
1002       (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
1003     return true;
1004   } else {
1005     return false;
1006   }
1007 }
1008 
1009 void os::shutdown() {
1010   // allow PerfMemory to attempt cleanup of any persistent resources
1011   perfMemory_exit();
1012 
1013   // flush buffered output, finish log files
1014   ostream_abort();
1015 
1016   // Check for abort hook
1017   abort_hook_t abort_hook = Arguments::abort_hook();
1018   if (abort_hook != NULL) {
1019     abort_hook();
1020   }
1021 }
1022 
1023 
1024 static HANDLE dumpFile = NULL;
1025 
1026 // Check if dump file can be created.
1027 void os::check_dump_limit(char* buffer, size_t buffsz) {
1028   bool status = true;
1029   if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
1030     jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
1031     status = false;
1032   }
1033 
1034 #ifndef ASSERT
1035   if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
1036     jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
1037     status = false;
1038   }
1039 #endif
1040 
1041   if (status) {
1042     const char* cwd = get_current_directory(NULL, 0);
1043     int pid = current_process_id();
1044     if (cwd != NULL) {
1045       jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
1046     } else {
1047       jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
1048     }
1049 
1050     if (dumpFile == NULL &&
1051        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
1052                  == INVALID_HANDLE_VALUE) {
1053       jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
1054       status = false;
1055     }
1056   }
1057   VMError::record_coredump_status(buffer, status);
1058 }
1059 
1060 void os::abort(bool dump_core, void* siginfo, const void* context) {
1061   EXCEPTION_POINTERS ep;
1062   MINIDUMP_EXCEPTION_INFORMATION mei;
1063   MINIDUMP_EXCEPTION_INFORMATION* pmei;
1064 
1065   HANDLE hProcess = GetCurrentProcess();
1066   DWORD processId = GetCurrentProcessId();
1067   MINIDUMP_TYPE dumpType;
1068 
1069   shutdown();
1070   if (!dump_core || dumpFile == NULL) {
1071     if (dumpFile != NULL) {
1072       CloseHandle(dumpFile);
1073     }
1074     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1075   }
1076 
1077   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
1078     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
1079 
1080   if (siginfo != NULL && context != NULL) {
1081     ep.ContextRecord = (PCONTEXT) context;
1082     ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;
1083 
1084     mei.ThreadId = GetCurrentThreadId();
1085     mei.ExceptionPointers = &ep;
1086     pmei = &mei;
1087   } else {
1088     pmei = NULL;
1089   }
1090 
1091   // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If the first call fails, fall back to using just MiniDumpWithFullMemory.
1093   if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
1094       !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
1095     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
1096   }
1097   CloseHandle(dumpFile);
1098   win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
1099 }
1100 
1101 // Die immediately, no exit hook, no abort hook, no cleanup.
1102 void os::die() {
1103   win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
1104 }
1105 
1106 // Directory routines copied from src/win32/native/java/io/dirent_md.c
1107 //  * dirent_md.c       1.15 00/02/02
1108 //
1109 // The declarations for DIR and struct dirent are in jvm_win32.h.
1110 
1111 // Caller must have already run dirname through JVM_NativePath, which removes
1112 // duplicate slashes and converts all instances of '/' into '\\'.
1113 
1114 DIR * os::opendir(const char *dirname) {
1115   assert(dirname != NULL, "just checking");   // hotspot change
1116   DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
1117   DWORD fattr;                                // hotspot change
1118   char alt_dirname[4] = { 0, 0, 0, 0 };
1119 
1120   if (dirp == 0) {
1121     errno = ENOMEM;
1122     return 0;
1123   }
1124 
1125   // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
1126   // as a directory in FindFirstFile().  We detect this case here and
1127   // prepend the current drive name.
1128   //
1129   if (dirname[1] == '\0' && dirname[0] == '\\') {
1130     alt_dirname[0] = _getdrive() + 'A' - 1;
1131     alt_dirname[1] = ':';
1132     alt_dirname[2] = '\\';
1133     alt_dirname[3] = '\0';
1134     dirname = alt_dirname;
1135   }
1136 
1137   dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
1138   if (dirp->path == 0) {
1139     free(dirp);
1140     errno = ENOMEM;
1141     return 0;
1142   }
1143   strcpy(dirp->path, dirname);
1144 
1145   fattr = GetFileAttributes(dirp->path);
1146   if (fattr == 0xffffffff) {
1147     free(dirp->path);
1148     free(dirp);
1149     errno = ENOENT;
1150     return 0;
1151   } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
1152     free(dirp->path);
1153     free(dirp);
1154     errno = ENOTDIR;
1155     return 0;
1156   }
1157 
1158   // Append "*.*", or possibly "\\*.*", to path
1159   if (dirp->path[1] == ':' &&
1160       (dirp->path[2] == '\0' ||
1161       (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
1162     // No '\\' needed for cases like "Z:" or "Z:\"
1163     strcat(dirp->path, "*.*");
1164   } else {
1165     strcat(dirp->path, "\\*.*");
1166   }
1167 
1168   dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
1169   if (dirp->handle == INVALID_HANDLE_VALUE) {
1170     if (GetLastError() != ERROR_FILE_NOT_FOUND) {
1171       free(dirp->path);
1172       free(dirp);
1173       errno = EACCES;
1174       return 0;
1175     }
1176   }
1177   return dirp;
1178 }
1179 
1180 struct dirent * os::readdir(DIR *dirp) {
1181   assert(dirp != NULL, "just checking");      // hotspot change
1182   if (dirp->handle == INVALID_HANDLE_VALUE) {
1183     return NULL;
1184   }
1185 
1186   strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);
1187 
1188   if (!FindNextFile(dirp->handle, &dirp->find_data)) {
1189     if (GetLastError() == ERROR_INVALID_HANDLE) {
1190       errno = EBADF;
1191       return NULL;
1192     }
1193     FindClose(dirp->handle);
1194     dirp->handle = INVALID_HANDLE_VALUE;
1195   }
1196 
1197   return &dirp->dirent;
1198 }
1199 
1200 int os::closedir(DIR *dirp) {
1201   assert(dirp != NULL, "just checking");      // hotspot change
1202   if (dirp->handle != INVALID_HANDLE_VALUE) {
1203     if (!FindClose(dirp->handle)) {
1204       errno = EBADF;
1205       return -1;
1206     }
1207     dirp->handle = INVALID_HANDLE_VALUE;
1208   }
1209   free(dirp->path);
1210   free(dirp);
1211   return 0;
1212 }
1213 
// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, a la java.io.tmpdir.
1216 const char* os::get_temp_directory() {
1217   static char path_buf[MAX_PATH];
1218   if (GetTempPath(MAX_PATH, path_buf) > 0) {
1219     return path_buf;
1220   } else {
1221     path_buf[0] = '\0';
1222     return path_buf;
1223   }
1224 }
1225 
// Needs to be in the os-specific directory because Windows requires another
// header file, <direct.h>
1228 const char* os::get_current_directory(char *buf, size_t buflen) {
1229   int n = static_cast<int>(buflen);
1230   if (buflen > INT_MAX)  n = INT_MAX;
1231   return _getcwd(buf, n);
1232 }
1233 
1234 //-----------------------------------------------------------
1235 // Helper functions for fatal error handler
1236 #ifdef _WIN64
// Helper routine which returns true if the address is
// within the NTDLL address space.
1239 //
1240 static bool _addr_in_ntdll(address addr) {
1241   HMODULE hmod;
1242   MODULEINFO minfo;
1243 
1244   hmod = GetModuleHandle("NTDLL.DLL");
1245   if (hmod == NULL) return false;
1246   if (!GetModuleInformation(GetCurrentProcess(), hmod,
1247                                           &minfo, sizeof(MODULEINFO))) {
1248     return false;
1249   }
1250 
1251   if ((addr >= minfo.lpBaseOfDll) &&
1252       (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1253     return true;
1254   } else {
1255     return false;
1256   }
1257 }
1258 #endif
1259 
1260 struct _modinfo {
1261   address addr;
1262   char*   full_path;   // point to a char buffer
1263   int     buflen;      // size of the buffer
1264   address base_addr;
1265 };
1266 
1267 static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1268                                   address top_address, void * param) {
1269   struct _modinfo *pmod = (struct _modinfo *)param;
1270   if (!pmod) return -1;
1271 
1272   if (base_addr   <= pmod->addr &&
1273       top_address > pmod->addr) {
1274     // if a buffer is provided, copy path name to the buffer
1275     if (pmod->full_path) {
1276       jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1277     }
1278     pmod->base_addr = base_addr;
1279     return 1;
1280   }
1281   return 0;
1282 }
1283 
1284 bool os::dll_address_to_library_name(address addr, char* buf,
1285                                      int buflen, int* offset) {
1286   // buf is not optional, but offset is optional
1287   assert(buf != NULL, "sanity check");
1288 
// NOTE: the reason we don't use SymGetModuleInfo() is that it doesn't always
//       return the full path to the DLL file; sometimes it returns the path
//       to the corresponding PDB file (debug info), and sometimes it only
//       returns a partial path, which makes life painful.
1293 
1294   struct _modinfo mi;
1295   mi.addr      = addr;
1296   mi.full_path = buf;
1297   mi.buflen    = buflen;
1298   if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
1299     // buf already contains path name
1300     if (offset) *offset = addr - mi.base_addr;
1301     return true;
1302   }
1303 
1304   buf[0] = '\0';
1305   if (offset) *offset = -1;
1306   return false;
1307 }
1308 
1309 bool os::dll_address_to_function_name(address addr, char *buf,
1310                                       int buflen, int *offset,
1311                                       bool demangle) {
1312   // buf is not optional, but offset is optional
1313   assert(buf != NULL, "sanity check");
1314 
1315   if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1316     return true;
1317   }
1318   if (offset != NULL)  *offset  = -1;
1319   buf[0] = '\0';
1320   return false;
1321 }
1322 
1323 // save the start and end address of jvm.dll into param[0] and param[1]
1324 static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1325                            address top_address, void * param) {
1326   if (!param) return -1;
1327 
1328   if (base_addr   <= (address)_locate_jvm_dll &&
1329       top_address > (address)_locate_jvm_dll) {
1330     ((address*)param)[0] = base_addr;
1331     ((address*)param)[1] = top_address;
1332     return 1;
1333   }
1334   return 0;
1335 }
1336 
1337 address vm_lib_location[2];    // start and end address of jvm.dll
1338 
1339 // check if addr is inside jvm.dll
1340 bool os::address_is_in_vm(address addr) {
1341   if (!vm_lib_location[0] || !vm_lib_location[1]) {
1342     if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
1343       assert(false, "Can't find jvm module.");
1344       return false;
1345     }
1346   }
1347 
1348   return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
1349 }
1350 
1351 // print module info; param is outputStream*
1352 static int _print_module(const char* fname, address base_address,
1353                          address top_address, void* param) {
1354   if (!param) return -1;
1355 
1356   outputStream* st = (outputStream*)param;
1357 
1358   st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1359   return 0;
1360 }
1361 
// Loads a .dll/.so and,
// in case of error, checks whether the .dll/.so was built for the
// same architecture as the one HotSpot is running on.
1365 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
1366   log_info(os)("attempting shared library load of %s", name);
1367 
1368   void * result = LoadLibrary(name);
1369   if (result != NULL) {
1370     Events::log(NULL, "Loaded shared library %s", name);
1371     // Recalculate pdb search path if a DLL was loaded successfully.
1372     SymbolEngine::recalc_search_path();
1373     log_info(os)("shared library load of %s was successful", name);
1374     return result;
1375   }
1376   DWORD errcode = GetLastError();
1377   // Read system error message into ebuf
1378   // It may or may not be overwritten below (in the for loop and just above)
1379   lasterror(ebuf, (size_t) ebuflen);
1380   ebuf[ebuflen - 1] = '\0';
1381   Events::log(NULL, "Loading shared library %s failed, error code %lu", name, errcode);
1382   log_info(os)("shared library load of %s failed, error code %lu", name, errcode);
1383 
1384   if (errcode == ERROR_MOD_NOT_FOUND) {
1385     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
1386     ebuf[ebuflen - 1] = '\0';
1387     return NULL;
1388   }
1389 
  // Parsing the dll below.
  // If we can read the dll info and find that the dll was built
  // for an architecture other than the one HotSpot is running on,
  // print "DLL was built for a different architecture" to the buffer;
  // else call os::lasterror to obtain the system error message.
1395   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
1396   if (fd < 0) {
1397     return NULL;
1398   }
1399 
1400   uint32_t signature_offset;
1401   uint16_t lib_arch = 0;
1402   bool failed_to_get_lib_arch =
1403     ( // Go to position 3c in the dll
1404      (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
1405      ||
1406      // Read location of signature
1407      (sizeof(signature_offset) !=
1408      (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
1409      ||
1410      // Go to COFF File Header in dll
1411      // that is located after "signature" (4 bytes long)
1412      (os::seek_to_file_offset(fd,
1413      signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
1414      ||
1415      // Read field that contains code of architecture
1416      // that dll was built for
1417      (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
1418     );
1419 
1420   ::close(fd);
1421   if (failed_to_get_lib_arch) {
1422     // file i/o error - report os::lasterror(...) msg
1423     return NULL;
1424   }
1425 
1426   typedef struct {
1427     uint16_t arch_code;
1428     char* arch_name;
1429   } arch_t;
1430 
1431   static const arch_t arch_array[] = {
1432     {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
1433     {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"}
1434   };
1435 #if (defined _M_AMD64)
1436   static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
1437 #elif (defined _M_IX86)
1438   static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
1439 #else
1440   #error Method os::dll_load requires that one of following \
1441          is defined :_M_AMD64 or _M_IX86
1442 #endif
1443 
1444 
  // Obtain strings for the printf operation:
  // lib_arch_str shall contain the platform this .dll was built for, and
  // running_arch_str the platform HotSpot was built for.
1448   char *running_arch_str = NULL, *lib_arch_str = NULL;
1449   for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
1450     if (lib_arch == arch_array[i].arch_code) {
1451       lib_arch_str = arch_array[i].arch_name;
1452     }
1453     if (running_arch == arch_array[i].arch_code) {
1454       running_arch_str = arch_array[i].arch_name;
1455     }
1456   }
1457 
1458   assert(running_arch_str,
1459          "Didn't find running architecture code in arch_array");
1460 
1461   // If the architecture is right
1462   // but some other error took place - report os::lasterror(...) msg
1463   if (lib_arch == running_arch) {
1464     return NULL;
1465   }
1466 
1467   if (lib_arch_str != NULL) {
1468     ::_snprintf(ebuf, ebuflen - 1,
1469                 "Can't load %s-bit .dll on a %s-bit platform",
1470                 lib_arch_str, running_arch_str);
1471   } else {
    // don't know what architecture this dll was built for
1473     ::_snprintf(ebuf, ebuflen - 1,
1474                 "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
1475                 lib_arch, running_arch_str);
1476   }
1477 
1478   return NULL;
1479 }
1480 
1481 void os::print_dll_info(outputStream *st) {
1482   st->print_cr("Dynamic libraries:");
1483   get_loaded_modules_info(_print_module, (void *)st);
1484 }
1485 
1486 int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
1487   HANDLE   hProcess;
1488 
1489 # define MAX_NUM_MODULES 128
1490   HMODULE     modules[MAX_NUM_MODULES];
1491   static char filename[MAX_PATH];
1492   int         result = 0;
1493 
1494   int pid = os::current_process_id();
1495   hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
1496                          FALSE, pid);
1497   if (hProcess == NULL) return 0;
1498 
1499   DWORD size_needed;
1500   if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
1501     CloseHandle(hProcess);
1502     return 0;
1503   }
1504 
1505   // number of modules that are currently loaded
1506   int num_modules = size_needed / sizeof(HMODULE);
1507 
1508   for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
1509     // Get Full pathname:
1510     if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
1511       filename[0] = '\0';
1512     }
1513 
1514     MODULEINFO modinfo;
1515     if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
1516       modinfo.lpBaseOfDll = NULL;
1517       modinfo.SizeOfImage = 0;
1518     }
1519 
1520     // Invoke callback function
1521     result = callback(filename, (address)modinfo.lpBaseOfDll,
1522                       (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
1523     if (result) break;
1524   }
1525 
1526   CloseHandle(hProcess);
1527   return result;
1528 }
1529 
1530 bool os::get_host_name(char* buf, size_t buflen) {
1531   DWORD size = (DWORD)buflen;
1532   return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
1533 }
1534 
1535 void os::get_summary_os_info(char* buf, size_t buflen) {
1536   stringStream sst(buf, buflen);
1537   os::win32::print_windows_version(&sst);
1538   // chop off newline character
1539   char* nl = strchr(buf, '\n');
1540   if (nl != NULL) *nl = '\0';
1541 }
1542 
1543 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
1544 #if _MSC_VER >= 1900
  // Starting with Visual Studio 2015, vsnprintf is C99 compliant.
1546   int result = ::vsnprintf(buf, len, fmt, args);
1547   // If an encoding error occurred (result < 0) then it's not clear
1548   // whether the buffer is NUL terminated, so ensure it is.
1549   if ((result < 0) && (len > 0)) {
1550     buf[len - 1] = '\0';
1551   }
1552   return result;
1553 #else
1554   // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
1555   // _vsnprintf, whose behavior seems to be *mostly* consistent across
1556   // versions.  However, when len == 0, avoid _vsnprintf too, and just
1557   // go straight to _vscprintf.  The output is going to be truncated in
1558   // that case, except in the unusual case of empty output.  More
1559   // importantly, the documentation for various versions of Visual Studio
  // is inconsistent about the behavior of _vsnprintf when len == 0,
1561   // including it possibly being an error.
1562   int result = -1;
1563   if (len > 0) {
1564     result = _vsnprintf(buf, len, fmt, args);
1565     // If output (including NUL terminator) is truncated, the buffer
1566     // won't be NUL terminated.  Add the trailing NUL specified by C99.
1567     if ((result < 0) || ((size_t)result >= len)) {
1568       buf[len - 1] = '\0';
1569     }
1570   }
1571   if (result < 0) {
1572     result = _vscprintf(fmt, args);
1573   }
1574   return result;
1575 #endif // _MSC_VER dispatch
1576 }
1577 
1578 static inline time_t get_mtime(const char* filename) {
1579   struct stat st;
1580   int ret = os::stat(filename, &st);
1581   assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
1582   return st.st_mtime;
1583 }
1584 
1585 int os::compare_file_modified_times(const char* file1, const char* file2) {
1586   time_t t1 = get_mtime(file1);
1587   time_t t2 = get_mtime(file2);
1588   return t1 - t2;
1589 }
1590 
1591 void os::print_os_info_brief(outputStream* st) {
1592   os::print_os_info(st);
1593 }
1594 
1595 void os::win32::print_uptime_info(outputStream* st) {
1596   unsigned long long ticks = GetTickCount64();
1597   os::print_dhm(st, "OS uptime:", ticks/1000);
1598 }
1599 
1600 void os::print_os_info(outputStream* st) {
1601 #ifdef ASSERT
1602   char buffer[1024];
1603   st->print("HostName: ");
1604   if (get_host_name(buffer, sizeof(buffer))) {
1605     st->print("%s ", buffer);
1606   } else {
1607     st->print("N/A ");
1608   }
1609 #endif
1610   st->print("OS:");
1611   os::win32::print_windows_version(st);
1612 
1613   os::win32::print_uptime_info(st);
1614 
1615 #ifdef _LP64
1616   VM_Version::print_platform_virtualization_info(st);
1617 #endif
1618 }
1619 
1620 void os::win32::print_windows_version(outputStream* st) {
1621   OSVERSIONINFOEX osvi;
1622   VS_FIXEDFILEINFO *file_info;
1623   TCHAR kernel32_path[MAX_PATH];
1624   UINT len, ret;
1625 
1626   // Use the GetVersionEx information to see if we're on a server or
1627   // workstation edition of Windows. Starting with Windows 8.1 we can't
1628   // trust the OS version information returned by this API.
1629   ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
1630   osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
1631   if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
1632     st->print_cr("Call to GetVersionEx failed");
1633     return;
1634   }
1635   bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);
1636 
1637   // Get the full path to \Windows\System32\kernel32.dll and use that for
1638   // determining what version of Windows we're running on.
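  // Reserve room in kernel32_path for appending "\kernel32.dll" plus the terminating NUL.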
1639   len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
1640   ret = GetSystemDirectory(kernel32_path, len);
1641   if (ret == 0 || ret > len) {
1642     st->print_cr("Call to GetSystemDirectory failed");
1643     return;
1644   }
1645   strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);
1646 
1647   DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
1648   if (version_size == 0) {
1649     st->print_cr("Call to GetFileVersionInfoSize failed");
1650     return;
1651   }
1652 
1653   LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
1654   if (version_info == NULL) {
1655     st->print_cr("Failed to allocate version_info");
1656     return;
1657   }
1658 
1659   if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
1660     os::free(version_info);
1661     st->print_cr("Call to GetFileVersionInfo failed");
1662     return;
1663   }
1664 
1665   if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
1666     os::free(version_info);
1667     st->print_cr("Call to VerQueryValue failed");
1668     return;
1669   }
1670 
1671   int major_version = HIWORD(file_info->dwProductVersionMS);
1672   int minor_version = LOWORD(file_info->dwProductVersionMS);
1673   int build_number = HIWORD(file_info->dwProductVersionLS);
1674   int build_minor = LOWORD(file_info->dwProductVersionLS);
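  // Encode major.minor as a single value for the switch below,
  // e.g. Windows 8.1 (6.3) -> 6003, Windows 10 (10.0) -> 10000.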
1675   int os_vers = major_version * 1000 + minor_version;
1676   os::free(version_info);
1677 
1678   st->print(" Windows ");
1679   switch (os_vers) {
1680 
1681   case 6000:
1682     if (is_workstation) {
1683       st->print("Vista");
1684     } else {
1685       st->print("Server 2008");
1686     }
1687     break;
1688 
1689   case 6001:
1690     if (is_workstation) {
1691       st->print("7");
1692     } else {
1693       st->print("Server 2008 R2");
1694     }
1695     break;
1696 
1697   case 6002:
1698     if (is_workstation) {
1699       st->print("8");
1700     } else {
1701       st->print("Server 2012");
1702     }
1703     break;
1704 
1705   case 6003:
1706     if (is_workstation) {
1707       st->print("8.1");
1708     } else {
1709       st->print("Server 2012 R2");
1710     }
1711     break;
1712 
1713   case 10000:
1714     if (is_workstation) {
1715       st->print("10");
1716     } else {
      // Distinguish Windows Server 2016 and 2019 by build number;
      // the Windows Server 2019 GA (10/2018) build number is 17763.
1719       if (build_number > 17762) {
1720         st->print("Server 2019");
1721       } else {
1722         st->print("Server 2016");
1723       }
1724     }
1725     break;
1726 
1727   default:
    // Unrecognized Windows version; print out its major and minor versions
1729     st->print("%d.%d", major_version, minor_version);
1730     break;
1731   }
1732 
  // Retrieve SYSTEM_INFO via GetNativeSystemInfo so that we can find out
  // whether we are running on a 64-bit processor or not.
1735   SYSTEM_INFO si;
1736   ZeroMemory(&si, sizeof(SYSTEM_INFO));
1737   GetNativeSystemInfo(&si);
1738   if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
1739     st->print(" , 64 bit");
1740   }
1741 
1742   st->print(" Build %d", build_number);
1743   st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
1744   st->cr();
1745 }
1746 
1747 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
1748   // Nothing to do for now.
1749 }
1750 
1751 void os::get_summary_cpu_info(char* buf, size_t buflen) {
1752   HKEY key;
1753   DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1754                "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1755   if (status == ERROR_SUCCESS) {
1756     DWORD size = (DWORD)buflen;
1757     status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1758     if (status != ERROR_SUCCESS) {
1759         strncpy(buf, "## __CPU__", buflen);
1760     }
1761     RegCloseKey(key);
1762   } else {
    // Return generic CPU info
1764     strncpy(buf, "## __CPU__", buflen);
1765   }
1766 }
1767 
1768 void os::print_memory_info(outputStream* st) {
1769   st->print("Memory:");
1770   st->print(" %dk page", os::vm_page_size()>>10);
1771 
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return an
  // incorrect value if total memory is larger than 4GB.
1774   MEMORYSTATUSEX ms;
1775   ms.dwLength = sizeof(ms);
1776   int r1 = GlobalMemoryStatusEx(&ms);
1777 
1778   if (r1 != 0) {
1779     st->print(", system-wide physical " INT64_FORMAT "M ",
1780              (int64_t) ms.ullTotalPhys >> 20);
1781     st->print("(" INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20);
1782 
1783     st->print("TotalPageFile size " INT64_FORMAT "M ",
1784              (int64_t) ms.ullTotalPageFile >> 20);
1785     st->print("(AvailPageFile size " INT64_FORMAT "M)",
1786              (int64_t) ms.ullAvailPageFile >> 20);
1787 
    // On 32-bit, Total/AvailVirtual are interesting (they show how close we get to the 2-4 GB per-process limit)
1789 #if defined(_M_IX86)
1790     st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ",
1791              (int64_t) ms.ullTotalVirtual >> 20);
1792     st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
1793 #endif
1794   } else {
    st->print(", GlobalMemoryStatusEx did not succeed, so some memory values are missing.");
1796   }
1797 
1798   // extended memory statistics for a process
1799   PROCESS_MEMORY_COUNTERS_EX pmex;
1800   ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
1801   pmex.cb = sizeof(pmex);
1802   int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));
1803 
1804   if (r2 != 0) {
1805     st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ",
1806              (int64_t) pmex.WorkingSetSize >> 20);
1807     st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);
1808 
1809     st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ",
1810              (int64_t) pmex.PrivateUsage >> 20);
1811     st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);
1812   } else {
    st->print("\nGetProcessMemoryInfo did not succeed, so some memory values are missing.");
1814   }
1815 
1816   st->cr();
1817 }
1818 
1819 bool os::signal_sent_by_kill(const void* siginfo) {
1820   // TODO: Is this possible?
1821   return false;
1822 }
1823 
1824 void os::print_siginfo(outputStream *st, const void* siginfo) {
1825   const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
1826   st->print("siginfo:");
1827 
1828   char tmp[64];
1829   if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
1830     strcpy(tmp, "EXCEPTION_??");
1831   }
1832   st->print(" %s (0x%x)", tmp, er->ExceptionCode);
1833 
1834   if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
1835        er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
1836        er->NumberParameters >= 2) {
1837     switch (er->ExceptionInformation[0]) {
1838     case 0: st->print(", reading address"); break;
1839     case 1: st->print(", writing address"); break;
1840     case 8: st->print(", data execution prevention violation at address"); break;
1841     default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
1842                        er->ExceptionInformation[0]);
1843     }
1844     st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
1845   } else {
1846     int num = er->NumberParameters;
1847     if (num > 0) {
1848       st->print(", ExceptionInformation=");
1849       for (int i = 0; i < num; i++) {
1850         st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
1851       }
1852     }
1853   }
1854   st->cr();
1855 }
1856 
1857 bool os::signal_thread(Thread* thread, int sig, const char* reason) {
1858   // TODO: Can we kill thread?
1859   return false;
1860 }
1861 
1862 void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
1863   // do nothing
1864 }
1865 
1866 static char saved_jvm_path[MAX_PATH] = {0};
1867 
1868 // Find the full path to the current module, jvm.dll
1869 void os::jvm_path(char *buf, jint buflen) {
1870   // Error checking.
1871   if (buflen < MAX_PATH) {
1872     assert(false, "must use a large-enough buffer");
1873     buf[0] = '\0';
1874     return;
1875   }
1876   // Lazy resolve the path to current module.
1877   if (saved_jvm_path[0] != 0) {
1878     strcpy(buf, saved_jvm_path);
1879     return;
1880   }
1881 
1882   buf[0] = '\0';
1883   if (Arguments::sun_java_launcher_is_altjvm()) {
1884     // Support for the java launcher's '-XXaltjvm=<path>' option. Check
1885     // for a JAVA_HOME environment variable and fix up the path so it
1886     // looks like jvm.dll is installed there (append a fake suffix
1887     // hotspot/jvm.dll).
1888     char* java_home_var = ::getenv("JAVA_HOME");
1889     if (java_home_var != NULL && java_home_var[0] != 0 &&
1890         strlen(java_home_var) < (size_t)buflen) {
1891       strncpy(buf, java_home_var, buflen);
1892 
      // Determine if this is a legacy image or a modules image;
      // a modules image doesn't have a "jre" subdirectory.
1895       size_t len = strlen(buf);
1896       char* jrebin_p = buf + len;
1897       jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
1898       if (0 != _access(buf, 0)) {
1899         jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
1900       }
1901       len = strlen(buf);
1902       jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
1903     }
1904   }
1905 
1906   if (buf[0] == '\0') {
1907     GetModuleFileName(vm_lib_handle, buf, buflen);
1908   }
1909   strncpy(saved_jvm_path, buf, MAX_PATH);
1910   saved_jvm_path[MAX_PATH - 1] = '\0';
1911 }
1912 
1913 
1914 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
1915 #ifndef _WIN64
1916   st->print("_");
1917 #endif
1918 }
1919 
1920 
1921 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
1922 #ifndef _WIN64
1923   st->print("@%d", args_size  * sizeof(int));
1924 #endif
1925 }
1926 
1927 // This method is a copy of JDK's sysGetLastErrorString
1928 // from src/windows/hpi/src/system_md.c
1929 
1930 size_t os::lasterror(char* buf, size_t len) {
1931   DWORD errval;
1932 
1933   if ((errval = GetLastError()) != 0) {
1934     // DOS error
1935     size_t n = (size_t)FormatMessage(
1936                                      FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
1937                                      NULL,
1938                                      errval,
1939                                      0,
1940                                      buf,
1941                                      (DWORD)len,
1942                                      NULL);
1943     if (n > 3) {
1944       // Drop final '.', CR, LF
1945       if (buf[n - 1] == '\n') n--;
1946       if (buf[n - 1] == '\r') n--;
1947       if (buf[n - 1] == '.') n--;
1948       buf[n] = '\0';
1949     }
1950     return n;
1951   }
1952 
1953   if (errno != 0) {
1954     // C runtime error that has no corresponding DOS error code
1955     const char* s = os::strerror(errno);
1956     size_t n = strlen(s);
1957     if (n >= len) n = len - 1;
1958     strncpy(buf, s, n);
1959     buf[n] = '\0';
1960     return n;
1961   }
1962 
1963   return 0;
1964 }
1965 
1966 int os::get_last_error() {
1967   DWORD error = GetLastError();
1968   if (error == 0) {
1969     error = errno;
1970   }
1971   return (int)error;
1972 }
1973 
1974 // sun.misc.Signal
1975 // NOTE that this is a workaround for an apparent kernel bug where if
1976 // a signal handler for SIGBREAK is installed then that signal handler
1977 // takes priority over the console control handler for CTRL_CLOSE_EVENT.
1978 // See bug 4416763.
1979 static void (*sigbreakHandler)(int) = NULL;
1980 
1981 static void UserHandler(int sig, void *siginfo, void *context) {
1982   os::signal_notify(sig);
1983   // We need to reinstate the signal handler each time...
1984   os::signal(sig, (void*)UserHandler);
1985 }
1986 
1987 void* os::user_handler() {
1988   return (void*) UserHandler;
1989 }
1990 
1991 void* os::signal(int signal_number, void* handler) {
1992   if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
1993     void (*oldHandler)(int) = sigbreakHandler;
1994     sigbreakHandler = (void (*)(int)) handler;
1995     return (void*) oldHandler;
1996   } else {
1997     return (void*)::signal(signal_number, (void (*)(int))handler);
1998   }
1999 }
2000 
2001 void os::signal_raise(int signal_number) {
2002   raise(signal_number);
2003 }
2004 
2005 // The Win32 C runtime library maps all console control events other than ^C
2006 // into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2007 // logoff, and shutdown events.  We therefore install our own console handler
2008 // that raises SIGTERM for the latter cases.
2009 //
2010 static BOOL WINAPI consoleHandler(DWORD event) {
2011   switch (event) {
2012   case CTRL_C_EVENT:
2013     if (VMError::is_error_reported()) {
2014       // Ctrl-C is pressed during error reporting, likely because the error
2015       // handler fails to abort. Let VM die immediately.
2016       os::die();
2017     }
2018 
2019     os::signal_raise(SIGINT);
2020     return TRUE;
2021     break;
2022   case CTRL_BREAK_EVENT:
2023     if (sigbreakHandler != NULL) {
2024       (*sigbreakHandler)(SIGBREAK);
2025     }
2026     return TRUE;
2027     break;
2028   case CTRL_LOGOFF_EVENT: {
2029     // Don't terminate JVM if it is running in a non-interactive session,
2030     // such as a service process.
2031     USEROBJECTFLAGS flags;
2032     HANDLE handle = GetProcessWindowStation();
2033     if (handle != NULL &&
2034         GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2035         sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let the next handler deal
      // with it.
2038       if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2039         return FALSE;
2040       }
2041     }
2042   }
2043   case CTRL_CLOSE_EVENT:
2044   case CTRL_SHUTDOWN_EVENT:
2045     os::signal_raise(SIGTERM);
2046     return TRUE;
2047     break;
2048   default:
2049     break;
2050   }
2051   return FALSE;
2052 }
2053 
// The following code was moved from os.cpp to make it platform-specific,
// which it is by its very nature.
2056 
2057 // Return maximum OS signal used + 1 for internal use only
2058 // Used as exit signal for signal_thread
2059 int os::sigexitnum_pd() {
2060   return NSIG;
2061 }
2062 
2063 // a counter for each possible signal value, including signal_thread exit signal
2064 static volatile jint pending_signals[NSIG+1] = { 0 };
2065 static Semaphore* sig_sem = NULL;
2066 
2067 static void jdk_misc_signal_init() {
2068   // Initialize signal structures
2069   memset((void*)pending_signals, 0, sizeof(pending_signals));
2070 
2071   // Initialize signal semaphore
2072   sig_sem = new Semaphore();
2073 
2074   // Programs embedding the VM do not want it to attempt to receive
2075   // events like CTRL_LOGOFF_EVENT, which are used to implement the
2076   // shutdown hooks mechanism introduced in 1.3.  For example, when
2077   // the VM is run as part of a Windows NT service (i.e., a servlet
2078   // engine in a web server), the correct behavior is for any console
2079   // control handler to return FALSE, not TRUE, because the OS's
2080   // "final" handler for such events allows the process to continue if
2081   // it is a service (while terminating it if it is not a service).
2082   // To make this behavior uniform and the mechanism simpler, we
2083   // completely disable the VM's usage of these console events if -Xrs
2084   // (=ReduceSignalUsage) is specified.  This means, for example, that
2085   // the CTRL-BREAK thread dump mechanism is also disabled in this
2086   // case.  See bugs 4323062, 4345157, and related bugs.
2087 
2088   // Add a CTRL-C handler
2089   SetConsoleCtrlHandler(consoleHandler, TRUE);
2090 }
2091 
2092 void os::signal_notify(int sig) {
2093   if (sig_sem != NULL) {
2094     Atomic::inc(&pending_signals[sig]);
2095     sig_sem->signal();
2096   } else {
    // The signal thread is not created with ReduceSignalUsage, and
    // jdk_misc_signal_init initialization isn't called.
2099     assert(ReduceSignalUsage, "signal semaphore should be created");
2100   }
2101 }
2102 
2103 static int check_pending_signals() {
2104   while (true) {
2105     for (int i = 0; i < NSIG + 1; i++) {
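      // Try to atomically claim one pending occurrence of signal i;
      // if the compare-and-exchange succeeds, report it.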
2106       jint n = pending_signals[i];
2107       if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
2108         return i;
2109       }
2110     }
2111     JavaThread *thread = JavaThread::current();
2112 
2113     ThreadBlockInVM tbivm(thread);
2114 
2115     bool threadIsSuspended;
2116     do {
2117       thread->set_suspend_equivalent();
2118       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
2119       sig_sem->wait();
2120 
2121       // were we externally suspended while we were waiting?
2122       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
2123       if (threadIsSuspended) {
2124         // The semaphore has been incremented, but while we were waiting
2125         // another thread suspended us. We don't want to continue running
2126         // while suspended because that would surprise the thread that
2127         // suspended us.
2128         sig_sem->signal();
2129 
2130         thread->java_suspend_self();
2131       }
2132     } while (threadIsSuspended);
2133   }
2134 }
2135 
2136 int os::signal_wait() {
2137   return check_pending_signals();
2138 }
2139 
2140 // Implicit OS exception handling
2141 
2142 LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
2143                       address handler) {
2144   JavaThread* thread = (JavaThread*) Thread::current_or_null();
2145   // Save pc in thread
2146 #ifdef _M_AMD64
2147   // Do not blow up if no thread info available.
2148   if (thread) {
2149     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Rip);
2150   }
2151   // Set pc to handler
2152   exceptionInfo->ContextRecord->Rip = (DWORD64)handler;
2153 #else
2154   // Do not blow up if no thread info available.
2155   if (thread) {
2156     thread->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->Eip);
2157   }
2158   // Set pc to handler
2159   exceptionInfo->ContextRecord->Eip = (DWORD)(DWORD_PTR)handler;
2160 #endif
2161 
2162   // Continue the execution
2163   return EXCEPTION_CONTINUE_EXECUTION;
2164 }
2165 
2166 
2167 // Used for PostMortemDump
2168 extern "C" void safepoints();
2169 extern "C" void find(int x);
2170 extern "C" void events();
2171 
2172 // According to Windows API documentation, an illegal instruction sequence should generate
// the 0xC000001D exception code. However, real-world experience shows that occasionally
2174 // the execution of an illegal instruction can generate the exception code 0xC000001E. This
2175 // seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2176 
2177 #define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2178 
2179 // From "Execution Protection in the Windows Operating System" draft 0.35
2180 // Once a system header becomes available, the "real" define should be
2181 // included or copied here.
2182 #define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2183 
2184 // Windows Vista/2008 heap corruption check
2185 #define EXCEPTION_HEAP_CORRUPTION        0xC0000374
2186 
2187 // All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2188 // C++ compiler contain this error code. Because this is a compiler-generated
2189 // error, the code is not listed in the Win32 API header files.
2190 // The code is actually a cryptic mnemonic device, with the initial "E"
2191 // standing for "exception" and the final 3 bytes (0x6D7363) representing the
2192 // ASCII values of "msc".
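// For example, 0xE06D7363 is 0xE0 (the 'E') followed by the bytes 0x6D, 0x73
// and 0x63, the ASCII codes for 'm', 's' and 'c'.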
2193 
2194 #define EXCEPTION_UNCAUGHT_CXX_EXCEPTION    0xE06D7363
2195 
2196 #define def_excpt(val) { #val, (val) }
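// e.g. def_excpt(EXCEPTION_ACCESS_VIOLATION) expands to
// { "EXCEPTION_ACCESS_VIOLATION", EXCEPTION_ACCESS_VIOLATION }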
2197 
2198 static const struct { const char* name; uint number; } exceptlabels[] = {
2199     def_excpt(EXCEPTION_ACCESS_VIOLATION),
2200     def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
2201     def_excpt(EXCEPTION_BREAKPOINT),
2202     def_excpt(EXCEPTION_SINGLE_STEP),
2203     def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
2204     def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
2205     def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
2206     def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
2207     def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
2208     def_excpt(EXCEPTION_FLT_OVERFLOW),
2209     def_excpt(EXCEPTION_FLT_STACK_CHECK),
2210     def_excpt(EXCEPTION_FLT_UNDERFLOW),
2211     def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
2212     def_excpt(EXCEPTION_INT_OVERFLOW),
2213     def_excpt(EXCEPTION_PRIV_INSTRUCTION),
2214     def_excpt(EXCEPTION_IN_PAGE_ERROR),
2215     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
2216     def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
2217     def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
2218     def_excpt(EXCEPTION_STACK_OVERFLOW),
2219     def_excpt(EXCEPTION_INVALID_DISPOSITION),
2220     def_excpt(EXCEPTION_GUARD_PAGE),
2221     def_excpt(EXCEPTION_INVALID_HANDLE),
2222     def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
2223     def_excpt(EXCEPTION_HEAP_CORRUPTION)
2224 };
2225 
2226 #undef def_excpt
2227 
2228 const char* os::exception_name(int exception_code, char *buf, size_t size) {
2229   uint code = static_cast<uint>(exception_code);
2230   for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2231     if (exceptlabels[i].number == code) {
2232       jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2233       return buf;
2234     }
2235   }
2236 
2237   return NULL;
2238 }
2239 
2240 //-----------------------------------------------------------------------------
2241 LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2242   // handle exception caused by idiv; should only happen for -MinInt/-1
2243   // (division by zero is handled explicitly)
2244 #ifdef  _M_AMD64
2245   PCONTEXT ctx = exceptionInfo->ContextRecord;
2246   address pc = (address)ctx->Rip;
  assert((pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7) || pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8) || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
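  // e.g. 'idiv ecx' encodes as F7 F9 (2 bytes), while 'idiv rcx' encodes as
  // 48 F7 F9 (REX.W prefix followed by the same opcode and ModRM, 3 bytes).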
2249   if (pc[0] == 0xF7) {
2250     // set correct result values and continue after idiv instruction
2251     ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg  is 2 bytes
2252   } else {
2253     ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg  is 3 bytes
2254   }
  // Do not set ctx->Rax, as it already contains the correct value (either 32 or 64 bit, depending on the operation).
  // This is the case because the exception only happens for -MinValue/-1, and -MinValue is always in rax because of
  // the idiv opcode (0xF7).
2258   ctx->Rdx = (DWORD)0;             // remainder
2259   // Continue the execution
2260 #else
2261   PCONTEXT ctx = exceptionInfo->ContextRecord;
2262   address pc = (address)ctx->Eip;
2263   assert(pc[0] == 0xF7, "not an idiv opcode");
2264   assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
2265   assert(ctx->Eax == min_jint, "unexpected idiv exception");
2266   // set correct result values and continue after idiv instruction
2267   ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg  is 2 bytes
2268   ctx->Eax = (DWORD)min_jint;      // result
2269   ctx->Edx = (DWORD)0;             // remainder
2270   // Continue the execution
2271 #endif
2272   return EXCEPTION_CONTINUE_EXECUTION;
2273 }
2274 
2275 //-----------------------------------------------------------------------------
2276 LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2277   PCONTEXT ctx = exceptionInfo->ContextRecord;
2278 #ifndef  _WIN64
2279   // handle exception caused by native method modifying control word
2280   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2281 
2282   switch (exception_code) {
2283   case EXCEPTION_FLT_DENORMAL_OPERAND:
2284   case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2285   case EXCEPTION_FLT_INEXACT_RESULT:
2286   case EXCEPTION_FLT_INVALID_OPERATION:
2287   case EXCEPTION_FLT_OVERFLOW:
2288   case EXCEPTION_FLT_STACK_CHECK:
2289   case EXCEPTION_FLT_UNDERFLOW:
2290     jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2291     if (fp_control_word != ctx->FloatSave.ControlWord) {
2292       // Restore FPCW and mask out FLT exceptions
2293       ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2294       // Mask out pending FLT exceptions
2295       ctx->FloatSave.StatusWord &=  0xffffff00;
2296       return EXCEPTION_CONTINUE_EXECUTION;
2297     }
2298   }
2299 
2300   if (prev_uef_handler != NULL) {
2301     // We didn't handle this exception so pass it to the previous
2302     // UnhandledExceptionFilter.
2303     return (prev_uef_handler)(exceptionInfo);
2304   }
2305 #else // !_WIN64
2306   // On Windows, the mxcsr control bits are non-volatile across calls
2307   // See also CR 6192333
2308   //
2309   jint MxCsr = INITIAL_MXCSR;
2310   // we can't use StubRoutines::addr_mxcsr_std()
2311   // because in Win64 mxcsr is not saved there
2312   if (MxCsr != ctx->MxCsr) {
2313     ctx->MxCsr = MxCsr;
2314     return EXCEPTION_CONTINUE_EXECUTION;
2315   }
2316 #endif // !_WIN64
2317 
2318   return EXCEPTION_CONTINUE_SEARCH;
2319 }
2320 
2321 static inline void report_error(Thread* t, DWORD exception_code,
2322                                 address addr, void* siginfo, void* context) {
2323   VMError::report_and_die(t, exception_code, addr, siginfo, context);
2324 
2325   // If UseOsErrorReporting, this will return here and save the error file
2326   // somewhere where we can find it in the minidump.
2327 }
2328 
2329 bool os::win32::get_frame_at_stack_banging_point(JavaThread* thread,
2330         struct _EXCEPTION_POINTERS* exceptionInfo, address pc, frame* fr) {
2331   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2332   address addr = (address) exceptionRecord->ExceptionInformation[1];
2333   if (Interpreter::contains(pc)) {
2334     *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2335     if (!fr->is_first_java_frame()) {
2336       // get_frame_at_stack_banging_point() is only called when we
2337       // have well defined stacks so java_sender() calls do not need
2338       // to assert safe_for_sender() first.
2339       *fr = fr->java_sender();
2340     }
2341   } else {
2342     // more complex code with compiled code
2343     assert(!Interpreter::contains(pc), "Interpreted methods should have been handled above");
2344     CodeBlob* cb = CodeCache::find_blob(pc);
2345     if (cb == NULL || !cb->is_nmethod() || cb->is_frame_complete_at(pc)) {
2346       // Not sure where the pc points to, fallback to default
2347       // stack overflow handling
2348       return false;
2349     } else {
2350       *fr = os::fetch_frame_from_context((void*)exceptionInfo->ContextRecord);
2351       // in compiled code, the stack banging is performed just after the return pc
2352       // has been pushed on the stack
2353       *fr = frame(fr->sp() + 1, fr->fp(), (address)*(fr->sp()));
2354       if (!fr->is_java_frame()) {
2355         // See java_sender() comment above.
2356         *fr = fr->java_sender();
2357       }
2358     }
2359   }
2360   assert(fr->is_java_frame(), "Safety check");
2361   return true;
2362 }
2363 
2364 #if INCLUDE_AOT
2365 LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2366   PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2367   address addr = (address) exceptionRecord->ExceptionInformation[1];
2368   address pc = (address) exceptionInfo->ContextRecord->Rip;
2369 
  // Handle the case where we get an implicit exception in AOT-generated
  // code.  Loaded AOT DLLs are not registered for structured exception handling.
2372   // If the exception occurred in the codeCache or AOT code, pass control
2373   // to our normal exception handler.
2374   CodeBlob* cb = CodeCache::find_blob(pc);
2375   if (cb != NULL) {
2376     return topLevelExceptionFilter(exceptionInfo);
2377   }
2378 
2379   return EXCEPTION_CONTINUE_SEARCH;
2380 }
2381 #endif
2382 
2383 //-----------------------------------------------------------------------------
2384 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2385   if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
2386   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2387 #ifdef _M_AMD64
2388   address pc = (address) exceptionInfo->ContextRecord->Rip;
2389 #else
2390   address pc = (address) exceptionInfo->ContextRecord->Eip;
2391 #endif
2392   Thread* t = Thread::current_or_null_safe();
2393 
2394   // Handle SafeFetch32 and SafeFetchN exceptions.
2395   if (StubRoutines::is_safefetch_fault(pc)) {
2396     return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
2397   }
2398 
2399 #ifndef _WIN64
2400   // Execution protection violation - win32 running on AMD64 only
2401   // Handled first to avoid misdiagnosis as a "normal" access violation;
2402   // This is safe to do because we have a new/unique ExceptionInformation
2403   // code for this condition.
2404   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2405     PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2406     int exception_subcode = (int) exceptionRecord->ExceptionInformation[0];
2407     address addr = (address) exceptionRecord->ExceptionInformation[1];
2408 
2409     if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
2410       int page_size = os::vm_page_size();
2411 
2412       // Make sure the pc and the faulting address are sane.
2413       //
2414       // If an instruction spans a page boundary, and the page containing
2415       // the beginning of the instruction is executable but the following
2416       // page is not, the pc and the faulting address might be slightly
2417       // different - we still want to unguard the 2nd page in this case.
2418       //
2419       // 15 bytes seems to be a (very) safe value for max instruction size.
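      // pc_is_near_addr: the faulting address is within 15 bytes after pc.
      // instr_spans_page_boundary: aligning (pc ^ addr) down to the page size
      // clears the in-page offset bits, so the result is non-zero only when
      // pc and addr lie on different pages.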
2420       bool pc_is_near_addr =
2421         (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
2422       bool instr_spans_page_boundary =
2423         (align_down((intptr_t) pc ^ (intptr_t) addr,
2424                          (intptr_t) page_size) > 0);
2425 
2426       if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
2427         static volatile address last_addr =
2428           (address) os::non_memory_address_word();
2429 
2430         // In conservative mode, don't unguard unless the address is in the VM
2431         if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
2432             (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {
2433 
2434           // Set memory to RWX and retry
2435           address page_start = align_down(addr, page_size);
2436           bool res = os::protect_memory((char*) page_start, page_size,
2437                                         os::MEM_PROT_RWX);
2438 
2439           log_debug(os)("Execution protection violation "
2440                         "at " INTPTR_FORMAT
2441                         ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
2442                         p2i(page_start), (res ? "success" : os::strerror(errno)));
2443 
2444           // Set last_addr so if we fault again at the same address, we don't
2445           // end up in an endless loop.
2446           //
2447           // There are two potential complications here.  Two threads trapping
2448           // at the same address at the same time could cause one of the
2449           // threads to think it already unguarded, and abort the VM.  Likely
2450           // very rare.
2451           //
2452           // The other race involves two threads alternately trapping at
2453           // different addresses and failing to unguard the page, resulting in
2454           // an endless loop.  This condition is probably even more unlikely
2455           // than the first.
2456           //
2457           // Although both cases could be avoided by using locks or thread
2458           // local last_addr, these solutions are unnecessary complication:
2459           // this handler is a best-effort safety net, not a complete solution.
2460           // It is disabled by default and should only be used as a workaround
2461           // in case we missed any no-execute-unsafe VM code.
2462 
2463           last_addr = addr;
2464 
2465           return EXCEPTION_CONTINUE_EXECUTION;
2466         }
2467       }
2468 
2469       // Last unguard failed or not unguarding
2470       tty->print_raw_cr("Execution protection violation");
2471       report_error(t, exception_code, addr, exceptionInfo->ExceptionRecord,
2472                    exceptionInfo->ContextRecord);
2473       return EXCEPTION_CONTINUE_SEARCH;
2474     }
2475   }
2476 #endif // _WIN64
2477 
2478   if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
2479       VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that the OS saves/restores AVX registers.
2481     return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
2482   }
2483 
2484   if (t != NULL && t->is_Java_thread()) {
2485     JavaThread* thread = (JavaThread*) t;
2486     bool in_java = thread->thread_state() == _thread_in_Java;
2487 
2488     // Handle potential stack overflows up front.
2489     if (exception_code == EXCEPTION_STACK_OVERFLOW) {
2490       if (thread->stack_guards_enabled()) {
2491         if (in_java) {
2492           frame fr;
2493           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2494           address addr = (address) exceptionRecord->ExceptionInformation[1];
2495           if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
2496             assert(fr.is_java_frame(), "Must be a Java frame");
2497             SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
2498           }
2499         }
        // Yellow zone violation.  The o/s has unprotected the first yellow
        // zone page for us.  Note:  must call disable_stack_yellow_reserved_zone
        // to update the enabled status, even if the zone contains only one page.
2503         assert(thread->thread_state() != _thread_in_vm, "Undersized StackShadowPages");
2504         thread->disable_stack_yellow_reserved_zone();
2505         // If not in java code, return and hope for the best.
2506         return in_java
2507             ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
2508             :  EXCEPTION_CONTINUE_EXECUTION;
2509       } else {
2510         // Fatal red zone violation.
2511         thread->disable_stack_red_zone();
2512         tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
2513         report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2514                       exceptionInfo->ContextRecord);
2515         return EXCEPTION_CONTINUE_SEARCH;
2516       }
2517     } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2518       // Either stack overflow or null pointer exception.
2519       if (in_java) {
2520         PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2521         address addr = (address) exceptionRecord->ExceptionInformation[1];
2522         address stack_end = thread->stack_end();
2523         if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
2524           // Stack overflow.
2525           assert(!os::uses_stack_guard_pages(),
2526                  "should be caught by red zone code above.");
2527           return Handle_Exception(exceptionInfo,
2528                                   SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
2529         }
        // Check for safepoint polling and implicit null.
        // We only expect null pointers in the stubs (vtable);
        // the rest are checked explicitly now.
2533         CodeBlob* cb = CodeCache::find_blob(pc);
2534         if (cb != NULL) {
2535           if (SafepointMechanism::is_poll_address(addr)) {
2536             address stub = SharedRuntime::get_poll_stub(pc);
2537             return Handle_Exception(exceptionInfo, stub);
2538           }
2539         }
2540         {
2541 #ifdef _WIN64
          // If it's a legal stack address, map the entire region in.
          //
2544           PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2545           address addr = (address) exceptionRecord->ExceptionInformation[1];
2546           if (thread->is_in_usable_stack(addr)) {
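            // Round addr down to a page boundary and commit memory from
            // there up to the stack base.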
2547             addr = (address)((uintptr_t)addr &
2548                              (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
2549             os::commit_memory((char *)addr, thread->stack_base() - addr,
2550                               !ExecMem);
2551             return EXCEPTION_CONTINUE_EXECUTION;
2552           } else
2553 #endif
2554           {
2555             // Null pointer exception.
2556             if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
2557               address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
2558               if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
2559             }
2560             report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2561                          exceptionInfo->ContextRecord);
2562             return EXCEPTION_CONTINUE_SEARCH;
2563           }
2564         }
2565       }
2566 
2567 #ifdef _WIN64
2568       // Special care for fast JNI field accessors.
2569       // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
2570       // in and the heap gets shrunk before the field access.
2571       if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2572         address addr = JNI_FastGetField::find_slowcase_pc(pc);
2573         if (addr != (address)-1) {
2574           return Handle_Exception(exceptionInfo, addr);
2575         }
2576       }
2577 #endif
2578 
2579       // Stack overflow or null pointer exception in native code.
2580       report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2581                    exceptionInfo->ContextRecord);
2582       return EXCEPTION_CONTINUE_SEARCH;
2583     } // /EXCEPTION_ACCESS_VIOLATION
2584     // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
2585 
2586     if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
2587       CompiledMethod* nm = NULL;
2588       JavaThread* thread = (JavaThread*)t;
2589       if (in_java) {
2590         CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
2591         nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
2592       }
2593 
2594       bool is_unsafe_arraycopy = (thread->thread_state() == _thread_in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
2595       if (((thread->thread_state() == _thread_in_vm ||
2596            thread->thread_state() == _thread_in_native ||
2597            is_unsafe_arraycopy) &&
2598           thread->doing_unsafe_access()) ||
2599           (nm != NULL && nm->has_unsafe_access())) {
2600         address next_pc =  Assembler::locate_next_instruction(pc);
2601         if (is_unsafe_arraycopy) {
2602           next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
2603         }
2604         return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
2605       }
2606     }
2607 
2608     if (in_java) {
2609       switch (exception_code) {
2610       case EXCEPTION_INT_DIVIDE_BY_ZERO:
2611         return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));
2612 
2613       case EXCEPTION_INT_OVERFLOW:
2614         return Handle_IDiv_Exception(exceptionInfo);
2615 
2616       } // switch
2617     }
2618     if (((thread->thread_state() == _thread_in_Java) ||
2619          (thread->thread_state() == _thread_in_native)) &&
2620          exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
2621       LONG result=Handle_FLT_Exception(exceptionInfo);
2622       if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
2623     }
2624   }
2625 
2626   if (exception_code != EXCEPTION_BREAKPOINT) {
2627     report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2628                  exceptionInfo->ContextRecord);
2629   }
2630   return EXCEPTION_CONTINUE_SEARCH;
2631 }
2632 
2633 #ifndef _WIN64
2634 // Special care for fast JNI accessors.
2635 // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2636 // the heap gets shrunk before the field access.
2637 // Need to install our own structured exception handler since native code may
2638 // install its own.
2639 LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2640   DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2641   if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2642     address pc = (address) exceptionInfo->ContextRecord->Eip;
2643     address addr = JNI_FastGetField::find_slowcase_pc(pc);
2644     if (addr != (address)-1) {
2645       return Handle_Exception(exceptionInfo, addr);
2646     }
2647   }
2648   return EXCEPTION_CONTINUE_SEARCH;
2649 }
2650 
2651 #define DEFINE_FAST_GETFIELD(Return, Fieldname, Result)                     \
2652   Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env,           \
2653                                                      jobject obj,           \
2654                                                      jfieldID fieldID) {    \
2655     __try {                                                                 \
2656       return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env,       \
2657                                                                  obj,       \
2658                                                                  fieldID);  \
2659     } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*)        \
2660                                               _exception_info())) {         \
2661     }                                                                       \
2662     return 0;                                                               \
2663   }
2664 
2665 DEFINE_FAST_GETFIELD(jboolean, bool,   Boolean)
2666 DEFINE_FAST_GETFIELD(jbyte,    byte,   Byte)
2667 DEFINE_FAST_GETFIELD(jchar,    char,   Char)
2668 DEFINE_FAST_GETFIELD(jshort,   short,  Short)
2669 DEFINE_FAST_GETFIELD(jint,     int,    Int)
2670 DEFINE_FAST_GETFIELD(jlong,    long,   Long)
2671 DEFINE_FAST_GETFIELD(jfloat,   float,  Float)
2672 DEFINE_FAST_GETFIELD(jdouble,  double, Double)
2673 
2674 address os::win32::fast_jni_accessor_wrapper(BasicType type) {
2675   switch (type) {
2676   case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
2677   case T_BYTE:    return (address)jni_fast_GetByteField_wrapper;
2678   case T_CHAR:    return (address)jni_fast_GetCharField_wrapper;
2679   case T_SHORT:   return (address)jni_fast_GetShortField_wrapper;
2680   case T_INT:     return (address)jni_fast_GetIntField_wrapper;
2681   case T_LONG:    return (address)jni_fast_GetLongField_wrapper;
2682   case T_FLOAT:   return (address)jni_fast_GetFloatField_wrapper;
2683   case T_DOUBLE:  return (address)jni_fast_GetDoubleField_wrapper;
2684   default:        ShouldNotReachHere();
2685   }
2686   return (address)-1;
2687 }
2688 #endif
2689 
2690 // Virtual Memory
2691 
2692 int os::vm_page_size() { return os::win32::vm_page_size(); }
2693 int os::vm_allocation_granularity() {
2694   return os::win32::vm_allocation_granularity();
2695 }
2696 
2697 // Windows large page support is available on Windows 2003. In order to use
2698 // large page memory, the administrator must first assign additional privilege
2699 // to the user:
2700 //   + select Control Panel -> Administrative Tools -> Local Security Policy
2701 //   + select Local Policies -> User Rights Assignment
2702 //   + double click "Lock pages in memory", add users and/or groups
2703 //   + reboot
2704 // Note the above steps are needed for administrator as well, as administrators
2705 // by default do not have the privilege to lock pages in memory.
2706 //
2707 // Note about Windows 2003: although the API supports committing large page
2708 // memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, I found through experimentation that it only uses large pages if the entire
2710 // memory region is reserved and committed in a single VirtualAlloc() call.
2711 // This makes Windows large page support more or less like Solaris ISM, in
2712 // that the entire heap must be committed upfront. This probably will change
2713 // in the future, if so the code below needs to be revisited.
2714 
2715 #ifndef MEM_LARGE_PAGES
2716   #define MEM_LARGE_PAGES 0x20000000
2717 #endif
2718 
2719 static HANDLE    _hProcess;
2720 static HANDLE    _hToken;
2721 
2722 // Container for NUMA node list info
2723 class NUMANodeListHolder {
2724  private:
2725   int *_numa_used_node_list;  // allocated below
2726   int _numa_used_node_count;
2727 
2728   void free_node_list() {
2729     FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
2730   }
2731 
2732  public:
2733   NUMANodeListHolder() {
2734     _numa_used_node_count = 0;
2735     _numa_used_node_list = NULL;
2736     // do rest of initialization in build routine (after function pointers are set up)
2737   }
2738 
2739   ~NUMANodeListHolder() {
2740     free_node_list();
2741   }
2742 
2743   bool build() {
2744     DWORD_PTR proc_aff_mask;
2745     DWORD_PTR sys_aff_mask;
2746     if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
2747     ULONG highest_node_number;
2748     if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
2749     free_node_list();
2750     _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
2751     for (unsigned int i = 0; i <= highest_node_number; i++) {
2752       ULONGLONG proc_mask_numa_node;
2753       if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
2754       if ((proc_aff_mask & proc_mask_numa_node)!=0) {
2755         _numa_used_node_list[_numa_used_node_count++] = i;
2756       }
2757     }
2758     return (_numa_used_node_count > 1);
2759   }
2760 
2761   int get_count() { return _numa_used_node_count; }
2762   int get_node_list_entry(int n) {
2763     // for indexes out of range, returns -1
2764     return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
2765   }
2766 
2767 } numa_node_list_holder;
2768 
2769 
2770 
2771 static size_t _large_page_size = 0;
2772 
2773 static bool request_lock_memory_privilege() {
2774   _hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2775                           os::current_process_id());
2776 
2777   LUID luid;
2778   if (_hProcess != NULL &&
2779       OpenProcessToken(_hProcess, TOKEN_ADJUST_PRIVILEGES, &_hToken) &&
2780       LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2781 
2782     TOKEN_PRIVILEGES tp;
2783     tp.PrivilegeCount = 1;
2784     tp.Privileges[0].Luid = luid;
2785     tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2786 
2787     // AdjustTokenPrivileges() may return TRUE even when it couldn't change the
    // privilege. Check GetLastError() too. See the MSDN documentation.
2789     if (AdjustTokenPrivileges(_hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2790         (GetLastError() == ERROR_SUCCESS)) {
2791       return true;
2792     }
2793   }
2794 
2795   return false;
2796 }
2797 
2798 static void cleanup_after_large_page_init() {
2799   if (_hProcess) CloseHandle(_hProcess);
2800   _hProcess = NULL;
2801   if (_hToken) CloseHandle(_hToken);
2802   _hToken = NULL;
2803 }
2804 
2805 static bool numa_interleaving_init() {
2806   bool success = false;
2807   bool use_numa_interleaving_specified = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2808 
2809   // print a warning if UseNUMAInterleaving flag is specified on command line
2810   bool warn_on_failure = use_numa_interleaving_specified;
2811 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2812 
2813   // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2814   size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2815   NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2816 
2817   if (numa_node_list_holder.build()) {
2818     if (log_is_enabled(Debug, os, cpu)) {
2819       Log(os, cpu) log;
2820       log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
2821       for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
2822         log.debug("  %d ", numa_node_list_holder.get_node_list_entry(i));
2823       }
2824     }
2825     success = true;
2826   } else {
2827     WARN("Process does not cover multiple NUMA nodes.");
2828   }
2829   if (!success) {
2830     if (use_numa_interleaving_specified) WARN("...Ignoring UseNUMAInterleaving flag.");
2831   }
2832   return success;
2833 #undef WARN
2834 }
2835 
// This routine is used whenever we need to reserve a contiguous VA range
// but need to make separate VirtualAlloc calls for each piece of the range.
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003, but it may be set otherwise)
//  * UseNUMAInterleaving requires each piece to be allocated on a separate NUMA node
2841 static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
2842                                          DWORD prot,
2843                                          bool should_inject_error = false) {
2844   char * p_buf;
2845   // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
2846   size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2847   size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;
2848 
  // First reserve enough address space in advance, since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits, but WS2003 does not allow reserving large page space.
  // So we just use 4K pages for the reserve; this gives us a legal contiguous
  // address space. Then we deallocate that reservation and re-allocate
  // using large pages.
2855   const size_t size_of_reserve = bytes + chunk_size;
2856   if (bytes > size_of_reserve) {
2857     // Overflowed.
2858     return NULL;
2859   }
2860   p_buf = (char *) VirtualAlloc(addr,
2861                                 size_of_reserve,  // size of Reserve
2862                                 MEM_RESERVE,
2863                                 PAGE_READWRITE);
2864   // If reservation failed, return NULL
2865   if (p_buf == NULL) return NULL;
2866   MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
2867   os::release_memory(p_buf, bytes + chunk_size);
2868 
  // We still need to round up to a page boundary (in case we are using large pages),
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with the page size);
  // instead we handle this in the bytes_to_rq computation below.
2872   p_buf = align_up(p_buf, page_size);
2873 
2874   // now go through and allocate one chunk at a time until all bytes are
2875   // allocated
2876   size_t  bytes_remaining = bytes;
2877   // An overflow of align_up() would have been caught above
2878   // in the calculation of size_of_reserve.
2879   char * next_alloc_addr = p_buf;
2880   HANDLE hProc = GetCurrentProcess();
2881 
2882 #ifdef ASSERT
2883   // Variable for the failure injection
2884   int ran_num = os::random();
2885   size_t fail_after = ran_num % bytes;
2886 #endif
2887 
2888   int count=0;
2889   while (bytes_remaining) {
2890     // select bytes_to_rq to get to the next chunk_size boundary
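    // e.g. with a 2MB chunk_size and next_alloc_addr 512KB past a chunk
    // boundary, at most 1.5MB is requested here.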
2891 
2892     size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
2893     // Note allocate and commit
2894     char * p_new;
2895 
2896 #ifdef ASSERT
2897     bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
2898 #else
2899     const bool inject_error_now = false;
2900 #endif
2901 
2902     if (inject_error_now) {
2903       p_new = NULL;
2904     } else {
2905       if (!UseNUMAInterleaving) {
2906         p_new = (char *) VirtualAlloc(next_alloc_addr,
2907                                       bytes_to_rq,
2908                                       flags,
2909                                       prot);
2910       } else {
2911         // get the next node to use from the used_node_list
2912         assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
2913         DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
2914         p_new = (char *)VirtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
2915       }
2916     }
2917 
2918     if (p_new == NULL) {
2919       // Free any allocated pages
2920       if (next_alloc_addr > p_buf) {
2921         // Some memory was committed so release it.
2922         size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so we
        // need to create a dummy 'reserve' record to match
        // the release.
2926         MemTracker::record_virtual_memory_reserve((address)p_buf,
2927                                                   bytes_to_release, CALLER_PC);
2928         os::release_memory(p_buf, bytes_to_release);
2929       }
2930 #ifdef ASSERT
2931       if (should_inject_error) {
2932         log_develop_debug(pagesize)("Reserving pages individually failed.");
2933       }
2934 #endif
2935       return NULL;
2936     }
2937 
2938     bytes_remaining -= bytes_to_rq;
2939     next_alloc_addr += bytes_to_rq;
2940     count++;
2941   }
2942   // Although the memory is allocated individually, it is returned as one.
2943   // NMT records it as one block.
2944   if ((flags & MEM_COMMIT) != 0) {
2945     MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
2946   } else {
2947     MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
2948   }
2949 
2950   // made it this far, success
2951   return p_buf;
2952 }
2953 
2954 
2955 
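// Probes large page support: acquires the lock-memory privilege, queries
// GetLargePageMinimum(), validates LargePageSizeInBytes against it, and records the
// resulting page size before updating UseLargePages to reflect success or failure.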
2956 void os::large_page_init() {
2957   if (!UseLargePages) return;
2958 
2959   // print a warning if any large page related flag is specified on command line
2960   bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
2961                          !FLAG_IS_DEFAULT(LargePageSizeInBytes);
2962   bool success = false;
2963 
2964 #define WARN(msg) if (warn_on_failure) { warning(msg); }
2965   if (request_lock_memory_privilege()) {
2966     size_t s = GetLargePageMinimum();
2967     if (s) {
2968 #if defined(IA32) || defined(AMD64)
2969       if (s > 4*M || LargePageSizeInBytes > 4*M) {
2970         WARN("JVM cannot use large pages bigger than 4MB.");
2971       } else {
2972 #endif
2973         if (LargePageSizeInBytes && LargePageSizeInBytes % s == 0) {
2974           _large_page_size = LargePageSizeInBytes;
2975         } else {
2976           _large_page_size = s;
2977         }
2978         success = true;
2979 #if defined(IA32) || defined(AMD64)
2980       }
2981 #endif
2982     } else {
2983       WARN("Large pages are not supported by the processor.");
2984     }
2985   } else {
2986     WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
2987   }
2988 #undef WARN
2989 
2990   const size_t default_page_size = (size_t) vm_page_size();
2991   if (success && _large_page_size > default_page_size) {
2992     _page_sizes[0] = _large_page_size;
2993     _page_sizes[1] = default_page_size;
2994     _page_sizes[2] = 0;
2995   }
2996 
2997   cleanup_after_large_page_init();
2998   UseLargePages = success;
2999 }
3000 
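// Creates a temporary backing file for the Java heap in 'dir'. The name is derived from
// the "/jvmheap.XXXXXX" template via _mktemp(), and the file is opened with O_TEMPORARY
// so the OS removes it once the last handle to it is closed. Returns the descriptor, or -1.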
3001 int os::create_file_for_heap(const char* dir) {
3002 
3003   const char name_template[] = "/jvmheap.XXXXXX";
3004 
3005   size_t fullname_len = strlen(dir) + strlen(name_template);
3006   char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
3007   if (fullname == NULL) {
3008     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
3009     return -1;
3010   }
3011   int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
3012   assert((size_t)n == fullname_len, "Unexpected number of characters in string");
3013 
3014   os::native_path(fullname);
3015 
3016   char *path = _mktemp(fullname);
3017   if (path == NULL) {
3018     warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
3019     os::free(fullname);
3020     return -1;
3021   }
3022 
3023   int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
3024 
3025   os::free(fullname);
3026   if (fd < 0) {
3027     warning("Problem opening file for heap (%s)", os::strerror(errno));
3028     return -1;
3029   }
3030   return fd;
3031 }
3032 
3033 // If 'base' is not NULL, the function will return NULL if it cannot get memory at 'base'.
3034 char* os::map_memory_to_file(char* base, size_t size, int fd) {
3035   assert(fd != -1, "File descriptor is not valid");
3036 
3037   HANDLE fh = (HANDLE)_get_osfhandle(fd);
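  // CreateFileMapping takes the mapping size as two 32-bit halves. For example, a 36GB
  // size of 0x900000000 is passed as high DWORD 0x9 and low DWORD 0x0 on _LP64 builds.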
3038 #ifdef _LP64
3039   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3040     (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
3041 #else
3042   HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
3043     0, (DWORD)size, NULL);
3044 #endif
3045   if (fileMapping == NULL) {
3046     if (GetLastError() == ERROR_DISK_FULL) {
3047       vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
3048     }
3049     else {
3050       vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
3051     }
3052 
3053     return NULL;
3054   }
3055 
3056   LPVOID addr = MapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);
3057 
3058   CloseHandle(fileMapping);
3059 
3060   return (char*)addr;
3061 }
3062 
3063 char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
3064   assert(fd != -1, "File descriptor is not valid");
3065   assert(base != NULL, "Base address cannot be NULL");
3066 
3067   release_memory(base, size);
3068   return map_memory_to_file(base, size, fd);
3069 }
3070 
3071 // On win32, one cannot release just a part of reserved memory, it's an
3072 // all or nothing deal.  When we split a reservation, we must break the
3073 // reservation into two reservations.
3074 void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
3075                                   bool realloc) {
3076   if (size > 0) {
3077     release_memory(base, size);
3078     if (realloc) {
3079       reserve_memory(split, base);
3080     }
3081     if (size != split) {
3082       reserve_memory(size - split, base + split);
3083     }
3084   }
3085 }
3086 
3087 // Multiple threads can race in this code, but it is not possible to unmap small sections of
3088 // virtual space to get the requested alignment, as it is on POSIX-like OSes.
3089 // Windows prevents multiple threads from remapping over each other, so this loop is thread-safe.
3090 char* os::reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
3091   assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
3092          "Alignment must be a multiple of the allocation granularity");
3093   assert((size & (alignment - 1)) == 0, "size must be 'alignment' aligned");
3094 
3095   size_t extra_size = size + alignment;
3096   assert(extra_size >= size, "overflow, size is too large to allow alignment");
3097 
3098   char* aligned_base = NULL;
3099 
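  // Over-reserve by 'alignment' bytes, compute the aligned address inside that range,
  // release the whole reservation, and then try to re-reserve exactly at the aligned
  // address. Another thread may grab the freed range in between, hence the retry loop.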
3100   do {
3101     char* extra_base = os::reserve_memory(extra_size, NULL, alignment, file_desc);
3102     if (extra_base == NULL) {
3103       return NULL;
3104     }
3105     // Do manual alignment
3106     aligned_base = align_up(extra_base, alignment);
3107 
3108     if (file_desc != -1) {
3109       os::unmap_memory(extra_base, extra_size);
3110     } else {
3111       os::release_memory(extra_base, extra_size);
3112     }
3113 
3114     aligned_base = os::reserve_memory(size, aligned_base, 0, file_desc);
3115 
3116   } while (aligned_base == NULL);
3117 
3118   return aligned_base;
3119 }
3120 
3121 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
3122   assert((size_t)addr % os::vm_allocation_granularity() == 0,
3123          "reserve alignment");
3124   assert(bytes % os::vm_page_size() == 0, "reserve page size");
3125   char* res;
3126   // Note that if UseLargePages is on, all the areas that require interleaving
3127   // will go through reserve_memory_special rather than through here.
3128   bool use_individual = (UseNUMAInterleaving && !UseLargePages);
3129   if (!use_individual) {
3130     res = (char*)VirtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
3131   } else {
3132     elapsedTimer reserveTimer;
3133     if (Verbose && PrintMiscellaneous) reserveTimer.start();
3134     // in numa interleaving, we have to allocate pages individually
3135     // (well really chunks of NUMAInterleaveGranularity size)
3136     res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
3137     if (res == NULL) {
3138       warning("NUMA page allocation failed");
3139     }
3140     if (Verbose && PrintMiscellaneous) {
3141       reserveTimer.stop();
3142       tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
3143                     reserveTimer.milliseconds(), reserveTimer.ticks());
3144     }
3145   }
3146   assert(res == NULL || addr == NULL || addr == res,
3147          "Unexpected address from reserve.");
3148 
3149   return res;
3150 }
3151 
3152 // Reserve memory at an arbitrary address, only if that area is
3153 // available (and not reserved for something else).
3154 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
3155   // Windows os::reserve_memory() fails if the requested address range is
3156   // not available.
3157   return reserve_memory(bytes, requested_addr);
3158 }
3159 
3160 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr, int file_desc) {
3161   assert(file_desc >= 0, "file_desc is not valid");
3162   return map_memory_to_file(requested_addr, bytes, file_desc);
3163 }
3164 
3165 size_t os::large_page_size() {
3166   return _large_page_size;
3167 }
3168 
3169 bool os::can_commit_large_page_memory() {
3170   // Windows only uses large page memory when the entire region is reserved
3171   // and committed in a single VirtualAlloc() call. This may change in the
3172   // future, but with Windows 2003 it's not possible to commit on demand.
3173   return false;
3174 }
3175 
3176 bool os::can_execute_large_page_memory() {
3177   return true;
3178 }
3179 
3180 char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, char* addr,
3181                                     bool exec) {
3182   assert(UseLargePages, "only for large pages");
3183 
3184   if (!is_aligned(bytes, os::large_page_size()) || alignment > os::large_page_size()) {
3185     return NULL; // Fallback to small pages.
3186   }
3187 
3188   const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
3189   const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
3190 
3191   // with large pages, there are two cases where we need to use Individual Allocation
3192   // 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
3193   // 2) NUMA Interleaving is enabled, in which case we use a different node for each page
3194   if (UseLargePagesIndividualAllocation || UseNUMAInterleaving) {
3195     log_debug(pagesize)("Reserving large pages individually.");
3196 
3197     char * p_buf = allocate_pages_individually(bytes, addr, flags, prot, LargePagesIndividualAllocationInjectError);
3198     if (p_buf == NULL) {
3199       // give an appropriate warning message
3200       if (UseNUMAInterleaving) {
3201         warning("NUMA large page allocation failed, UseLargePages flag ignored");
3202       }
3203       if (UseLargePagesIndividualAllocation) {
3204         warning("Individually allocated large pages failed, "
3205                 "use -XX:-UseLargePagesIndividualAllocation to turn off");
3206       }
3207       return NULL;
3208     }
3209 
3210     return p_buf;
3211 
3212   } else {
3213     log_debug(pagesize)("Reserving large pages in a single large chunk.");
3214 
3215     // normal policy just allocate it all at once
3216     // 'flags' (MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES) was already set up above.
3217     char * res = (char *)VirtualAlloc(addr, bytes, flags, prot);
3218 
3219     return res;
3220   }
3221 }
3222 
3223 bool os::pd_release_memory_special(char* base, size_t bytes) {
3224   assert(base != NULL, "Sanity check");
3225   return pd_release_memory(base, bytes);
3226 }
3227 
3228 void os::print_statistics() {
3229 }
3230 
3231 static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3232   int err = os::get_last_error();
3233   char buf[256];
3234   size_t buf_len = os::lasterror(buf, sizeof(buf));
3235   warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3236           ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3237           exec, buf_len != 0 ? buf : "<no_error_string>", err);
3238 }
3239 
3240 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
3241   if (bytes == 0) {
3242     // Don't bother the OS with noops.
3243     return true;
3244   }
3245   assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
3246   assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
3247   // Don't attempt to print anything if the OS call fails. We're
3248   // probably low on resources, so the print itself may cause crashes.
3249 
3250   // Unless NUMAInterleaving is enabled, the range of a commit
3251   // is always within a reserve covered by a single VirtualAlloc;
3252   // in that case we can just do a single commit for the requested size.
3253   if (!UseNUMAInterleaving) {
3254     if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
3255       NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3256       return false;
3257     }
3258     if (exec) {
3259       DWORD oldprot;
3260       // Windows doc says to use VirtualProtect to get execute permissions
3261       if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
3262         NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
3263         return false;
3264       }
3265     }
3266     return true;
3267   } else {
3268 
3269     // when NUMAInterleaving is enabled, the commit might cover a range that
3270     // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
3271     // VirtualQuery can help us determine that.  The RegionSize that VirtualQuery
3272     // returns represents the number of bytes that can be committed in one step.
3273     size_t bytes_remaining = bytes;
3274     char * next_alloc_addr = addr;
3275     while (bytes_remaining > 0) {
3276       MEMORY_BASIC_INFORMATION alloc_info;
3277       VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
3278       size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3279       if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
3280                        PAGE_READWRITE) == NULL) {
3281         NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3282                                             exec);)
3283         return false;
3284       }
3285       if (exec) {
3286         DWORD oldprot;
3287         if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
3288                             PAGE_EXECUTE_READWRITE, &oldprot)) {
3289           NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
3290                                               exec);)
3291           return false;
3292         }
3293       }
3294       bytes_remaining -= bytes_to_rq;
3295       next_alloc_addr += bytes_to_rq;
3296     }
3297   }
3298   // if we made it this far, return true
3299   return true;
3300 }
3301 
3302 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
3303                           bool exec) {
3304   // alignment_hint is ignored on this OS
3305   return pd_commit_memory(addr, size, exec);
3306 }
3307 
3308 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
3309                                   const char* mesg) {
3310   assert(mesg != NULL, "mesg must be specified");
3311   if (!pd_commit_memory(addr, size, exec)) {
3312     warn_fail_commit_memory(addr, size, exec);
3313     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
3314   }
3315 }
3316 
3317 void os::pd_commit_memory_or_exit(char* addr, size_t size,
3318                                   size_t alignment_hint, bool exec,
3319                                   const char* mesg) {
3320   // alignment_hint is ignored on this OS
3321   pd_commit_memory_or_exit(addr, size, exec, mesg);
3322 }
3323 
3324 bool os::pd_uncommit_memory(char* addr, size_t bytes) {
3325   if (bytes == 0) {
3326     // Don't bother the OS with noops.
3327     return true;
3328   }
3329   assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3330   assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3331   return (VirtualFree(addr, bytes, MEM_DECOMMIT) != 0);
3332 }
3333 
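// VirtualFree with MEM_RELEASE requires a size of 0 and frees the entire reservation that
// 'addr' belongs to; partial releases are emulated by pd_split_reserved_memory() above
// (release followed by re-reserve).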
3334 bool os::pd_release_memory(char* addr, size_t bytes) {
3335   return VirtualFree(addr, 0, MEM_RELEASE) != 0;
3336 }
3337 
3338 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3339   return os::commit_memory(addr, size, !ExecMem);
3340 }
3341 
3342 bool os::remove_stack_guard_pages(char* addr, size_t size) {
3343   return os::uncommit_memory(addr, size);
3344 }
3345 
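// When memory was reserved chunk-by-chunk (NUMA interleaving), protection changes are
// applied one VirtualQuery region at a time, mirroring the commit loop in
// os::pd_commit_memory() above.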
3346 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3347   uint count = 0;
3348   bool ret = false;
3349   size_t bytes_remaining = bytes;
3350   char * next_protect_addr = addr;
3351 
3352   // Use VirtualQuery() to get the chunk size.
3353   while (bytes_remaining) {
3354     MEMORY_BASIC_INFORMATION alloc_info;
3355     if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3356       return false;
3357     }
3358 
3359     size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3360     // We used a different API in allocate_pages_individually() depending on UseNUMAInterleaving,
3361     // but we don't distinguish here, as both cases are protected by the same API.
3362     ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3363     if (!ret) {
3364       warning("Failed protecting pages individually for chunk #%u", count);
3365       return false;
3366     }
3367 
3368     bytes_remaining -= bytes_to_protect;
3369     next_protect_addr += bytes_to_protect;
3370     count++;
3371   }
3372   return ret;
3373 }
3374 
3375 // Set protections specified
3376 bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
3377                         bool is_committed) {
3378   unsigned int p = 0;
3379   switch (prot) {
3380   case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
3381   case MEM_PROT_READ: p = PAGE_READONLY; break;
3382   case MEM_PROT_RW:   p = PAGE_READWRITE; break;
3383   case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
3384   default:
3385     ShouldNotReachHere();
3386   }
3387 
3388   DWORD old_status;
3389 
3390   // Strangely enough, on Win32 one can change protection only for committed
3391   // memory; not a big deal anyway, as 'bytes' is less than or equal to 64K.
3392   if (!is_committed) {
3393     commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
3394                           "cannot commit protection page");
3395   }
3396   // One cannot use os::guard_memory() here, as on Win32 guard pages
3397   // have different (one-shot) semantics; from MSDN on PAGE_GUARD:
3398   //
3399   // Pages in the region become guard pages. Any attempt to access a guard page
3400   // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
3401   // the guard page status. Guard pages thus act as a one-time access alarm.
3402   bool ret;
3403   if (UseNUMAInterleaving) {
3404     // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
3405     // so we must protect the chunks individually.
3406     ret = protect_pages_individually(addr, bytes, p, &old_status);
3407   } else {
3408     ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
3409   }
3410 #ifdef ASSERT
3411   if (!ret) {
3412     int err = os::get_last_error();
3413     char buf[256];
3414     size_t buf_len = os::lasterror(buf, sizeof(buf));
3415     warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
3416           ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3417           buf_len != 0 ? buf : "<no_error_string>", err);
3418   }
3419 #endif
3420   return ret;
3421 }
3422 
3423 bool os::guard_memory(char* addr, size_t bytes) {
3424   DWORD old_status;
3425   return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3426 }
3427 
3428 bool os::unguard_memory(char* addr, size_t bytes) {
3429   DWORD old_status;
3430   return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3431 }
3432 
3433 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3434 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
3435 void os::numa_make_global(char *addr, size_t bytes)    { }
3436 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint)    { }
3437 bool os::numa_topology_changed()                       { return false; }
3438 size_t os::numa_get_groups_num()                       { return MAX2(numa_node_list_holder.get_count(), 1); }
3439 int os::numa_get_group_id()                            { return 0; }
3440 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
3441   if (numa_node_list_holder.get_count() == 0 && size > 0) {
3442     // Provide an answer for UMA systems
3443     ids[0] = 0;
3444     return 1;
3445   } else {
3446     // check for size bigger than actual groups_num
3447     size = MIN2(size, numa_get_groups_num());
3448     for (int i = 0; i < (int)size; i++) {
3449       ids[i] = numa_node_list_holder.get_node_list_entry(i);
3450     }
3451     return size;
3452   }
3453 }
3454 
3455 int os::numa_get_group_id_for_address(const void* address) {
3456   return 0;
3457 }
3458 
3459 bool os::get_page_info(char *start, page_info* info) {
3460   return false;
3461 }
3462 
3463 char *os::scan_pages(char *start, char* end, page_info* page_expected,
3464                      page_info* page_found) {
3465   return end;
3466 }
3467 
3468 char* os::non_memory_address_word() {
3469   // Must never look like an address returned by reserve_memory,
3470   // even in its subfields (as defined by the CPU immediate fields,
3471   // if the CPU splits constants across multiple instructions).
3472   return (char*)-1;
3473 }
3474 
3475 #define MAX_ERROR_COUNT 100
3476 #define SYS_THREAD_ERROR 0xffffffffUL
3477 
3478 void os::pd_start_thread(Thread* thread) {
3479   DWORD ret = ResumeThread(thread->osthread()->thread_handle());
3480   // Returns previous suspend state:
3481   // 0:  Thread was not suspended
3482   // 1:  Thread is running now
3483   // >1: Thread is still suspended.
3484   assert(ret != SYS_THREAD_ERROR, "ResumeThread failed"); // should propagate back
3485 }
3486 
3487 
3488 // Short sleep, direct OS call.
3489 //
3490 // ms = 0, means allow others (if any) to run.
3491 //
3492 void os::naked_short_sleep(jlong ms) {
3493   assert(ms < 1000, "Un-interruptible sleep, short time use only");
3494   Sleep(ms);
3495 }
3496 
3497 // Windows does not provide sleep functionality with nanosecond resolution, so we
3498 // try to approximate this with spinning combined with yielding if another thread
3499 // is ready to run on the current processor.
3500 void os::naked_short_nanosleep(jlong ns) {
3501   assert(ns > -1 && ns < NANOUNITS, "Un-interruptible sleep, short time use only");
3502 
3503   int64_t start = os::javaTimeNanos();
3504   do {
3505     if (SwitchToThread() == 0) {
3506       // Nothing else is ready to run on this cpu, spin a little
3507       SpinPause();
3508     }
3509   } while (os::javaTimeNanos() - start < ns);
3510 }
3511 
3512 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
3513 void os::infinite_sleep() {
3514   while (true) {    // sleep forever ...
3515     Sleep(100000);  // ... 100 seconds at a time
3516   }
3517 }
3518 
3519 typedef BOOL (WINAPI * STTSignature)(void);
3520 
3521 void os::naked_yield() {
3522   // Consider passing back the return value from SwitchToThread().
3523   SwitchToThread();
3524 }
3525 
3526 // Win32 only gives you access to seven real priorities at a time,
3527 // so we compress Java's ten down to seven.  It would be better
3528 // if we dynamically adjusted relative priorities.
3529 
3530 int os::java_to_os_priority[CriticalPriority + 1] = {
3531   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3532   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3533   THREAD_PRIORITY_LOWEST,                       // 2
3534   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3535   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3536   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3537   THREAD_PRIORITY_NORMAL,                       // 6
3538   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3539   THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
3540   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3541   THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
3542   THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
3543 };
3544 
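// Alternative mapping used when -XX:ThreadPriorityPolicy=1 is specified (see prio_init()
// below); it spreads Java priorities over a wider range, up to THREAD_PRIORITY_TIME_CRITICAL.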
3545 int prio_policy1[CriticalPriority + 1] = {
3546   THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
3547   THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
3548   THREAD_PRIORITY_LOWEST,                       // 2
3549   THREAD_PRIORITY_BELOW_NORMAL,                 // 3
3550   THREAD_PRIORITY_BELOW_NORMAL,                 // 4
3551   THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
3552   THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
3553   THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
3554   THREAD_PRIORITY_HIGHEST,                      // 8
3555   THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
3556   THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
3557   THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
3558 };
3559 
3560 static int prio_init() {
3561   // If ThreadPriorityPolicy is 1, switch tables
3562   if (ThreadPriorityPolicy == 1) {
3563     int i;
3564     for (i = 0; i < CriticalPriority + 1; i++) {
3565       os::java_to_os_priority[i] = prio_policy1[i];
3566     }
3567   }
3568   if (UseCriticalJavaThreadPriority) {
3569     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3570   }
3571   return 0;
3572 }
3573 
3574 OSReturn os::set_native_priority(Thread* thread, int priority) {
3575   if (!UseThreadPriorities) return OS_OK;
3576   bool ret = SetThreadPriority(thread->osthread()->thread_handle(), priority) != 0;
3577   return ret ? OS_OK : OS_ERR;
3578 }
3579 
3580 OSReturn os::get_native_priority(const Thread* const thread,
3581                                  int* priority_ptr) {
3582   if (!UseThreadPriorities) {
3583     *priority_ptr = java_to_os_priority[NormPriority];
3584     return OS_OK;
3585   }
3586   int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
3587   if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
3588     assert(false, "GetThreadPriority failed");
3589     return OS_ERR;
3590   }
3591   *priority_ptr = os_prio;
3592   return OS_OK;
3593 }
3594 
3595 // GetCurrentThreadId() returns DWORD
3596 intx os::current_thread_id()  { return GetCurrentThreadId(); }
3597 
3598 static int _initial_pid = 0;
3599 
3600 int os::current_process_id() {
3601   return (_initial_pid ? _initial_pid : _getpid());
3602 }
3603 
3604 int    os::win32::_vm_page_size              = 0;
3605 int    os::win32::_vm_allocation_granularity = 0;
3606 int    os::win32::_processor_type            = 0;
3607 // Processor level is not available on non-NT systems, use vm_version instead
3608 int    os::win32::_processor_level           = 0;
3609 julong os::win32::_physical_memory           = 0;
3610 size_t os::win32::_default_stack_size        = 0;
3611 
3612 intx          os::win32::_os_thread_limit    = 0;
3613 volatile intx os::win32::_os_thread_count    = 0;
3614 
3615 bool   os::win32::_is_windows_server         = false;
3616 
3617 // 6573254
3618 // Currently, the bug is observed across all the supported Windows releases,
3619 // including the latest one (as of this writing - Windows Server 2012 R2)
3620 bool   os::win32::_has_exit_bug              = true;
3621 
3622 void os::win32::initialize_system_info() {
3623   SYSTEM_INFO si;
3624   GetSystemInfo(&si);
3625   _vm_page_size    = si.dwPageSize;
3626   _vm_allocation_granularity = si.dwAllocationGranularity;
3627   _processor_type  = si.dwProcessorType;
3628   _processor_level = si.wProcessorLevel;
3629   set_processor_count(si.dwNumberOfProcessors);
3630 
3631   MEMORYSTATUSEX ms;
3632   ms.dwLength = sizeof(ms);
3633 
3634   // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
3635   // dwMemoryLoad (% of memory in use)
3636   GlobalMemoryStatusEx(&ms);
3637   _physical_memory = ms.ullTotalPhys;
3638 
3639   if (FLAG_IS_DEFAULT(MaxRAM)) {
3640     // Adjust MaxRAM according to the maximum virtual address space available.
3641     FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3642   }
3643 
3644   OSVERSIONINFOEX oi;
3645   oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3646   GetVersionEx((OSVERSIONINFO*)&oi);
3647   switch (oi.dwPlatformId) {
3648   case VER_PLATFORM_WIN32_NT:
3649     {
3650       int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3651       if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3652           oi.wProductType == VER_NT_SERVER) {
3653         _is_windows_server = true;
3654       }
3655     }
3656     break;
3657   default: fatal("Unknown platform");
3658   }
3659 
3660   _default_stack_size = os::current_stack_size();
3661   assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3662   assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3663          "stack size not a multiple of page size");
3664 
3665   initialize_performance_counter();
3666 }
3667 
3668 
3669 HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
3670                                       int ebuflen) {
3671   char path[MAX_PATH];
3672   DWORD size;
3673   DWORD pathLen = (DWORD)sizeof(path);
3674   HINSTANCE result = NULL;
3675 
3676   // only allow library name without path component
3677   assert(strchr(name, '\\') == NULL, "path not allowed");
3678   assert(strchr(name, ':') == NULL, "path not allowed");
3679   if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
3680     jio_snprintf(ebuf, ebuflen,
3681                  "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
3682     return NULL;
3683   }
3684 
3685   // search system directory
3686   if ((size = GetSystemDirectory(path, pathLen)) > 0) {
3687     if (size >= pathLen) {
3688       return NULL; // truncated
3689     }
3690     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3691       return NULL; // truncated
3692     }
3693     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3694       return result;
3695     }
3696   }
3697 
3698   // try Windows directory
3699   if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
3700     if (size >= pathLen) {
3701       return NULL; // truncated
3702     }
3703     if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
3704       return NULL; // truncated
3705     }
3706     if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
3707       return result;
3708     }
3709   }
3710 
3711   jio_snprintf(ebuf, ebuflen,
3712                "os::win32::load_windows_dll() cannot load %s from system directories.", name);
3713   return NULL;
3714 }
3715 
3716 #define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
3717 #define EXIT_TIMEOUT 300000 /* 5 minutes */
3718 
3719 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
3720   InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
3721   return TRUE;
3722 }
3723 
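// Works around the 'race at exit' bug (see _has_exit_bug above): on affected Windows
// releases, exit() racing with _endthreadex() is unsafe, so exiting threads register their
// handles here and a process-exiting thread waits for them before calling exit()/_exit().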
3724 int os::win32::exit_process_or_thread(Ept what, int exit_code) {
3725   // Basic approach:
3726   //  - Each exiting thread registers its intent to exit and then does so.
3727   //  - A thread trying to terminate the process must wait for all
3728   //    threads currently exiting to complete their exit.
3729 
3730   if (os::win32::has_exit_bug()) {
3731     // The array holds handles of the threads that have started exiting by calling
3732     // _endthreadex().
3733     // Should be large enough to avoid blocking the exiting thread due to lack of
3734     // a free slot.
3735     static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
3736     static int handle_count = 0;
3737 
3738     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
3739     static CRITICAL_SECTION crit_sect;
3740     static volatile DWORD process_exiting = 0;
3741     int i, j;
3742     DWORD res;
3743     HANDLE hproc, hthr;
3744 
3745     // We only attempt to register threads until a process exiting
3746     // thread manages to set the process_exiting flag. Any threads
3747     // that come through here after the process_exiting flag is set
3748     // are unregistered and will be caught in the SuspendThread()
3749     // infinite loop below.
3750     bool registered = false;
3751 
3752     // The first thread that reached this point, initializes the critical section.
3753     if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
3754       warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
3755     } else if (Atomic::load_acquire(&process_exiting) == 0) {
3756       if (what != EPT_THREAD) {
3757         // Atomically set process_exiting before the critical section
3758         // to increase the visibility between racing threads.
3759         Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
3760       }
3761       EnterCriticalSection(&crit_sect);
3762 
3763       if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
3764         // Remove from the array those handles of the threads that have completed exiting.
3765         for (i = 0, j = 0; i < handle_count; ++i) {
3766           res = WaitForSingleObject(handles[i], 0 /* don't wait */);
3767           if (res == WAIT_TIMEOUT) {
3768             handles[j++] = handles[i];
3769           } else {
3770             if (res == WAIT_FAILED) {
3771               warning("WaitForSingleObject failed (%u) in %s: %d\n",
3772                       GetLastError(), __FILE__, __LINE__);
3773             }
3774             // Don't keep the handle, if we failed waiting for it.
3775             CloseHandle(handles[i]);
3776           }
3777         }
3778 
3779         // If there's no free slot in the array of the kept handles, we'll have to
3780         // wait until at least one thread completes exiting.
3781         if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
3782           // Raise the priority of the oldest exiting thread to increase its chances
3783           // to complete sooner.
3784           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
3785           res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
3786           if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
3787             i = (res - WAIT_OBJECT_0);
3788             handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
3789             for (; i < handle_count; ++i) {
3790               handles[i] = handles[i + 1];
3791             }
3792           } else {
3793             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3794                     (res == WAIT_FAILED ? "failed" : "timed out"),
3795                     GetLastError(), __FILE__, __LINE__);
3796             // Don't keep handles, if we failed waiting for them.
3797             for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
3798               CloseHandle(handles[i]);
3799             }
3800             handle_count = 0;
3801           }
3802         }
3803 
3804         // Store a duplicate of the current thread handle in the array of handles.
3805         hproc = GetCurrentProcess();
3806         hthr = GetCurrentThread();
3807         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
3808                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
3809           warning("DuplicateHandle failed (%u) in %s: %d\n",
3810                   GetLastError(), __FILE__, __LINE__);
3811 
3812           // We can't register this thread (no more handles) so this thread
3813           // may be racing with a thread that is calling exit(). If the thread
3814           // that is calling exit() has managed to set the process_exiting
3815           // flag, then this thread will be caught in the SuspendThread()
3816           // infinite loop below which closes that race. A small timing
3817           // window remains before the process_exiting flag is set, but it
3818           // is only exposed when we are out of handles.
3819         } else {
3820           ++handle_count;
3821           registered = true;
3822 
3823           // The current exiting thread has stored its handle in the array, and now
3824           // should leave the critical section before calling _endthreadex().
3825         }
3826 
3827       } else if (what != EPT_THREAD && handle_count > 0) {
3828         jlong start_time, finish_time, timeout_left;
3829         // Before ending the process, make sure all the threads that had called
3830         // _endthreadex() completed.
3831 
3832         // Set the priority level of the current thread to the same value as
3833         // the priority level of exiting threads.
3834         // This is to ensure it will be given a fair chance to execute if
3835         // the timeout expires.
3836         hthr = GetCurrentThread();
3837         SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
3838         start_time = os::javaTimeNanos();
3839         finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
3840         for (i = 0; ; ) {
3841           int portion_count = handle_count - i;
3842           if (portion_count > MAXIMUM_WAIT_OBJECTS) {
3843             portion_count = MAXIMUM_WAIT_OBJECTS;
3844           }
3845           for (j = 0; j < portion_count; ++j) {
3846             SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
3847           }
3848           timeout_left = (finish_time - start_time) / 1000000L;
3849           if (timeout_left < 0) {
3850             timeout_left = 0;
3851           }
3852           res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
3853           if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
3854             warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
3855                     (res == WAIT_FAILED ? "failed" : "timed out"),
3856                     GetLastError(), __FILE__, __LINE__);
3857             // Reset portion_count so we close the remaining
3858             // handles due to this error.
3859             portion_count = handle_count - i;
3860           }
3861           for (j = 0; j < portion_count; ++j) {
3862             CloseHandle(handles[i + j]);
3863           }
3864           if ((i += portion_count) >= handle_count) {
3865             break;
3866           }
3867           start_time = os::javaTimeNanos();
3868         }
3869         handle_count = 0;
3870       }
3871 
3872       LeaveCriticalSection(&crit_sect);
3873     }
3874 
3875     if (!registered &&
3876         Atomic::load_acquire(&process_exiting) != 0 &&
3877         process_exiting != GetCurrentThreadId()) {
3878       // Some other thread is about to call exit(), so we don't let
3879       // the current unregistered thread proceed to exit() or _endthreadex()
3880       while (true) {
3881         SuspendThread(GetCurrentThread());
3882         // Avoid busy-wait loop, if SuspendThread() failed.
3883         Sleep(EXIT_TIMEOUT);
3884       }
3885     }
3886   }
3887 
3888   // We are here if either
3889   // - there's no 'race at exit' bug on this OS release;
3890   // - initialization of the critical section failed (unlikely);
3891   // - the current thread has registered itself and left the critical section;
3892   // - the process-exiting thread has raised the flag and left the critical section.
3893   if (what == EPT_THREAD) {
3894     _endthreadex((unsigned)exit_code);
3895   } else if (what == EPT_PROCESS) {
3896     ::exit(exit_code);
3897   } else {
3898     _exit(exit_code);
3899   }
3900 
3901   // Should not reach here
3902   return exit_code;
3903 }
3904 
3905 #undef EXIT_TIMEOUT
3906 
3907 void os::win32::setmode_streams() {
3908   _setmode(_fileno(stdin), _O_BINARY);
3909   _setmode(_fileno(stdout), _O_BINARY);
3910   _setmode(_fileno(stderr), _O_BINARY);
3911 }
3912 
3913 void os::wait_for_keypress_at_exit(void) {
3914   if (PauseAtExit) {
3915     fprintf(stderr, "Press any key to continue...\n");
3916     fgetc(stdin);
3917   }
3918 }
3919 
3920 
3921 bool os::message_box(const char* title, const char* message) {
3922   int result = MessageBox(NULL, message, title,
3923                           MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
3924   return result == IDYES;
3925 }
3926 
3927 #ifndef PRODUCT
3928 #ifndef _WIN64
3929 // Helpers to check whether NX protection is enabled
3930 int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
3931   if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
3932       pex->ExceptionRecord->NumberParameters > 0 &&
3933       pex->ExceptionRecord->ExceptionInformation[0] ==
3934       EXCEPTION_INFO_EXEC_VIOLATION) {
3935     return EXCEPTION_EXECUTE_HANDLER;
3936   }
3937   return EXCEPTION_CONTINUE_SEARCH;
3938 }
3939 
3940 void nx_check_protection() {
3941   // If NX is enabled we'll get an exception calling into code on the stack
3942   char code[] = { (char)0xC3 }; // ret
3943   void *code_ptr = (void *)code;
3944   __try {
3945     __asm call code_ptr
3946   } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
3947     tty->print_raw_cr("NX protection detected.");
3948   }
3949 }
3950 #endif // _WIN64
3951 #endif // PRODUCT
3952 
3953 // This is called _before_ the global arguments have been parsed
3954 void os::init(void) {
3955   _initial_pid = _getpid();
3956 
3957   init_random(1234567);
3958 
3959   win32::initialize_system_info();
3960   win32::setmode_streams();
3961   init_page_sizes((size_t) win32::vm_page_size());
3962 
3963   // This may be overridden later when argument processing is done.
3964   FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);
3965 
3966   // Initialize main_process and main_thread
3967   main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
3968   if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
3969                        &main_thread, THREAD_ALL_ACCESS, false, 0)) {
3970     fatal("DuplicateHandle failed\n");
3971   }
3972   main_thread_id = (int) GetCurrentThreadId();
3973 
3974   // initialize fast thread access - only used for 32-bit
3975   win32::initialize_thread_ptr_offset();
3976 }
3977 
3978 // To install functions for atexit processing
3979 extern "C" {
3980   static void perfMemory_exit_helper() {
3981     perfMemory_exit();
3982   }
3983 }
3984 
3985 static jint initSock();
3986 
3987 // this is called _after_ the global arguments have been parsed
3988 jint os::init_2(void) {
3989 
3990   // This could be set any time but all platforms
3991   // have to set it the same so we have to mirror Solaris.
3992   DEBUG_ONLY(os::set_mutex_init_done();)
3993 
3994   // Setup Windows Exceptions
3995 
3996 #if INCLUDE_AOT
3997   // If AOT is enabled we need to install a vectored exception handler
3998   // in order to forward implicit exceptions from code in AOT
3999   // generated DLLs.  This is necessary since these DLLs are not
4000   // registered for structured exceptions like codecache methods are.
4001   if (AOTLibrary != NULL && (UseAOT || FLAG_IS_DEFAULT(UseAOT))) {
4002     topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelVectoredExceptionFilter);
4003   }
4004 #endif
4005 
4006   // for debugging float code generation bugs
4007   if (ForceFloatExceptions) {
4008 #ifndef  _WIN64
4009     static long fp_control_word = 0;
4010     __asm { fstcw fp_control_word }
4011     // see Intel PPro Manual, Vol. 2, p 7-16
4012     const long precision = 0x20;
4013     const long underflow = 0x10;
4014     const long overflow  = 0x08;
4015     const long zero_div  = 0x04;
4016     const long denorm    = 0x02;
4017     const long invalid   = 0x01;
4018     fp_control_word |= invalid;
4019     __asm { fldcw fp_control_word }
4020 #endif
4021   }
4022 
4023   // If stack_commit_size is 0, Windows will reserve the default size,
4024   // but only commit a small portion of it.
4025   size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
4026   size_t default_reserve_size = os::win32::default_stack_size();
4027   size_t actual_reserve_size = stack_commit_size;
4028   if (stack_commit_size < default_reserve_size) {
4029     // If stack_commit_size == 0, we want this too
4030     actual_reserve_size = default_reserve_size;
4031   }
4032 
4033   // Check minimum allowable stack size for thread creation and to initialize
4034   // the java system classes, including StackOverflowError - depends on page
4035   // size.  Add two 4K pages for compiler2 recursion in main thread.
4036   // Add in 4*BytesPerWord 4K pages to account for VM stack during
4037   // class initialization depending on 32 or 64 bit VM.
4038   size_t min_stack_allowed =
4039             (size_t)(JavaThread::stack_guard_zone_size() +
4040                      JavaThread::stack_shadow_zone_size() +
4041                      (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);
4042 
4043   min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());
4044 
4045   if (actual_reserve_size < min_stack_allowed) {
4046     tty->print_cr("\nThe Java thread stack size specified is too small. "
4047                   "Specify at least " SIZE_FORMAT "k",
4048                   min_stack_allowed / K);
4049     return JNI_ERR;
4050   }
4051 
4052   JavaThread::set_stack_size_at_create(stack_commit_size);
4053 
4054   // Calculate the theoretical maximum number of threads to guard against artificial
4055   // out-of-memory situations, where all available address space has been
4056   // reserved by thread stacks.
4057   assert(actual_reserve_size != 0, "Must have a stack");
4058 
4059   // Calculate the thread limit when we should start doing Virtual Memory
4060   // banging. Currently when the threads will have used all but 200Mb of space.
4061   //
4062   // TODO: consider performing a similar calculation for commit size instead
4063   // as reserve size, since on a 64-bit platform we'll run into that more
4064   // often than running out of virtual memory space.  We can use the
4065   // lower value of the two calculations as the os_thread_limit.
4066   size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
4067   win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);
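  // Illustrative example: on a 32-bit VM (BitsPerWord == 32) the budget is 2G - 200M = 1848M,
  // so with a 1M per-thread reserve size the limit works out to roughly 1848 threads.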
4068 
4069   // at exit methods are called in the reverse order of their registration.
4070   // there is no limit to the number of functions registered. atexit does
4071   // not set errno.
4072 
4073   if (PerfAllowAtExitRegistration) {
4074     // only register atexit functions if PerfAllowAtExitRegistration is set.
4075     // atexit functions can be delayed until process exit time, which
4076     // can be problematic for embedded VM situations. Embedded VMs should
4077     // call DestroyJavaVM() to assure that VM resources are released.
4078 
4079     // note: perfMemory_exit_helper atexit function may be removed in
4080     // the future if the appropriate cleanup code can be added to the
4081     // VM_Exit VMOperation's doit method.
4082     if (atexit(perfMemory_exit_helper) != 0) {
4083       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
4084     }
4085   }
4086 
4087 #ifndef _WIN64
4088   // Print something if NX is enabled (win32 on AMD64)
4089   NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
4090 #endif
4091 
4092   // initialize thread priority policy
4093   prio_init();
4094 
4095   if (UseNUMA && !ForceNUMA) {
4096     UseNUMA = false; // We don't fully support this yet
4097   }
4098 
4099   if (UseNUMAInterleaving || (UseNUMA && FLAG_IS_DEFAULT(UseNUMAInterleaving))) {
4100     if (!numa_interleaving_init()) {
4101       FLAG_SET_ERGO(UseNUMAInterleaving, false);
4102     } else if (!UseNUMAInterleaving) {
4103       // When NUMA is requested, non-NUMA-aware allocations default to interleaving.
4104       FLAG_SET_ERGO(UseNUMAInterleaving, true);
4105     }
4106   }
4107 
4108   if (initSock() != JNI_OK) {
4109     return JNI_ERR;
4110   }
4111 
4112   SymbolEngine::recalc_search_path();
4113 
4114   // Initialize data for jdk.internal.misc.Signal
4115   if (!ReduceSignalUsage) {
4116     jdk_misc_signal_init();
4117   }
4118 
4119   return JNI_OK;
4120 }
4121 
4122 // combine the high and low DWORD into a ULONGLONG
4123 static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
4124   ULONGLONG value = high_word;
4125   value <<= sizeof(high_word) * 8;
4126   value |= low_word;
4127   return value;
4128 }
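// For example, make_double_word(0x1, 0x80000000) yields 0x180000000: the high DWORD is
// shifted left by 32 bits and OR'ed with the low DWORD.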
4129 
4130 // Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
4131 static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
4132   ::memset((void*)sbuf, 0, sizeof(struct stat));
4133   sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
4134   sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
4135                                   file_data.ftLastWriteTime.dwLowDateTime);
4136   sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
4137                                   file_data.ftCreationTime.dwLowDateTime);
4138   sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
4139                                   file_data.ftLastAccessTime.dwLowDateTime);
4140   if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
4141     sbuf->st_mode |= S_IFDIR;
4142   } else {
4143     sbuf->st_mode |= S_IFREG;
4144   }
4145 }
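// Note that the FILETIME values are combined verbatim, i.e. the st_*time fields above hold
// 100-nanosecond intervals since January 1, 1601 rather than POSIX time_t seconds.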
4146 
4147 static errno_t convert_to_unicode(char const* char_path, LPWSTR* unicode_path) {
4148   // Get required buffer size to convert to Unicode
4149   int unicode_path_len = MultiByteToWideChar(CP_ACP,
4150                                              MB_ERR_INVALID_CHARS,
4151                                              char_path, -1,
4152                                              NULL, 0);
4153   if (unicode_path_len == 0) {
4154     return EINVAL;
4155   }
4156 
4157   *unicode_path = NEW_C_HEAP_ARRAY(WCHAR, unicode_path_len, mtInternal);
4158 
4159   int result = MultiByteToWideChar(CP_ACP,
4160                                    MB_ERR_INVALID_CHARS,
4161                                    char_path, -1,
4162                                    *unicode_path, unicode_path_len);
4163   assert(result == unicode_path_len, "length already checked above");
4164 
4165   return ERROR_SUCCESS;
4166 }
4167 
4168 static errno_t get_full_path(LPCWSTR unicode_path, LPWSTR* full_path) {
4169   // Get required buffer size to convert to full path. The return
4170   // value INCLUDES the terminating null character.
4171   DWORD full_path_len = GetFullPathNameW(unicode_path, 0, NULL, NULL);
4172   if (full_path_len == 0) {
4173     return EINVAL;
4174   }
4175 
4176   *full_path = NEW_C_HEAP_ARRAY(WCHAR, full_path_len, mtInternal);
4177 
4178   // When the buffer has sufficient size, the return value EXCLUDES the
4179   // terminating null character
4180   DWORD result = GetFullPathNameW(unicode_path, full_path_len, *full_path, NULL);
4181   assert(result <= full_path_len, "length already checked above");
4182 
4183   return ERROR_SUCCESS;
4184 }
4185 
4186 static void set_path_prefix(char* buf, LPWSTR* prefix, int* prefix_off, bool* needs_fullpath) {
4187   *prefix_off = 0;
4188   *needs_fullpath = true;
4189 
4190   if (::isalpha(buf[0]) && !::IsDBCSLeadByte(buf[0]) && buf[1] == ':' && buf[2] == '\\') {
4191     *prefix = L"\\\\?\\";
4192   } else if (buf[0] == '\\' && buf[1] == '\\') {
4193     if (buf[2] == '?' && buf[3] == '\\') {
4194       *prefix = L"";
4195       *needs_fullpath = false;
4196     } else {
4197       *prefix = L"\\\\?\\UNC";
4198       *prefix_off = 1; // Overwrite the first char with the prefix, so \\share\path becomes \\?\UNC\share\path
4199     }
4200   } else {
4201     *prefix = L"\\\\?\\";
4202   }
4203 }
4204 
4205 // Returns the given path as an absolute wide path in unc format. The returned path is NULL
4206 // on error (with err being set accordingly) and should be freed via os::free() otherwise.
4207 // additional_space is the size of space, in wchar_t, the function will additionally add to
4208 // the allocation of return buffer (such that the size of the returned buffer is at least
4209 // wcslen(buf) + 1 + additional_space).
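// For example, "C:\dir\file" becomes L"\\?\C:\dir\file", and a UNC path such as
// "\\share\path" becomes L"\\?\UNC\share\path".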
4210 static wchar_t* wide_abs_unc_path(char const* path, errno_t & err, int additional_space = 0) {
4211   if ((path == NULL) || (path[0] == '\0')) {
4212     err = ENOENT;
4213     return NULL;
4214   }
4215 
4216   // Need to allocate at least room for 3 characters, since os::native_path transforms C: to C:.
4217   size_t buf_len = 1 + MAX2((size_t)3, strlen(path));
4218   char* buf = NEW_C_HEAP_ARRAY(char, buf_len, mtInternal);
4219   strncpy(buf, path, buf_len);
4220   os::native_path(buf);
4221 
4222   LPWSTR prefix = NULL;
4223   int prefix_off = 0;
4224   bool needs_fullpath = true;
4225   set_path_prefix(buf, &prefix, &prefix_off, &needs_fullpath);
4226 
4227   LPWSTR unicode_path = NULL;
4228   err = convert_to_unicode(buf, &unicode_path);
4229   FREE_C_HEAP_ARRAY(char, buf);
4230   if (err != ERROR_SUCCESS) {
4231     return NULL;
4232   }
4233 
4234   LPWSTR converted_path = NULL;
4235   if (needs_fullpath) {
4236     err = get_full_path(unicode_path, &converted_path);
4237   } else {
4238     converted_path = unicode_path;
4239   }
4240 
4241   LPWSTR result = NULL;
4242   if (converted_path != NULL) {
4243     size_t prefix_len = wcslen(prefix);
4244     size_t result_len = prefix_len - prefix_off + wcslen(converted_path) + additional_space + 1;
4245     result = NEW_C_HEAP_ARRAY(WCHAR, result_len, mtInternal);
4246     _snwprintf(result, result_len, L"%s%s", prefix, &converted_path[prefix_off]);
4247 
4248     // Remove trailing pathsep (not for \\?\<DRIVE>:\, since it would make it relative)
4249     result_len = wcslen(result);
4250     if ((result[result_len - 1] == L'\\') &&
4251         !(::iswalpha(result[4]) && result[5] == L':' && result_len == 7)) {
4252       result[result_len - 1] = L'\0';
4253     }
4254   }
4255 
4256   if (converted_path != unicode_path) {
4257     FREE_C_HEAP_ARRAY(WCHAR, converted_path);
4258   }
4259   FREE_C_HEAP_ARRAY(WCHAR, unicode_path);
4260 
4261   return static_cast<wchar_t*>(result); // LPWSTR and wchar_t* are the same type on Windows.
4262 }
4263 
4264 int os::stat(const char *path, struct stat *sbuf) {
4265   errno_t err;
4266   wchar_t* wide_path = wide_abs_unc_path(path, err);
4267 
4268   if (wide_path == NULL) {
4269     errno = err;
4270     return -1;
4271   }
4272 
4273   WIN32_FILE_ATTRIBUTE_DATA file_data;
4274   BOOL bret = ::GetFileAttributesExW(wide_path, GetFileExInfoStandard, &file_data);
4275   os::free(wide_path);
4276 
4277   if (!bret) {
4278     errno = ::GetLastError();
4279     return -1;
4280   }
4281 
4282   file_attribute_data_to_stat(sbuf, file_data);
4283   return 0;
4284 }
4285 
4286 static HANDLE create_read_only_file_handle(const char* file) {
4287   errno_t err;
4288   wchar_t* wide_path = wide_abs_unc_path(file, err);
4289 
4290   if (wide_path == NULL) {
4291     errno = err;
4292     return INVALID_HANDLE_VALUE;
4293   }
4294 
4295   HANDLE handle = ::CreateFileW(wide_path, 0, FILE_SHARE_READ,
4296                                 NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4297   os::free(wide_path);
4298 
4299   return handle;
4300 }
4301 
4302 bool os::same_files(const char* file1, const char* file2) {
4303 
4304   if (file1 == NULL && file2 == NULL) {
4305     return true;
4306   }
4307 
4308   if (file1 == NULL || file2 == NULL) {
4309     return false;
4310   }
4311 
4312   if (strcmp(file1, file2) == 0) {
4313     return true;
4314   }
4315 
4316   HANDLE handle1 = create_read_only_file_handle(file1);
4317   HANDLE handle2 = create_read_only_file_handle(file2);
4318   bool result = false;
4319 
4320   // if we could open both paths...
4321   if (handle1 != INVALID_HANDLE_VALUE && handle2 != INVALID_HANDLE_VALUE) {
4322     BY_HANDLE_FILE_INFORMATION fileInfo1;
4323     BY_HANDLE_FILE_INFORMATION fileInfo2;
4324     if (::GetFileInformationByHandle(handle1, &fileInfo1) &&
4325       ::GetFileInformationByHandle(handle2, &fileInfo2)) {
4326       // the paths are the same if they refer to the same file (fileindex) on the same volume (volume serial number)
4327       if (fileInfo1.dwVolumeSerialNumber == fileInfo2.dwVolumeSerialNumber &&
4328         fileInfo1.nFileIndexHigh == fileInfo2.nFileIndexHigh &&
4329         fileInfo1.nFileIndexLow == fileInfo2.nFileIndexLow) {
4330         result = true;
4331       }
4332     }
4333   }
4334 
4335   //free the handles
4336   if (handle1 != INVALID_HANDLE_VALUE) {
4337     ::CloseHandle(handle1);
4338   }
4339 
4340   if (handle2 != INVALID_HANDLE_VALUE) {
4341     ::CloseHandle(handle2);
4342   }
4343 
4344   return result;
4345 }
4346 
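// FT2INT64 folds the two 32-bit halves of a FILETIME into a single jlong,
// i.e. a count of 100-nanosecond intervals (hence the "* 100" conversions
// to nanoseconds in thread_cpu_time() below).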
4347 #define FT2INT64(ft) \
4348   ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
4349 
4350 
4351 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4352 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4353 // of a thread.
4354 //
4355 // current_thread_cpu_time() and thread_cpu_time(Thread*) return
4356 // the fast estimate available on the platform.
4357 
4358 // current_thread_cpu_time() is not optimized for Windows yet
4359 jlong os::current_thread_cpu_time() {
4360   // return user + sys since the cost is the same
4361   return os::thread_cpu_time(Thread::current(), true /* user+sys */);
4362 }
4363 
4364 jlong os::thread_cpu_time(Thread* thread) {
4365   // consistent with what current_thread_cpu_time() returns.
4366   return os::thread_cpu_time(thread, true /* user+sys */);
4367 }
4368 
4369 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4370   return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4371 }
4372 
4373 jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
4374   // This code is copied from the classic VM -> hpi::sysThreadCPUTime
4375   // If this function changes, os::is_thread_cpu_time_supported() should too
4376   FILETIME CreationTime;
4377   FILETIME ExitTime;
4378   FILETIME KernelTime;
4379   FILETIME UserTime;
4380 
4381   if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
4382                       &ExitTime, &KernelTime, &UserTime) == 0) {
4383     return -1;
4384   } else if (user_sys_cpu_time) {
4385     return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
4386   } else {
4387     return FT2INT64(UserTime) * 100;
4388   }
4389 }
4390 
4391 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4392   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4393   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4394   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4395   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4396 }
4397 
4398 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4399   info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
4400   info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
4401   info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
4402   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
4403 }
4404 
4405 bool os::is_thread_cpu_time_supported() {
4406   // see os::thread_cpu_time
4407   FILETIME CreationTime;
4408   FILETIME ExitTime;
4409   FILETIME KernelTime;
4410   FILETIME UserTime;
4411 
4412   if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4413                       &KernelTime, &UserTime) == 0) {
4414     return false;
4415   } else {
4416     return true;
4417   }
4418 }
4419 
4420 // Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4421 // It does have primitives (PDH API) to get CPU usage and run queue length.
4422 // "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4423 // If we wanted to implement loadavg on Windows, we have a few options:
4424 //
4425 // a) Query CPU usage and run queue length and "fake" an answer by
4426 //    returning the CPU usage if it's under 100%, and the run queue
4427 //    length otherwise.  It turns out that querying is pretty slow
4428 //    on Windows, on the order of 200 microseconds on a fast machine.
4429 //    Note that on Windows the CPU usage value is the % usage
4430 //    since the last time the API was called (and the first call
4431 //    returns 100%), so we'd have to deal with that as well.
4432 //
4433 // b) Sample the "fake" answer using a sampling thread and store
4434 //    the answer in a global variable.  The call to loadavg would
4435 //    just return the value of the global, avoiding the slow query.
4436 //
4437 // c) Sample a better answer using exponential decay to smooth the
4438 //    value.  This is basically the algorithm used by UNIX kernels.
4439 //
4440 // Note that sampling thread starvation could affect both (b) and (c).
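//
// A minimal sketch of option (a) is shown below, kept as a comment only: it is
// illustrative and untested, is not part of this file's build, and would need
// <pdh.h> plus pdh.lib. The helper name and error handling are hypothetical.
//
//   static bool query_processor_queue_length(double* value) {
//     PDH_HQUERY query;
//     PDH_HCOUNTER counter;
//     if (PdhOpenQueryA(NULL, 0, &query) != ERROR_SUCCESS) return false;
//     if (PdhAddCounterA(query, "\\System\\Processor Queue Length", 0, &counter) != ERROR_SUCCESS ||
//         PdhCollectQueryData(query) != ERROR_SUCCESS) {
//       PdhCloseQuery(query);
//       return false;
//     }
//     PDH_FMT_COUNTERVALUE fmt;
//     bool ok = PdhGetFormattedCounterValue(counter, PDH_FMT_DOUBLE, NULL, &fmt) == ERROR_SUCCESS;
//     if (ok) *value = fmt.doubleValue;
//     PdhCloseQuery(query);
//     return ok;
//   }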
4441 int os::loadavg(double loadavg[], int nelem) {
4442   return -1;
4443 }
4444 
4445 
4446 // DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
4447 bool os::dont_yield() {
4448   return DontYieldALot;
4449 }
4450 
4451 int os::open(const char *path, int oflag, int mode) {
4452   errno_t err;
4453   wchar_t* wide_path = wide_abs_unc_path(path, err);
4454 
4455   if (wide_path == NULL) {
4456     errno = err;
4457     return -1;
4458   }
4459   int fd = ::_wopen(wide_path, oflag | O_BINARY | O_NOINHERIT, mode);
4460   os::free(wide_path);
4461 
4462   if (fd == -1) {
4463     errno = ::GetLastError();
4464   }
4465 
4466   return fd;
4467 }
4468 
4469 FILE* os::open(int fd, const char* mode) {
4470   return ::_fdopen(fd, mode);
4471 }
4472 
4473 // Is a (classpath) directory empty?
4474 bool os::dir_is_empty(const char* path) {
4475   errno_t err;
4476   wchar_t* wide_path = wide_abs_unc_path(path, err, 2);
4477 
4478   if (wide_path == NULL) {
4479     errno = err;
4480     return false;
4481   }
4482 
4483   // Make sure we end with "\\*"
4484   if (wide_path[wcslen(wide_path) - 1] == L'\\') {
4485     wcscat(wide_path, L"*");
4486   } else {
4487     wcscat(wide_path, L"\\*");
4488   }
4489 
4490   WIN32_FIND_DATAW fd;
4491   HANDLE f = ::FindFirstFileW(wide_path, &fd);
4492   os::free(wide_path);
4493   bool is_empty = true;
4494 
4495   if (f != INVALID_HANDLE_VALUE) {
4496     while (is_empty && ::FindNextFileW(f, &fd)) {
4497       // An empty directory contains only the current directory file
4498       // and the previous directory file.
4499       if ((wcscmp(fd.cFileName, L".") != 0) &&
4500           (wcscmp(fd.cFileName, L"..") != 0)) {
4501         is_empty = false;
4502       }
4503     }
4504     FindClose(f);
4505   } else {
4506     errno = ::GetLastError();
4507   }
4508 
4509   return is_empty;
4510 }
4511 
4512 // create binary file, rewriting existing file if required
4513 int os::create_binary_file(const char* path, bool rewrite_existing) {
4514   int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4515   if (!rewrite_existing) {
4516     oflags |= _O_EXCL;
4517   }
4518   return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4519 }
4520 
4521 // return current position of file pointer
4522 jlong os::current_file_offset(int fd) {
4523   return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
4524 }
4525 
4526 // move file pointer to the specified offset
4527 jlong os::seek_to_file_offset(int fd, jlong offset) {
4528   return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
4529 }
4530 
4531 
4532 jlong os::lseek(int fd, jlong offset, int whence) {
4533   return (jlong) ::_lseeki64(fd, offset, whence);
4534 }
4535 
4536 ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
4537   OVERLAPPED ov;
4538   DWORD nread;
4539   BOOL result;
4540 
4541   ZeroMemory(&ov, sizeof(ov));
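  // The OVERLAPPED structure carries the 64-bit read offset, split into its
  // low and high DWORDs, so ReadFile starts at the requested file position.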
4542   ov.Offset = (DWORD)offset;
4543   ov.OffsetHigh = (DWORD)(offset >> 32);
4544 
4545   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4546 
4547   result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);
4548 
4549   return result ? nread : 0;
4550 }
4551 
4552 
4553 // This method is a slightly reworked copy of JDK's sysNativePath
4554 // from src/windows/hpi/src/path_md.c
4555 
4556 // Convert a pathname to native format.  On win32, this involves forcing all
4557 // separators to be '\\' rather than '/' (both are legal inputs, but Win95
4558 // sometimes rejects '/') and removing redundant separators.  The input path is
4559 // assumed to have been converted into the character encoding used by the local
4560 // system.  Because this might be a double-byte encoding, care is taken to
4561 // treat double-byte lead characters correctly.
4562 //
4563 // This procedure modifies the given path in place, as the result is never
4564 // longer than the original.  There is no error return; this operation always
4565 // succeeds.
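//
// Examples (illustrative, derived from the code below):
//   "C://foo//bar/"      becomes  "C:\foo\bar"
//   "/c:/path"           becomes  "c:\path"
//   "//server//share/x"  becomes  "\\server\share\x"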
4566 char * os::native_path(char *path) {
4567   char *src = path, *dst = path, *end = path;
4568   char *colon = NULL;  // If a drive specifier is found, this will
4569                        // point to the colon following the drive letter
4570 
4571   // Assumption: '/', '\\', ':', and drive letters are never lead bytes
4572   assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
4573           && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");
4574 
4575   // Check for leading separators
4576 #define isfilesep(c) ((c) == '/' || (c) == '\\')
4577   while (isfilesep(*src)) {
4578     src++;
4579   }
4580 
4581   if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
4582     // Remove leading separators if followed by drive specifier.  This
4583     // hack is necessary to support file URLs containing drive
4584     // specifiers (e.g., "file://c:/path").  As a side effect,
4585     // "/c:/path" can be used as an alternative to "c:/path".
4586     *dst++ = *src++;
4587     colon = dst;
4588     *dst++ = ':';
4589     src++;
4590   } else {
4591     src = path;
4592     if (isfilesep(src[0]) && isfilesep(src[1])) {
4593       // UNC pathname: Retain first separator; leave src pointed at
4594       // second separator so that further separators will be collapsed
4595       // into the second separator.  The result will be a pathname
4596       // beginning with "\\\\" followed (most likely) by a host name.
4597       src = dst = path + 1;
4598       path[0] = '\\';     // Force first separator to '\\'
4599     }
4600   }
4601 
4602   end = dst;
4603 
4604   // Remove redundant separators from remainder of path, forcing all
4605   // separators to be '\\' rather than '/'. Also, single byte space
4606   // characters are removed from the end of the path because those
4607   // are not legal ending characters on this operating system.
4608   //
4609   while (*src != '\0') {
4610     if (isfilesep(*src)) {
4611       *dst++ = '\\'; src++;
4612       while (isfilesep(*src)) src++;
4613       if (*src == '\0') {
4614         // Check for trailing separator
4615         end = dst;
4616         if (colon == dst - 2) break;  // "z:\\"
4617         if (dst == path + 1) break;   // "\\"
4618         if (dst == path + 2 && isfilesep(path[0])) {
4619           // "\\\\" is not collapsed to "\\" because "\\\\" marks the
4620           // beginning of a UNC pathname.  Even though it is not, by
4621           // itself, a valid UNC pathname, we leave it as is in order
4622           // to be consistent with the path canonicalizer as well
4623           // as the win32 APIs, which treat this case as an invalid
4624           // UNC pathname rather than as an alias for the root
4625           // directory of the current drive.
4626           break;
4627         }
4628         end = --dst;  // Path does not denote a root directory, so
4629                       // remove trailing separator
4630         break;
4631       }
4632       end = dst;
4633     } else {
4634       if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
4635         *dst++ = *src++;
4636         if (*src) *dst++ = *src++;
4637         end = dst;
4638       } else {  // Copy a single-byte character
4639         char c = *src++;
4640         *dst++ = c;
4641         // Space is not a legal ending character
4642         if (c != ' ') end = dst;
4643       }
4644     }
4645   }
4646 
4647   *end = '\0';
4648 
4649   // For "z:", add "." to work around a bug in the C runtime library
4650   if (colon == dst - 1) {
4651     path[2] = '.';
4652     path[3] = '\0';
4653   }
4654 
4655   return path;
4656 }
4657 
4658 // This code is a copy of JDK's sysSetLength
4659 // from src/windows/hpi/src/sys_api_md.c
4660 
4661 int os::ftruncate(int fd, jlong length) {
4662   HANDLE h = (HANDLE)::_get_osfhandle(fd);
4663   long high = (long)(length >> 32);
4664   DWORD ret;
4665 
4666   if (h == (HANDLE)(-1)) {
4667     return -1;
4668   }
4669 
4670   ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
4671   if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
4672     return -1;
4673   }
4674 
4675   if (::SetEndOfFile(h) == FALSE) {
4676     return -1;
4677   }
4678 
4679   return 0;
4680 }
4681 
4682 int os::get_fileno(FILE* fp) {
4683   return _fileno(fp);
4684 }
4685 
4686 // This code is a copy of JDK's sysSync
4687 // from src/windows/hpi/src/sys_api_md.c
4688 // except for the legacy workaround for a bug in Win 98
4689 
4690 int os::fsync(int fd) {
4691   HANDLE handle = (HANDLE)::_get_osfhandle(fd);
4692 
4693   if ((!::FlushFileBuffers(handle)) &&
4694       (GetLastError() != ERROR_ACCESS_DENIED)) {
4695     // from winerror.h
4696     return -1;
4697   }
4698   return 0;
4699 }
4700 
4701 static int nonSeekAvailable(int, long *);
4702 static int stdinAvailable(int, long *);
4703 
4704 // This code is a copy of JDK's sysAvailable
4705 // from src/windows/hpi/src/sys_api_md.c
4706 
4707 int os::available(int fd, jlong *bytes) {
4708   jlong cur, end;
4709   struct _stati64 stbuf64;
4710 
4711   if (::_fstati64(fd, &stbuf64) >= 0) {
4712     int mode = stbuf64.st_mode;
4713     if (S_ISCHR(mode) || S_ISFIFO(mode)) {
4714       int ret;
4715       long lpbytes;
4716       if (fd == 0) {
4717         ret = stdinAvailable(fd, &lpbytes);
4718       } else {
4719         ret = nonSeekAvailable(fd, &lpbytes);
4720       }
4721       (*bytes) = (jlong)(lpbytes);
4722       return ret;
4723     }
4724     if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
4725       return FALSE;
4726     } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
4727       return FALSE;
4728     } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
4729       return FALSE;
4730     }
4731     *bytes = end - cur;
4732     return TRUE;
4733   } else {
4734     return FALSE;
4735   }
4736 }
4737 
4738 void os::flockfile(FILE* fp) {
4739   _lock_file(fp);
4740 }
4741 
4742 void os::funlockfile(FILE* fp) {
4743   _unlock_file(fp);
4744 }
4745 
4746 // This code is a copy of JDK's nonSeekAvailable
4747 // from src/windows/hpi/src/sys_api_md.c
4748 
4749 static int nonSeekAvailable(int fd, long *pbytes) {
4750   // This is used for available on non-seekable devices
4751   // (like both named and anonymous pipes, such as pipes
4752   //  connected to an exec'd process).
4753   // Standard Input is a special case.
4754   HANDLE han;
4755 
4756   if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
4757     return FALSE;
4758   }
4759 
4760   if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
4761     // PeekNamedPipe fails when at EOF.  In that case we
4762     // simply make *pbytes = 0 which is consistent with the
4763     // behavior we get on Solaris when an fd is at EOF.
4764     // The only alternative is to raise an Exception,
4765     // which isn't really warranted.
4766     //
4767     if (::GetLastError() != ERROR_BROKEN_PIPE) {
4768       return FALSE;
4769     }
4770     *pbytes = 0;
4771   }
4772   return TRUE;
4773 }
4774 
4775 #define MAX_INPUT_EVENTS 2000
4776 
4777 // This code is a copy of JDK's stdinAvailable
4778 // from src/windows/hpi/src/sys_api_md.c
4779 
4780 static int stdinAvailable(int fd, long *pbytes) {
4781   HANDLE han;
4782   DWORD numEventsRead = 0;  // Number of events read from buffer
4783   DWORD numEvents = 0;      // Number of events in buffer
4784   DWORD i = 0;              // Loop index
4785   DWORD curLength = 0;      // Position marker
4786   DWORD actualLength = 0;   // Number of bytes readable
4787   BOOL error = FALSE;       // Error holder
4788   INPUT_RECORD *lpBuffer;   // Pointer to records of input events
4789 
4790   if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
4791     return FALSE;
4792   }
4793 
4794   // Construct an array of input records in the console buffer
4795   error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
4796   if (error == 0) {
4797     return nonSeekAvailable(fd, pbytes);
4798   }
4799 
4800   // lpBuffer must fit into 64K or else PeekConsoleInput fails
4801   if (numEvents > MAX_INPUT_EVENTS) {
4802     numEvents = MAX_INPUT_EVENTS;
4803   }
4804 
4805   lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
4806   if (lpBuffer == NULL) {
4807     return FALSE;
4808   }
4809 
4810   error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
4811   if (error == 0) {
4812     os::free(lpBuffer);
4813     return FALSE;
4814   }
4815 
4816   // Examine input records for the number of bytes available
4817   for (i=0; i<numEvents; i++) {
4818     if (lpBuffer[i].EventType == KEY_EVENT) {
4819 
4820       KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
4821                                       &(lpBuffer[i].Event);
4822       if (keyRecord->bKeyDown == TRUE) {
4823         CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
4824         curLength++;
4825         if (*keyPressed == '\r') {
4826           actualLength = curLength;
4827         }
4828       }
4829     }
4830   }
4831 
4832   if (lpBuffer != NULL) {
4833     os::free(lpBuffer);
4834   }
4835 
4836   *pbytes = (long) actualLength;
4837   return TRUE;
4838 }
4839 
4840 // Map a block of memory.
4841 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4842                         char *addr, size_t bytes, bool read_only,
4843                         bool allow_exec) {
4844   HANDLE hFile;
4845   char* base;
4846 
4847   hFile = CreateFile(file_name, GENERIC_READ, FILE_SHARE_READ, NULL,
4848                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
4849   if (hFile == INVALID_HANDLE_VALUE) {
4850     log_info(os)("CreateFile() failed: GetLastError->%ld.", GetLastError());
4851     return NULL;
4852   }
4853 
4854   if (allow_exec) {
4855     // CreateFileMapping/MapViewOfFileEx can't map executable memory
4856     // unless it comes from a PE image (which the shared archive is not.)
4857     // Even VirtualProtect refuses to give execute access to mapped memory
4858     // that was not previously executable.
4859     //
4860     // Instead, stick the executable region in anonymous memory.  Yuck.
4861     // Penalty is that ~4 pages will not be shareable - in the future
4862     // we might consider DLLizing the shared archive with a proper PE
4863     // header so that mapping executable + sharing is possible.
4864 
4865     base = (char*) VirtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
4866                                 PAGE_READWRITE);
4867     if (base == NULL) {
4868       log_info(os)("VirtualAlloc() failed: GetLastError->%ld.", GetLastError());
4869       CloseHandle(hFile);
4870       return NULL;
4871     }
4872 
4873     // Record virtual memory allocation
4874     MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
4875 
4876     DWORD bytes_read;
4877     OVERLAPPED overlapped;
4878     overlapped.Offset = (DWORD)file_offset;
4879     overlapped.OffsetHigh = 0;
4880     overlapped.hEvent = NULL;
4881     // ReadFile guarantees that if the return value is true, the requested
4882     // number of bytes were read before returning.
4883     bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
4884     if (!res) {
4885       log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
4886       release_memory(base, bytes);
4887       CloseHandle(hFile);
4888       return NULL;
4889     }
4890   } else {
4891     HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
4892                                     NULL /* file_name */);
4893     if (hMap == NULL) {
4894       log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
4895       CloseHandle(hFile);
4896       return NULL;
4897     }
4898 
4899     DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
4900     base = (char*)MapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
4901                                   (DWORD)bytes, addr);
4902     if (base == NULL) {
4903       log_info(os)("MapViewOfFileEx() failed: GetLastError->%ld.", GetLastError());
4904       CloseHandle(hMap);
4905       CloseHandle(hFile);
4906       return NULL;
4907     }
4908 
4909     if (CloseHandle(hMap) == 0) {
4910       log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
4911       CloseHandle(hFile);
4912       return base;
4913     }
4914   }
4915 
4916   if (allow_exec) {
4917     DWORD old_protect;
4918     DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
4919     bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;
4920 
4921     if (!res) {
4922       log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
4923       // Don't consider this a hard error: on IA32, even if the
4924       // VirtualProtect fails, we should still be able to execute.
4925       CloseHandle(hFile);
4926       return base;
4927     }
4928   }
4929 
4930   if (CloseHandle(hFile) == 0) {
4931     log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
4932     return base;
4933   }
4934 
4935   return base;
4936 }
4937 
4938 
4939 // Remap a block of memory.
4940 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4941                           char *addr, size_t bytes, bool read_only,
4942                           bool allow_exec) {
4943   // This OS does not allow existing memory maps to be remapped so we
4944   // would have to unmap the memory before we remap it.
4945 
4946   // Because there is a small window between unmapping memory and mapping
4947   // it in again with different protections, CDS archives are mapped RW
4948   // on Windows, so this function isn't called.
4949   ShouldNotReachHere();
4950   return NULL;
4951 }
4952 
4953 
4954 // Unmap a block of memory.
4955 // Returns true=success, otherwise false.
4956 
4957 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4958   MEMORY_BASIC_INFORMATION mem_info;
4959   if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
4960     log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
4961     return false;
4962   }
4963 
4964   // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
4965   // Instead, executable region was allocated using VirtualAlloc(). See
4966   // pd_map_memory() above.
4967   //
4968   // The following flags should match the 'exec_access' flags used for
4969   // VirtualProtect() in pd_map_memory().
4970   if (mem_info.Protect == PAGE_EXECUTE_READ ||
4971       mem_info.Protect == PAGE_EXECUTE_READWRITE) {
4972     return pd_release_memory(addr, bytes);
4973   }
4974 
4975   BOOL result = UnmapViewOfFile(addr);
4976   if (result == 0) {
4977     log_info(os)("UnmapViewOfFile() failed: GetLastError->%ld.", GetLastError());
4978     return false;
4979   }
4980   return true;
4981 }
4982 
4983 void os::pause() {
4984   char filename[MAX_PATH];
4985   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4986     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
4987   } else {
4988     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4989   }
4990 
4991   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4992   if (fd != -1) {
4993     struct stat buf;
4994     ::close(fd);
4995     while (::stat(filename, &buf) == 0) {
4996       Sleep(100);
4997     }
4998   } else {
4999     jio_fprintf(stderr,
5000                 "Could not open pause file '%s', continuing immediately.\n", filename);
5001   }
5002 }
5003 
5004 Thread* os::ThreadCrashProtection::_protected_thread = NULL;
5005 os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
5006 volatile intptr_t os::ThreadCrashProtection::_crash_mux = 0;
5007 
5008 os::ThreadCrashProtection::ThreadCrashProtection() {
5009 }
5010 
5011 // See the caveats for this class in os_windows.hpp
5012 // Protects the callback call so that a raised OS EXCEPTION causes a jump back
5013 // into this method, which then returns false. If no OS EXCEPTION was raised,
5014 // returns true.
5015 // The callback is supposed to provide the method that should be protected.
5016 //
5017 bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
5018 
5019   Thread::muxAcquire(&_crash_mux, "CrashProtection");
5020 
5021   _protected_thread = Thread::current_or_null();
5022   assert(_protected_thread != NULL, "Cannot crash protect a NULL thread");
5023 
5024   bool success = true;
5025   __try {
5026     _crash_protection = this;
5027     cb.call();
5028   } __except(EXCEPTION_EXECUTE_HANDLER) {
5029     // only for protection, nothing to do
5030     success = false;
5031   }
5032   _crash_protection = NULL;
5033   _protected_thread = NULL;
5034   Thread::muxRelease(&_crash_mux);
5035   return success;
5036 }
5037 
5038 
5039 class HighResolutionInterval : public CHeapObj<mtThread> {
5040   // The default timer resolution seems to be 10 milliseconds.
5041   // (Where is this written down?)
5042   // If someone wants to sleep for only a fraction of the default,
5043   // then we set the timer resolution down to 1 millisecond for
5044   // the duration of their interval.
5045   // We carefully set the resolution back, since otherwise we
5046   // seem to incur an overhead (3%?) that we don't need.
5047   // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
5048   // But if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
5049   // Alternatively, we could compute the relative error (503/500 = .6%) and only use
5050   // timeBeginPeriod() if the relative error exceeded some threshold.
5051   // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
5052   // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
5053   // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
5054   // resolution timers running.
5055  private:
5056   jlong resolution;
5057  public:
5058   HighResolutionInterval(jlong ms) {
5059     resolution = ms % 10L;
5060     if (resolution != 0) {
5061       MMRESULT result = timeBeginPeriod(1L);
5062     }
5063   }
5064   ~HighResolutionInterval() {
5065     if (resolution != 0) {
5066       MMRESULT result = timeEndPeriod(1L);
5067     }
5068     resolution = 0L;
5069   }
5070 };
5071 
5072 // An Event wraps a win32 "CreateEvent" kernel handle.
5073 //
5074 // We have a number of choices regarding "CreateEvent" win32 handle leakage:
5075 //
5076 // 1:  When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5077 //     field, and call CloseHandle() on the win32 event handle.  Unpark() would
5078 //     need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5079 //     In addition, an unpark() operation might fetch the handle field, but the
5080 //     event could recycle between the fetch and the SetEvent() operation.
5081 //     SetEvent() would either fail because the handle was invalid, or inadvertently work,
5082 //     as the win32 handle value had been recycled.  In an ideal world calling SetEvent()
5083 //     on a stale but recycled handle would be harmless, but in practice this might
5084 //     confuse other non-Sun code, so it's not a viable approach.
5085 //
5086 // 2:  Once a win32 event handle is associated with an Event, it remains associated
5087 //     with the Event.  The event handle is never closed.  This could be construed
5088 //     as handle leakage, but only up to the maximum # of threads that have been extant
5089 //     at any one time.  This shouldn't be an issue, as Windows platforms typically
5090 //     permit a process to have hundreds of thousands of open handles.
5091 //
5092 // 3:  Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5093 //     and release unused handles.
5094 //
5095 // 4:  Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5096 //     It's not clear, however, that we wouldn't be trading one type of leak for another.
5097 //
5098 // 5.  Use an RCU-like mechanism (Read-Copy Update).
5099 //     Or perhaps something similar to Maged Michael's "Hazard pointers".
5100 //
5101 // We use (2).
5102 //
5103 // TODO-FIXME:
5104 // 1.  Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5105 // 2.  Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5106 //     to recover from (or at least detect) the dreaded Windows 841176 bug.
5107 // 3.  Collapse the JSR166 parker event, and the objectmonitor ParkEvent
5108 //     into a single win32 CreateEvent() handle.
5109 //
5110 // Assumption:
5111 //    Only one parker can exist on an event, which is why we allocate
5112 //    them per-thread. Multiple unparkers can coexist.
5113 //
5114 // _Event transitions in park()
5115 //   -1 => -1 : illegal
5116 //    1 =>  0 : pass - return immediately
5117 //    0 => -1 : block; then set _Event to 0 before returning
5118 //
5119 // _Event transitions in unpark()
5120 //    0 => 1 : just return
5121 //    1 => 1 : just return
5122 //   -1 => either 0 or 1; must signal target thread
5123 //         That is, we can safely transition _Event from -1 to either
5124 //         0 or 1.
5125 //
5126 // _Event serves as a restricted-range semaphore.
5127 //   -1 : thread is blocked, i.e. there is a waiter
5128 //    0 : neutral: thread is running or ready,
5129 //        could have been signaled after a wait started
5130 //    1 : signaled - thread is running or ready
5131 //
5132 // Another possible encoding of _Event would be with
5133 // explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5134 //
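// Illustrative usage sketch only (names are hypothetical; the real callers are
// the park/unpark paths elsewhere in the runtime):
//
//   // waiting thread (the unique thread associated with this event)
//   while (!condition) {
//     ev->park();
//   }
//
//   // signalling thread (any thread)
//   condition = true;
//   ev->unpark();
//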
5135 
5136 int os::PlatformEvent::park(jlong Millis) {
5137   // Transitions for _Event:
5138   //   -1 => -1 : illegal
5139   //    1 =>  0 : pass - return immediately
5140   //    0 => -1 : block; then set _Event to 0 before returning
5141 
5142   guarantee(_ParkHandle != NULL , "Invariant");
5143   guarantee(Millis > 0          , "Invariant");
5144 
5145   // CONSIDER: defer assigning a CreateEvent() handle to the Event until
5146   // the initial park() operation.
5147   // Consider: use atomic decrement instead of CAS-loop
5148 
5149   int v;
5150   for (;;) {
5151     v = _Event;
5152     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5153   }
5154   guarantee((v == 0) || (v == 1), "invariant");
5155   if (v != 0) return OS_OK;
5156 
5157   // Do this the hard way by blocking ...
5158   // TODO: consider a brief spin here, gated on the success of recent
5159   // spin attempts by this thread.
5160   //
5161   // We decompose long timeouts into series of shorter timed waits.
5162   // Evidently large timeout values passed to WaitForSingleObject() are problematic on some
5163   // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
5164   // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
5165   // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
5166   // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
5167   // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
5168   // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
5169   // for the already waited time.  This policy does not admit any new outcomes.
5170   // In the future, however, we might want to track the accumulated wait time and
5171   // adjust Millis accordingly if we encounter a spurious wakeup.
5172 
5173   const int MAXTIMEOUT = 0x10000000;
5174   DWORD rv = WAIT_TIMEOUT;
5175   while (_Event < 0 && Millis > 0) {
5176     DWORD prd = Millis;     // set prd = MIN(Millis, MAXTIMEOUT)
5177     if (Millis > MAXTIMEOUT) {
5178       prd = MAXTIMEOUT;
5179     }
5180     HighResolutionInterval *phri = NULL;
5181     if (!ForceTimeHighResolution) {
5182       phri = new HighResolutionInterval(prd);
5183     }
5184     rv = ::WaitForSingleObject(_ParkHandle, prd);
5185     assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
5186     if (rv == WAIT_TIMEOUT) {
5187       Millis -= prd;
5188     }
5189     delete phri; // if it is NULL, harmless
5190   }
5191   v = _Event;
5192   _Event = 0;
5193   // see comment at end of os::PlatformEvent::park() below:
5194   OrderAccess::fence();
5195   // If we encounter a nearly simultaneous timeout expiry and unpark()
5196   // we return OS_OK indicating we awoke via unpark().
5197   // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
5198   return (v >= 0) ? OS_OK : OS_TIMEOUT;
5199 }
5200 
5201 void os::PlatformEvent::park() {
5202   // Transitions for _Event:
5203   //   -1 => -1 : illegal
5204   //    1 =>  0 : pass - return immediately
5205   //    0 => -1 : block; then set _Event to 0 before returning
5206 
5207   guarantee(_ParkHandle != NULL, "Invariant");
5208   // Invariant: Only the thread associated with the Event/PlatformEvent
5209   // may call park().
5210   // Consider: use atomic decrement instead of CAS-loop
5211   int v;
5212   for (;;) {
5213     v = _Event;
5214     if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
5215   }
5216   guarantee((v == 0) || (v == 1), "invariant");
5217   if (v != 0) return;
5218 
5219   // Do this the hard way by blocking ...
5220   // TODO: consider a brief spin here, gated on the success of recent
5221   // spin attempts by this thread.
5222   while (_Event < 0) {
5223     DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
5224     assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
5225   }
5226 
5227   // Usually we'll find _Event == 0 at this point, but as an
5228   // optional optimization we clear it, just in case multiple
5229   // unpark() operations drove _Event up to 1.
5230   _Event = 0;
5231   OrderAccess::fence();
5232   guarantee(_Event >= 0, "invariant");
5233 }
5234 
5235 void os::PlatformEvent::unpark() {
5236   guarantee(_ParkHandle != NULL, "Invariant");
5237 
5238   // Transitions for _Event:
5239   //    0 => 1 : just return
5240   //    1 => 1 : just return
5241   //   -1 => either 0 or 1; must signal target thread
5242   //         That is, we can safely transition _Event from -1 to either
5243   //         0 or 1.
5244   // See also: "Semaphores in Plan 9" by Mullender & Cox
5245   //
5246   // Note: Forcing a transition from "-1" to "1" on an unpark() means
5247   // that it will take two back-to-back park() calls for the owning
5248   // thread to block. This has the benefit of forcing a spurious return
5249   // from the first park() call after an unpark() call which will help
5250   // shake out uses of park() and unpark() without condition variables.
5251 
5252   if (Atomic::xchg(&_Event, 1) >= 0) return;
5253 
5254   ::SetEvent(_ParkHandle);
5255 }
5256 
5257 
5258 // JSR166
5259 // -------------------------------------------------------
5260 
5261 // The Windows implementation of Park is very straightforward: Basic
5262 // operations on Win32 Events turn out to have the right semantics to
5263 // use them directly. We opportunistically reuse the event inherited
5264 // from Monitor.
5265 
5266 void Parker::park(bool isAbsolute, jlong time) {
5267   guarantee(_ParkEvent != NULL, "invariant");
5268   // First, demultiplex/decode time arguments
5269   if (time < 0) { // don't wait
5270     return;
5271   } else if (time == 0 && !isAbsolute) {
5272     time = INFINITE;
5273   } else if (isAbsolute) {
5274     time -= os::javaTimeMillis(); // convert to relative time
5275     if (time <= 0) {  // already elapsed
5276       return;
5277     }
5278   } else { // relative
5279     time /= 1000000;  // Must coarsen from nanos to millis
5280     if (time == 0) {  // Wait for the minimal time unit if zero
5281       time = 1;
5282     }
5283   }
5284 
5285   JavaThread* thread = JavaThread::current();
5286 
5287   // Don't wait if interrupted or already triggered
5288   if (thread->is_interrupted(false) ||
5289       WaitForSingleObject(_ParkEvent, 0) == WAIT_OBJECT_0) {
5290     ResetEvent(_ParkEvent);
5291     return;
5292   } else {
5293     ThreadBlockInVM tbivm(thread);
5294     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5295     thread->set_suspend_equivalent();
5296 
5297     WaitForSingleObject(_ParkEvent, time);
5298     ResetEvent(_ParkEvent);
5299 
5300     // If externally suspended while waiting, re-suspend
5301     if (thread->handle_special_suspend_equivalent_condition()) {
5302       thread->java_suspend_self();
5303     }
5304   }
5305 }
5306 
5307 void Parker::unpark() {
5308   guarantee(_ParkEvent != NULL, "invariant");
5309   SetEvent(_ParkEvent);
5310 }
5311 
5312 // Platform Monitor implementation
5313 
5314 // Must already be locked
5315 int os::PlatformMonitor::wait(jlong millis) {
5316   assert(millis >= 0, "negative timeout");
5317   int ret = OS_TIMEOUT;
5318   int status = SleepConditionVariableCS(&_cond, &_mutex,
5319                                         millis == 0 ? INFINITE : millis);
5320   if (status != 0) {
5321     ret = OS_OK;
5322   }
5323   #ifndef PRODUCT
5324   else {
5325     DWORD err = GetLastError();
5326     assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
5327   }
5328   #endif
5329   return ret;
5330 }
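// Illustrative usage sketch (assumes the lock()/unlock()/notify() members
// declared for PlatformMonitor in the platform headers; the monitor and flag
// names are hypothetical):
//
//   m->lock();
//   while (!ready) {
//     m->wait(0);   // 0 means wait without a timeout, see above
//   }
//   m->unlock();
//
//   // in another thread:
//   m->lock();
//   ready = true;
//   m->notify();
//   m->unlock();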
5331 
5332 // Run the specified command in a separate process. Return its exit value,
5333 // or -1 on failure (e.g. can't create a new process).
5334 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
5335   STARTUPINFO si;
5336   PROCESS_INFORMATION pi;
5337   DWORD exit_code;
5338 
5339   char * cmd_string;
5340   const char * cmd_prefix = "cmd /C ";
5341   size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
5342   cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
5343   if (cmd_string == NULL) {
5344     return -1;
5345   }
5346   cmd_string[0] = '\0';
5347   strcat(cmd_string, cmd_prefix);
5348   strcat(cmd_string, cmd);
5349 
5350   // now replace all '\n' with '&'
5351   char * substring = cmd_string;
5352   while ((substring = strchr(substring, '\n')) != NULL) {
5353     substring[0] = '&';
5354     substring++;
5355   }
5356   memset(&si, 0, sizeof(si));
5357   si.cb = sizeof(si);
5358   memset(&pi, 0, sizeof(pi));
5359   BOOL rslt = CreateProcess(NULL,   // executable name - use command line
5360                             cmd_string,    // command line
5361                             NULL,   // process security attribute
5362                             NULL,   // thread security attribute
5363                             TRUE,   // inherits system handles
5364                             0,      // no creation flags
5365                             NULL,   // use parent's environment block
5366                             NULL,   // use parent's starting directory
5367                             &si,    // (in) startup information
5368                             &pi);   // (out) process information
5369 
5370   if (rslt) {
5371     // Wait until child process exits.
5372     WaitForSingleObject(pi.hProcess, INFINITE);
5373 
5374     GetExitCodeProcess(pi.hProcess, &exit_code);
5375 
5376     // Close process and thread handles.
5377     CloseHandle(pi.hProcess);
5378     CloseHandle(pi.hThread);
5379   } else {
5380     exit_code = -1;
5381   }
5382 
5383   FREE_C_HEAP_ARRAY(char, cmd_string);
5384   return (int)exit_code;
5385 }
5386 
5387 bool os::find(address addr, outputStream* st) {
5388   int offset = -1;
5389   bool result = false;
5390   char buf[256];
5391   if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
5392     st->print(PTR_FORMAT " ", addr);
5393     if (strlen(buf) < sizeof(buf) - 1) {
5394       char* p = strrchr(buf, '\\');
5395       if (p) {
5396         st->print("%s", p + 1);
5397       } else {
5398         st->print("%s", buf);
5399       }
5400     } else {
5401         // The library name is probably truncated. Let's omit the library name.
5402         // See also JDK-8147512.
5403     }
5404     if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
5405       st->print("::%s + 0x%x", buf, offset);
5406     }
5407     st->cr();
5408     result = true;
5409   }
5410   return result;
5411 }
5412 
5413 static jint initSock() {
5414   WSADATA wsadata;
5415 
5416   if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
5417     jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
5418                 ::GetLastError());
5419     return JNI_ERR;
5420   }
5421   return JNI_OK;
5422 }
5423 
5424 struct hostent* os::get_host_by_name(char* name) {
5425   return (struct hostent*)gethostbyname(name);
5426 }
5427 
5428 int os::socket_close(int fd) {
5429   return ::closesocket(fd);
5430 }
5431 
5432 int os::socket(int domain, int type, int protocol) {
5433   return ::socket(domain, type, protocol);
5434 }
5435 
5436 int os::connect(int fd, struct sockaddr* him, socklen_t len) {
5437   return ::connect(fd, him, len);
5438 }
5439 
5440 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
5441   return ::recv(fd, buf, (int)nBytes, flags);
5442 }
5443 
5444 int os::send(int fd, char* buf, size_t nBytes, uint flags) {
5445   return ::send(fd, buf, (int)nBytes, flags);
5446 }
5447 
5448 int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5449   return ::send(fd, buf, (int)nBytes, flags);
5450 }
5451 
5452 // WINDOWS CONTEXT Flags for THREAD_SAMPLING
5453 #if defined(IA32)
5454   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5455 #elif defined (AMD64)
5456   #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5457 #endif
5458 
5459 // returns true if thread could be suspended,
5460 // false otherwise
5461 static bool do_suspend(HANDLE* h) {
5462   if (h != NULL) {
5463     if (SuspendThread(*h) != ~0) {
5464       return true;
5465     }
5466   }
5467   return false;
5468 }
5469 
5470 // resume the thread
5471 // calling resume on an active thread is a no-op
5472 static void do_resume(HANDLE* h) {
5473   if (h != NULL) {
5474     ResumeThread(*h);
5475   }
5476 }
5477 
5478 // retrieve a suspend/resume context capable handle
5479 // from the tid. Caller validates handle return value.
5480 void get_thread_handle_for_extended_context(HANDLE* h,
5481                                             OSThread::thread_id_t tid) {
5482   if (h != NULL) {
5483     *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5484   }
5485 }
5486 
5487 // Thread sampling implementation
5488 //
5489 void os::SuspendedThreadTask::internal_do_task() {
5490   CONTEXT    ctxt;
5491   HANDLE     h = NULL;
5492 
5493   // get context capable handle for thread
5494   get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
5495 
5496   // sanity
5497   if (h == NULL || h == INVALID_HANDLE_VALUE) {
5498     return;
5499   }
5500 
5501   // suspend the thread
5502   if (do_suspend(&h)) {
5503     ctxt.ContextFlags = sampling_context_flags;
5504     // get thread context
5505     GetThreadContext(h, &ctxt);
5506     SuspendedThreadTaskContext context(_thread, &ctxt);
5507     // pass context to Thread Sampling impl
5508     do_task(context);
5509     // resume thread
5510     do_resume(&h);
5511   }
5512 
5513   // close handle
5514   CloseHandle(h);
5515 }
5516 
5517 bool os::start_debugging(char *buf, int buflen) {
5518   int len = (int)strlen(buf);
5519   char *p = &buf[len];
5520 
5521   jio_snprintf(p, buflen-len,
5522              "\n\n"
5523              "Do you want to debug the problem?\n\n"
5524              "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5525              "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5526              "Otherwise, select 'No' to abort...",
5527              os::current_process_id(), os::current_thread_id());
5528 
5529   bool yes = os::message_box("Unexpected Error", buf);
5530 
5531   if (yes) {
5532     // os::breakpoint() calls DebugBreak(), which causes a breakpoint
5533     // exception. If VM is running inside a debugger, the debugger will
5534     // catch the exception. Otherwise, the breakpoint exception will reach
5535     // the default windows exception handler, which can spawn a debugger and
5536     // automatically attach to the dying VM.
5537     os::breakpoint();
5538     yes = false;
5539   }
5540   return yes;
5541 }
5542 
5543 void* os::get_default_process_handle() {
5544   return (void*)GetModuleHandle(NULL);
5545 }
5546 
5547 // Builds a platform dependent Agent_OnLoad_<lib_name> function name
5548 // which is used to find statically linked in agents.
5549 // Additionally for windows, takes into account __stdcall names.
5550 // Parameters:
5551 //            sym_name: Symbol in library we are looking for
5552 //            lib_name: Name of library to look in, NULL for shared libs.
5553 //            is_absolute_path == true if lib_name is absolute path to agent
5554 //                                     such as "C:/a/b/L.dll"
5555 //            == false if only the base name of the library is passed in
5556 //               such as "L"
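//
// For example (illustrative): sym_name "Agent_OnLoad" with lib_name "L" yields
// "Agent_OnLoad_L"; a decorated sym_name "_Agent_OnLoad@12" with lib_name
// "C:\a\b\L.dll" and is_absolute_path == true yields "_Agent_OnLoad_L@12".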
5557 char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
5558                                     bool is_absolute_path) {
5559   char *agent_entry_name;
5560   size_t len;
5561   size_t name_len;
5562   size_t prefix_len = strlen(JNI_LIB_PREFIX);
5563   size_t suffix_len = strlen(JNI_LIB_SUFFIX);
5564   const char *start;
5565 
5566   if (lib_name != NULL) {
5567     len = name_len = strlen(lib_name);
5568     if (is_absolute_path) {
5569       // Need to strip path, prefix and suffix
5570       if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
5571         lib_name = ++start;
5572       } else {
5573         // Need to check for drive prefix
5574         if ((start = strchr(lib_name, ':')) != NULL) {
5575           lib_name = ++start;
5576         }
5577       }
5578       if (len <= (prefix_len + suffix_len)) {
5579         return NULL;
5580       }
5581       lib_name += prefix_len;
5582       name_len = strlen(lib_name) - suffix_len;
5583     }
5584   }
5585   len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
5586   agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
5587   if (agent_entry_name == NULL) {
5588     return NULL;
5589   }
5590   if (lib_name != NULL) {
5591     const char *p = strrchr(sym_name, '@');
5592     if (p != NULL && p != sym_name) {
5593       // sym_name == _Agent_OnLoad@XX
5594       strncpy(agent_entry_name, sym_name, (p - sym_name));
5595       agent_entry_name[(p-sym_name)] = '\0';
5596       // agent_entry_name == _Agent_OnLoad
5597       strcat(agent_entry_name, "_");
5598       strncat(agent_entry_name, lib_name, name_len);
5599       strcat(agent_entry_name, p);
5600       // agent_entry_name == _Agent_OnLoad_lib_name@XX
5601     } else {
5602       strcpy(agent_entry_name, sym_name);
5603       strcat(agent_entry_name, "_");
5604       strncat(agent_entry_name, lib_name, name_len);
5605     }
5606   } else {
5607     strcpy(agent_entry_name, sym_name);
5608   }
5609   return agent_entry_name;
5610 }
5611 
5612 #ifndef PRODUCT
5613 
5614 // test the code path in reserve_memory_special() that tries to allocate memory in a single
5615 // contiguous memory block at a particular address.
5616 // The test first tries to find a good approximate address to allocate at by using the same
5617 // method to allocate some memory at any address. The test then tries to allocate memory in
5618 // the vicinity (not directly after it, to avoid possible by-chance use of that location).
5619 // This is of course only a dodgy assumption; there is no guarantee that the vicinity of
5620 // the previously allocated memory is available for allocation. The only actual failure
5621 // that is reported is when the test tries to allocate at a particular location but gets a
5622 // different valid one. A NULL return value at this point is not considered an error but may
5623 // be legitimate.
5624 void TestReserveMemorySpecial_test() {
5625   if (!UseLargePages) {
5626     return;
5627   }
5628   // save current value of globals
5629   bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
5630   bool old_use_numa_interleaving = UseNUMAInterleaving;
5631 
5632   // set globals to make sure we hit the correct code path
5633   UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
5634 
5635   // do an allocation at an address selected by the OS to get a good one.
5636   const size_t large_allocation_size = os::large_page_size() * 4;
5637   char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
5638   if (result == NULL) {
5639   } else {
5640     os::release_memory_special(result, large_allocation_size);
5641 
5642     // allocate another page within the recently allocated memory area which seems to be a good location. At least
5643     // we managed to get it once.
5644     const size_t expected_allocation_size = os::large_page_size();
5645     char* expected_location = result + os::large_page_size();
5646     char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
5647     if (actual_location == NULL) {
5648     } else {
5649       // release memory
5650       os::release_memory_special(actual_location, expected_allocation_size);
5651       // only now check, after releasing any memory to avoid any leaks.
5652       assert(actual_location == expected_location,
5653              "Failed to allocate memory at requested location " PTR_FORMAT " of size " SIZE_FORMAT ", is " PTR_FORMAT " instead",
5654              expected_location, expected_allocation_size, actual_location);
5655     }
5656   }
5657 
5658   // restore globals
5659   UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
5660   UseNUMAInterleaving = old_use_numa_interleaving;
5661 }
5662 #endif // PRODUCT
5663 
5664 /*
5665   All the defined signal names for Windows.
5666 
5667   NOTE that not all of these names are accepted by FindSignal!
5668 
5669   For various reasons some of these may be rejected at runtime.
5670 
5671   Here are the names currently accepted by a user of sun.misc.Signal with
5672   1.4.1 (ignoring potential interaction with use of chaining, etc):
5673 
5674      (LIST TBD)
5675 
5676 */
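// For example, get_signal_number("TERM") returns SIGTERM; unknown names
// return -1.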
5677 int os::get_signal_number(const char* name) {
5678   static const struct {
5679     const char* name;
5680     int         number;
5681   } siglabels [] =
5682     // derived from version 6.0 VC98/include/signal.h
5683   {"ABRT",      SIGABRT,        // abnormal termination triggered by abort cl
5684   "FPE",        SIGFPE,         // floating point exception
5685   "SEGV",       SIGSEGV,        // segment violation
5686   "INT",        SIGINT,         // interrupt
5687   "TERM",       SIGTERM,        // software term signal from kill
5688   "BREAK",      SIGBREAK,       // Ctrl-Break sequence
5689   "ILL",        SIGILL};        // illegal instruction
5690   for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5691     if (strcmp(name, siglabels[i].name) == 0) {
5692       return siglabels[i].number;
5693     }
5694   }
5695   return -1;
5696 }
5697 
5698 // Fast current thread access
5699 
5700 int os::win32::_thread_ptr_offset = 0;
5701 
5702 static void call_wrapper_dummy() {}
5703 
5704 // We need to call the os_exception_wrapper once so that it sets
5705 // up the offset from FS of the thread pointer.
5706 void os::win32::initialize_thread_ptr_offset() {
5707   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
5708                            NULL, methodHandle(), NULL, NULL);
5709 }
5710 
5711 bool os::supports_map_sync() {
5712   return false;
5713 }